diff --git a/contrib/build/build.go b/contrib/build/build.go
index 07601e07..e3678290 100644
--- a/contrib/build/build.go
+++ b/contrib/build/build.go
@@ -125,15 +125,11 @@ func main() {
}
build("pktd", ".", &conf)
- build("pktwallet", "./pktwallet", &conf)
build("pktctl", "./cmd/pktctl", &conf)
- build("checksig", "./cmd/checksig", &conf)
- build("pld", "./lnd/cmd/lnd", &conf)
- build("pldctl", "./lnd/cmd/lncli", &conf)
if strings.Contains(strings.Join(os.Args, "|"), "--test") {
test()
} else {
fmt.Println("Pass the --test flag if you want to run the tests as well")
}
- fmt.Println("Everything looks good, type `./bin/pktwallet --create` to make a wallet")
+ fmt.Println("Everything looks good, type `./bin/pktd` to launch the full node.")
}
diff --git a/go.mod b/go.mod
index d2b1e6d3..8708d85c 100644
--- a/go.mod
+++ b/go.mod
@@ -9,80 +9,33 @@ replace github.com/coreos/bbolt => go.etcd.io/bbolt v1.3.5
replace google.golang.org/grpc => google.golang.org/grpc v1.29.1
require (
- git.schwanenlied.me/yawning/bsaes.git v0.0.0-20180720073208-c0276d75487e // indirect
- github.com/NebulousLabs/fastrand v0.0.0-20181203155948-6fb6489aac4e // indirect
- github.com/NebulousLabs/go-upnp v0.0.0-20180202185039-29b680b06c82
- github.com/Yawning/aez v0.0.0-20180114000226-4dad034d9db2
github.com/aead/chacha20 v0.0.0-20180709150244-8b13a72661da
github.com/aead/siphash v1.0.1
github.com/arl/statsviz v0.2.2-0.20201115121518-5ea9f0cf1bd1
github.com/btcsuite/winsvc v1.0.0
- github.com/coreos/bbolt v0.0.0-00010101000000-000000000000 // indirect
- github.com/coreos/etcd v3.3.22+incompatible
- github.com/coreos/go-semver v0.3.0 // indirect
- github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf // indirect
- github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f // indirect
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc
github.com/dchest/blake2b v1.0.0
- github.com/dgrijalva/jwt-go v3.2.0+incompatible // indirect
- github.com/dustin/go-humanize v1.0.0 // indirect
- github.com/emirpasic/gods v1.12.1-0.20200630092735-7e2349589531
github.com/fsnotify/fsnotify v1.4.10-0.20200417215612-7f4cf4dd2b52 // indirect
- github.com/go-errors/errors v1.0.1
- github.com/go-openapi/strfmt v0.19.5 // indirect
- github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e // indirect
- github.com/golang/protobuf v1.4.3
+ github.com/golang/protobuf v1.4.3 // indirect
github.com/golang/snappy v0.0.2
- github.com/google/btree v1.0.0 // indirect
github.com/gorilla/websocket v1.4.3-0.20200912193213-c3dd95aea977
- github.com/grpc-ecosystem/go-grpc-middleware v1.0.0
- github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0
- github.com/grpc-ecosystem/grpc-gateway v1.14.3
- github.com/jackpal/gateway v1.0.5
- github.com/jackpal/go-nat-pmp v0.0.0-20170405195558-28a68d0c24ad
- github.com/jedib0t/go-pretty v4.3.0+incompatible
github.com/jessevdk/go-flags v1.4.1-0.20200711081900-c17162fe8fd7
- github.com/jonboulle/clockwork v0.1.0 // indirect
github.com/json-iterator/go v1.1.11-0.20200806011408-6821bec9fa5c
- github.com/juju/clock v0.0.0-20190205081909-9c5c9712527c // indirect
- github.com/juju/errors v0.0.0-20190806202954-0232dcc7464d // indirect
- github.com/juju/loggo v0.0.0-20190526231331-6e530bcce5d8 // indirect
- github.com/juju/retry v0.0.0-20180821225755-9058e192b216 // indirect
- github.com/juju/testing v0.0.0-20190723135506-ce30eb24acd2 // indirect
- github.com/juju/utils v0.0.0-20180820210520-bf9cc5bdd62d // indirect
- github.com/juju/version v0.0.0-20180108022336-b64dbd566305 // indirect
github.com/kkdai/bstream v1.0.0
- github.com/lightninglabs/protobuf-hex-display v1.3.3-0.20191212020323-b444784ce75d
- github.com/ltcsuite/ltcd v0.0.0-20190101042124-f37f8bf35796
- github.com/mattn/go-runewidth v0.0.9 // indirect
- github.com/miekg/dns v0.0.0-20171125082028-79bfde677fa8
+ github.com/kr/pretty v0.1.0 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.1 // indirect
github.com/nxadm/tail v1.4.6-0.20201001195649-edf6bc2dfc36 // indirect
github.com/onsi/ginkgo v1.14.3-0.20201013214636-dfe369837f25
github.com/onsi/gomega v1.10.3
- github.com/prometheus/client_golang v0.9.3
github.com/sethgrid/pester v1.1.1-0.20200617174401-d2ad9ec9a8b6
- github.com/soheilhy/cmux v0.1.4 // indirect
github.com/stretchr/testify v1.6.1
- github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5 // indirect
- github.com/tv42/zbase32 v0.0.0-20160707012821-501572607d02
- github.com/urfave/cli v1.18.0
- github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 // indirect
- go.etcd.io/bbolt v1.3.6-0.20200807205753-f6be82302843
- go.uber.org/zap v1.14.1 // indirect
golang.org/x/crypto v0.0.0-20201016220609-9e8e0b390897
- golang.org/x/net v0.0.0-20201029221708-28c70e62bb1d
- golang.org/x/sys v0.0.0-20201029080932-201ba4db2418
+ golang.org/x/net v0.0.0-20201029221708-28c70e62bb1d // indirect
+ golang.org/x/sys v0.15.0 // indirect
golang.org/x/text v0.3.4 // indirect
- golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect
- google.golang.org/genproto v0.0.0-20201021134325-0d71844de594 // indirect
- google.golang.org/grpc v1.34.0-dev.0.20201021230544-4e8458e5c638
- gopkg.in/errgo.v1 v1.0.1 // indirect
- gopkg.in/macaroon-bakery.v2 v2.0.1
- gopkg.in/macaroon.v2 v2.0.0
- gopkg.in/mgo.v2 v2.0.0-20190816093944-a6b53ec6cb22 // indirect
+ google.golang.org/protobuf v1.24.0 // indirect
+ gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 // indirect
gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776 // indirect
- sigs.k8s.io/yaml v1.1.0 // indirect
)
diff --git a/go.sum b/go.sum
index ded99c1a..22156792 100644
--- a/go.sum
+++ b/go.sum
@@ -1,89 +1,28 @@
-github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
-github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
-github.com/NebulousLabs/fastrand v0.0.0-20181203155948-6fb6489aac4e h1:n+DcnTNkQnHlwpsrHoQtkrJIO7CBx029fw6oR4vIob4=
-github.com/NebulousLabs/fastrand v0.0.0-20181203155948-6fb6489aac4e/go.mod h1:Bdzq+51GR4/0DIhaICZEOm+OHvXGwwB2trKZ8B4Y6eQ=
-github.com/NebulousLabs/go-upnp v0.0.0-20180202185039-29b680b06c82 h1:MG93+PZYs9PyEsj/n5/haQu2gK0h4tUtSy9ejtMwWa0=
-github.com/NebulousLabs/go-upnp v0.0.0-20180202185039-29b680b06c82/go.mod h1:GbuBk21JqF+driLX3XtJYNZjGa45YDoa9IqCTzNSfEc=
-github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
-github.com/Yawning/aez v0.0.0-20180114000226-4dad034d9db2 h1:2be4ykKKov3M1yISM2E8gnGXZ/N2SsPawfnGiXxaYEU=
-github.com/Yawning/aez v0.0.0-20180114000226-4dad034d9db2/go.mod h1:9pIqrY6SXNL8vjRQE5Hd/OL5GyK/9MrGUWs87z/eFfk=
-github.com/Yawning/bsaes v0.0.0-20180720073208-c0276d75487e h1:n88VxLC80RPVHbFG/kq7ItMizCVRPCyLj63UMqxLkOw=
-github.com/Yawning/bsaes v0.0.0-20180720073208-c0276d75487e/go.mod h1:3JAJz+vEO82SkYEkAa2lRPkQC7lslUY24HX3929i2Ec=
github.com/aead/chacha20 v0.0.0-20180709150244-8b13a72661da h1:KjTM2ks9d14ZYCvmHS9iAKVt9AyzRSqNU1qabPih5BY=
github.com/aead/chacha20 v0.0.0-20180709150244-8b13a72661da/go.mod h1:eHEWzANqSiWQsof+nXEI9bUVUyV6F53Fp89EuCh2EAA=
github.com/aead/siphash v1.0.1 h1:FwHfE/T45KPKYuuSAKyyvE+oPWcaQ+CUmFW0bPlM+kg=
github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII=
-github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
-github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
-github.com/antihax/optional v0.0.0-20180407024304-ca021399b1a6/go.mod h1:V8iCPQYkqmusNa815XgQio277wI47sdRh1dUOLdyC6Q=
github.com/arl/statsviz v0.2.2-0.20201115121518-5ea9f0cf1bd1 h1:k6L9CoSCgZjUXhMhJgmaMx2WhW54cpBKHBoa6tmDcKg=
github.com/arl/statsviz v0.2.2-0.20201115121518-5ea9f0cf1bd1/go.mod h1:Dg/DhcWPSzBVk70gVbZWcymzHDkYRhVpeScx5l+Zj7o=
-github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a h1:idn718Q4B6AGu/h5Sxe66HYVdqdGu2l9Iebqhi/AEoA=
-github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
-github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
-github.com/beorn7/perks v1.0.0 h1:HWo1m869IqiPhD389kmkxeTalrjNbbJTC8LXupb+sl0=
-github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
-github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA=
-github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd/go.mod h1:HHNXQzUsZCxOoE+CPiyCTO6x34Zs86zZUiwtpXoGdtg=
-github.com/btcsuite/golangcrypto v0.0.0-20150304025918-53f62d9b43e8/go.mod h1:tYvUd8KLhm/oXvUeSEs2VlLghFjQt9+ZaF9ghH0JNjc=
-github.com/btcsuite/goleveldb v0.0.0-20160330041536-7834afc9e8cd/go.mod h1:F+uVaaLLH7j4eDXPRvw78tMflu7Ie2bzYOH4Y8rRKBY=
-github.com/btcsuite/snappy-go v0.0.0-20151229074030-0bdef8d06723/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc=
-github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtEyQwv5/p4Mg4C0fgbePVuGr935/5ddU9Z3TmDRY=
github.com/btcsuite/winsvc v1.0.0 h1:J9B4L7e3oqhXOcm+2IuNApwzQec85lE+QaikUcCs+dk=
github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
-github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
-github.com/coreos/etcd v3.3.22+incompatible h1:AnRMUyVdVvh1k7lHe61YEd227+CLoNogQuAypztGSK4=
-github.com/coreos/etcd v3.3.22+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
-github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM=
-github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
-github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf h1:iW4rZ826su+pqaw19uhpSCzhj44qo35pNgKFGqzDKkU=
-github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
-github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f h1:lBNOc5arjvs8E5mO2tbpBpLoyyu8B6e44T7hJy6potg=
-github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
-github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/dchest/blake2b v1.0.0 h1:KK9LimVmE0MjRl9095XJmKqZ+iLxWATvlcpVFRtaw6s=
github.com/dchest/blake2b v1.0.0/go.mod h1:U034kXgbJpCle2wSk5ybGIVhOSHCVLMDqOzcPEA0F7s=
-github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM=
-github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
-github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
-github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo=
-github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
-github.com/emirpasic/gods v1.12.1-0.20200630092735-7e2349589531 h1:gNOxjQ2UtCFsNdUvfF8fcifUheqb1z3tcDNso+QMDuk=
-github.com/emirpasic/gods v1.12.1-0.20200630092735-7e2349589531/go.mod h1:YfzfFFoVP/catgzJb4IKIqXjX78Ha8FMSDh3ymbK86o=
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
-github.com/frankban/quicktest v1.2.2 h1:xfmOhhoH5fGPgbEAlhLpJH9p0z/0Qizio9osmvn9IUY=
-github.com/frankban/quicktest v1.2.2/go.mod h1:Qh/WofXFeiAFII1aEBu529AtJo6Zg2VHscnEsbBnJ20=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
github.com/fsnotify/fsnotify v1.4.10-0.20200417215612-7f4cf4dd2b52 h1:0NmERxogGTU8hgzOhRKNoKivtBZkDW29GeuJtK9e0sc=
github.com/fsnotify/fsnotify v1.4.10-0.20200417215612-7f4cf4dd2b52/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
-github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
-github.com/go-errors/errors v1.0.1 h1:LUHzmkK3GUKUrL/1gfBUxAHzcev3apQlezX/+O7ma6w=
-github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q=
-github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
-github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
-github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
-github.com/go-openapi/errors v0.19.2 h1:a2kIyV3w+OS3S97zxUndRVD46+FhGOUBDFY7nmu4CsY=
-github.com/go-openapi/errors v0.19.2/go.mod h1:qX0BLWsyaKfvhluLejVpVNwNRdXZhEbTA4kxxpKBC94=
-github.com/go-openapi/strfmt v0.19.5 h1:0utjKrw+BAh8s57XE9Xz8DUBsVvPmRUB6styvl9wWIM=
-github.com/go-openapi/strfmt v0.19.5/go.mod h1:eftuHTlB/dI8Uq8JJOyRlieZf+WkkxUuk0dgdHXr2Qk=
-github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk=
-github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
-github.com/gogo/protobuf v1.1.1 h1:72R+M5VuhED/KujmZVcIquuo8mBgX4oVda//DQb3PXo=
-github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
-github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e h1:1r7pUrabqp18hOBcwBwiTsbnFeTZHV9eER/QT5JVZxY=
-github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
@@ -97,92 +36,35 @@ github.com/golang/protobuf v1.4.3 h1:JjCZWpVbqXDqFVmTfYWEVTMIYrL/NPdPSCHPJ0T/raM
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/snappy v0.0.2 h1:aeE13tS0IiQgFjYdoL8qN3K1N2bXXtI6Vi51/y7BpMw=
github.com/golang/snappy v0.0.2/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
-github.com/google/btree v1.0.0 h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo=
-github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
-github.com/google/go-cmp v0.2.1-0.20190312032427-6f77996f0c42/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
-github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
-github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY=
-github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/gorilla/websocket v1.4.3-0.20200912193213-c3dd95aea977 h1:a5PtLMWJYzuNNFNzGNl0oHZUsMJbE7qxvjSLbA3boiY=
github.com/gorilla/websocket v1.4.3-0.20200912193213-c3dd95aea977/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
-github.com/grpc-ecosystem/go-grpc-middleware v1.0.0 h1:Iju5GlWwrvL6UBg4zJJt3btmonfrMlCDdsejg4CZE7c=
-github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
-github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho=
-github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
-github.com/grpc-ecosystem/grpc-gateway v1.14.3 h1:OCJlWkOUoTnl0neNGlf4fUm3TmbEtguw7vR+nGtnDjY=
-github.com/grpc-ecosystem/grpc-gateway v1.14.3/go.mod h1:6CwZWGDSPRJidgKAtJVvND6soZe6fT7iteq8wDPdhb0=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
-github.com/jackpal/gateway v1.0.5 h1:qzXWUJfuMdlLMtt0a3Dgt+xkWQiA5itDEITVJtuSwMc=
-github.com/jackpal/gateway v1.0.5/go.mod h1:lTpwd4ACLXmpyiCTRtfiNyVnUmqT9RivzCDQetPfnjA=
-github.com/jackpal/go-nat-pmp v0.0.0-20170405195558-28a68d0c24ad h1:heFfj7z0pGsNCekUlsFhO2jstxO4b5iQ665LjwM5mDc=
-github.com/jackpal/go-nat-pmp v0.0.0-20170405195558-28a68d0c24ad/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc=
-github.com/jedib0t/go-pretty v4.3.0+incompatible h1:CGs8AVhEKg/n9YbUenWmNStRW2PHJzaeDodcfvRAbIo=
-github.com/jedib0t/go-pretty v4.3.0+incompatible/go.mod h1:XemHduiw8R651AF9Pt4FwCTKeG3oo7hrHJAoznj9nag=
-github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
github.com/jessevdk/go-flags v1.4.1-0.20200711081900-c17162fe8fd7 h1:Ug59miTxVKVg5Oi2S5uHlKOIV5jBx4Hb2u0jIxxDaSs=
github.com/jessevdk/go-flags v1.4.1-0.20200711081900-c17162fe8fd7/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
-github.com/jonboulle/clockwork v0.1.0 h1:VKV+ZcuP6l3yW9doeqz6ziZGgcynBVQO+obU0+0hcPo=
-github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
-github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ=
github.com/json-iterator/go v1.1.11-0.20200806011408-6821bec9fa5c h1:pyHLN175+U/9YIGgS34PCGLWQcw2tGiDNpnXaQv9U2Y=
github.com/json-iterator/go v1.1.11-0.20200806011408-6821bec9fa5c/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
-github.com/juju/clock v0.0.0-20190205081909-9c5c9712527c h1:3UvYABOQRhJAApj9MdCN+Ydv841ETSoy6xLzdmmr/9A=
-github.com/juju/clock v0.0.0-20190205081909-9c5c9712527c/go.mod h1:nD0vlnrUjcjJhqN5WuCWZyzfd5AHZAC9/ajvbSx69xA=
-github.com/juju/errors v0.0.0-20190806202954-0232dcc7464d h1:hJXjZMxj0SWlMoQkzeZDLi2cmeiWKa7y1B8Rg+qaoEc=
-github.com/juju/errors v0.0.0-20190806202954-0232dcc7464d/go.mod h1:W54LbzXuIE0boCoNJfwqpmkKJ1O4TCTZMetAt6jGk7Q=
-github.com/juju/loggo v0.0.0-20190526231331-6e530bcce5d8 h1:UUHMLvzt/31azWTN/ifGWef4WUqvXk0iRqdhdy/2uzI=
-github.com/juju/loggo v0.0.0-20190526231331-6e530bcce5d8/go.mod h1:vgyd7OREkbtVEN/8IXZe5Ooef3LQePvuBm9UWj6ZL8U=
-github.com/juju/retry v0.0.0-20180821225755-9058e192b216 h1:/eQL7EJQKFHByJe3DeE8Z36yqManj9UY5zppDoQi4FU=
-github.com/juju/retry v0.0.0-20180821225755-9058e192b216/go.mod h1:OohPQGsr4pnxwD5YljhQ+TZnuVRYpa5irjugL1Yuif4=
-github.com/juju/testing v0.0.0-20190723135506-ce30eb24acd2 h1:Pp8RxiF4rSoXP9SED26WCfNB28/dwTDpPXS8XMJR8rc=
-github.com/juju/testing v0.0.0-20190723135506-ce30eb24acd2/go.mod h1:63prj8cnj0tU0S9OHjGJn+b1h0ZghCndfnbQolrYTwA=
-github.com/juju/utils v0.0.0-20180820210520-bf9cc5bdd62d h1:irPlN9z5VCe6BTsqVsxheCZH99OFSmqSVyTigW4mEoY=
-github.com/juju/utils v0.0.0-20180820210520-bf9cc5bdd62d/go.mod h1:6/KLg8Wz/y2KVGWEpkK9vMNGkOnu4k/cqs8Z1fKjTOk=
-github.com/juju/version v0.0.0-20180108022336-b64dbd566305 h1:lQxPJ1URr2fjsKnJRt/BxiIxjLt9IKGvS+0injMHbag=
-github.com/juju/version v0.0.0-20180108022336-b64dbd566305/go.mod h1:kE8gK5X0CImdr7qpSKl3xB2PmpySSmfj7zVbkZFs81U=
-github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
-github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
-github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4=
github.com/kkdai/bstream v1.0.0 h1:Se5gHwgp2VT2uHfDrkbbgbgEvV9cimLELwrPJctSjg8=
github.com/kkdai/bstream v1.0.0/go.mod h1:FDnDOHt5Yx4p3FaHcioFT0QjDOtgUpvjeZqAs+NVZZA=
-github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk=
-github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
-github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
-github.com/lightninglabs/protobuf-hex-display v1.3.3-0.20191212020323-b444784ce75d h1:QWD/5MPnaZfUVP7P8wLa4M8Td2DI7XXHXt2vhVtUgGI=
-github.com/lightninglabs/protobuf-hex-display v1.3.3-0.20191212020323-b444784ce75d/go.mod h1:KDb67YMzoh4eudnzClmvs2FbiLG9vxISmLApUkCa4uI=
-github.com/ltcsuite/ltcd v0.0.0-20190101042124-f37f8bf35796 h1:sjOGyegMIhvgfq5oaue6Td+hxZuf3tDC8lAPrFldqFw=
-github.com/ltcsuite/ltcd v0.0.0-20190101042124-f37f8bf35796/go.mod h1:3p7ZTf9V1sNPI5H8P3NkTFF4LuwMdPl2DodF60qAKqY=
-github.com/ltcsuite/ltcutil v0.0.0-20181217130922-17f3b04680b6/go.mod h1:8Vg/LTOO0KYa/vlHWJ6XZAevPQThGH5sufO0Hrou/lA=
-github.com/mattn/go-runewidth v0.0.9 h1:Lm995f3rfxdpd6TSmuVCHVb/QhupuXlYr8sCI/QdE+0=
-github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
-github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
-github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
-github.com/miekg/dns v0.0.0-20171125082028-79bfde677fa8 h1:PRMAcldsl4mXKJeRNB/KVNz6TlbS6hk2Rs42PqgU3Ws=
-github.com/miekg/dns v0.0.0-20171125082028-79bfde677fa8/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
-github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE=
-github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI=
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
-github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
github.com/nxadm/tail v1.4.6-0.20201001195649-edf6bc2dfc36 h1:PRRpSsmsTtwhP1qI6upsrOzE5M8ic156VLo+rCfVUJo=
github.com/nxadm/tail v1.4.6-0.20201001195649-edf6bc2dfc36/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
-github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
github.com/onsi/ginkgo v1.14.3-0.20201013214636-dfe369837f25 h1:fJfvJUUCt/J+eEtCQ0IEwk21eis9Bvts7kQHUc6dS0g=
@@ -191,88 +73,27 @@ github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7J
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
github.com/onsi/gomega v1.10.3 h1:gph6h/qe9GSUw1NhH1gp+qb+h8rXD8Cy60Z32Qw3ELA=
github.com/onsi/gomega v1.10.3/go.mod h1:V9xEwhxec5O8UDM77eCW8vLymOMltsqPVYWrpDsH8xc=
-github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
-github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
-github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
-github.com/prometheus/client_golang v0.9.3 h1:9iH4JKXLzFbOAdtqv/a+j8aewx2Y8lAjAydhbaScPF8=
-github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso=
-github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
-github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4 h1:gQz4mCbXsO+nc9n1hCxHcGA3Zx3Eo+UHZoInFGUIXNM=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
-github.com/prometheus/common v0.4.0 h1:7etb9YClo3a6HjLzfl6rIQaU+FDfi0VSX39io3aQ+DM=
-github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
-github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
-github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084 h1:sofwID9zm4tzrgykg80hfFph1mryUeLRsUfoocVVmRY=
-github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
-github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
-github.com/rogpeppe/fastuuid v1.2.0 h1:Ppwyp6VYCF1nvBTXL3trRso7mXMlRrw9ooo375wvi2s=
-github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
-github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/sethgrid/pester v1.1.1-0.20200617174401-d2ad9ec9a8b6 h1:PZ6YTNMEyy6GMAL+xsBHP3Ohjq4DIhNviW4evP+BzB0=
github.com/sethgrid/pester v1.1.1-0.20200617174401-d2ad9ec9a8b6/go.mod h1:hEUINb4RqvDxtoCaU0BNT/HV4ig5kfgOasrf1xcvr0A=
-github.com/sirupsen/logrus v1.2.0 h1:juTguoYk5qI21pwyTXY3B3Y5cOTH3ZUyZCg1v/mihuo=
-github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
-github.com/soheilhy/cmux v0.1.4 h1:0HKaf1o97UwFjHH9o5XsHUOF+tqmdA7KEzXLpiyaw0E=
-github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
-github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
-github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
-github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
-github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
-github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0=
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
-github.com/tidwall/pretty v1.0.0 h1:HsD+QiTn7sK6flMKIvNmpqz1qrpP3Ps6jOKIKMooyg4=
-github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk=
-github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5 h1:LnC5Kc/wtumK+WB441p7ynQJzVuNRJiqddSIE3IlSEQ=
-github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
-github.com/tv42/zbase32 v0.0.0-20160707012821-501572607d02 h1:tcJ6OjwOMvExLlzrAVZute09ocAGa7KqOON60++Gz4E=
-github.com/tv42/zbase32 v0.0.0-20160707012821-501572607d02/go.mod h1:tHlrkM198S068ZqfrO6S8HsoJq2bF3ETfTL+kt4tInY=
-github.com/urfave/cli v1.18.0 h1:m9MfmZWX7bwr9kUcs/Asr95j0IVXzGNNc+/5ku2m26Q=
-github.com/urfave/cli v1.18.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
-github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 h1:eY9dn8+vbi4tKz5Qo6v2eYzo7kUS51QINcR5jNpbZS8=
-github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
-go.etcd.io/bbolt v1.3.5 h1:XAzx9gjCb0Rxj7EoqcClPD1d5ZBxZJk0jbuoPHenBt0=
-go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ=
-go.etcd.io/bbolt v1.3.6-0.20200807205753-f6be82302843 h1:g0YWcnTxZ70pMN+rjjHC2/ba4T+R6okysNm3KdSt7gA=
-go.etcd.io/bbolt v1.3.6-0.20200807205753-f6be82302843/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ=
-go.mongodb.org/mongo-driver v1.0.3 h1:GKoji1ld3tw2aC+GX1wbr/J2fX13yNacEYoJ8Nhr0yU=
-go.mongodb.org/mongo-driver v1.0.3/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM=
-go.uber.org/atomic v1.6.0 h1:Ezj3JGmsOnG1MoRWQkPBsKLe9DwWD9QeXzTRzzldNVk=
-go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
-go.uber.org/multierr v1.5.0 h1:KCa4XfM8CWFCpxXRGok+Q0SS/0XBhMDbHHGABQLvD2A=
-go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU=
-go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee h1:0mgffUl7nfd+FpvXMVz4IDEaUSmT1ysygQC7qYo7sG4=
-go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA=
-go.uber.org/zap v1.14.1 h1:nYDKopTbvAPq/NrUVZwT15y2lpROBiLLyoRTbXOYWOo=
-go.uber.org/zap v1.14.1/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc=
-golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
-golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
-golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20201016220609-9e8e0b390897 h1:pLI5jrR7OSLijeIDcmRxNmw2api+jEfxLoykJVice/E=
golang.org/x/crypto v0.0.0-20201016220609-9e8e0b390897/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
-golang.org/x/lint v0.0.0-20190930215403-16217165b5de h1:5hukYrvBGR8/eNkX5mdUezrA6JiaEZDtJb9Ei+1LlBs=
-golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
-golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20191002035440-2ec189313ef0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20201006153459-a7d1128ccaa0/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201029221708-28c70e62bb1d h1:dOiJ2n2cMwGLce/74I/QHMbnpk5GfY7InR8rczoMqRM=
@@ -280,49 +101,35 @@ golang.org/x/net v0.0.0-20201029221708-28c70e62bb1d/go.mod h1:sp8m0HH+o8qH0wwXwY
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201029080932-201ba4db2418 h1:HlFl4V6pEMziuLXyRkm5BIYq1y1GAbb02pRlWvI54OM=
golang.org/x/sys v0.0.0-20201029080932-201ba4db2418/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.15.0 h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc=
+golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.4 h1:0YWbFKbhXG/wIiuHDSKpS0Iy7FSA+u45VtBMfQcFTTc=
golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2 h1:+DCIGbF/swA92ohVg0//6X2IVY3KZs6p9mix0ziNYJM=
-golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
-golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
-golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5 h1:hKsoRgsbwY1NafxrwTs+k64bikrLBkAgPir1TNCj3Zs=
-golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
-google.golang.org/genproto v0.0.0-20190927181202-20e1ac93f88c/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
-google.golang.org/genproto v0.0.0-20201021134325-0d71844de594 h1:JZWUHUjZJojCHxs9ZZLFsnRGKVBXBoOHGxeTSt6OE+Q=
-google.golang.org/genproto v0.0.0-20201021134325-0d71844de594/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/grpc v1.29.1 h1:EC2SB8S04d2r73uptxphDSUG+kTKVgjRPF+N3xpxRB4=
google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
@@ -334,25 +141,12 @@ google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.24.0 h1:UhZDfRO8JRQru4/+LlLE0BRKGF8L+PICnvYZmx/fEGA=
google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=
-gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/errgo.v1 v1.0.1 h1:oQFRXzZ7CkBGdm1XZm/EbQYaYNNEElNBOd09M6cqNso=
-gopkg.in/errgo.v1 v1.0.1/go.mod h1:3NjfXwocQRYAPTq4/fzX+CwUhPRcR/azYRhj8G+LqMo=
-gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
-gopkg.in/macaroon-bakery.v2 v2.0.1 h1:0N1TlEdfLP4HXNCg7MQUMp5XwvOoxk+oe9Owr2cpvsc=
-gopkg.in/macaroon-bakery.v2 v2.0.1/go.mod h1:B4/T17l+ZWGwxFSZQmlBwp25x+og7OkhETfr3S9MbIA=
-gopkg.in/macaroon.v2 v2.0.0 h1:LVWycAfeJBUjCIqfR9gqlo7I8vmiXRr51YEOZ1suop8=
-gopkg.in/macaroon.v2 v2.0.0/go.mod h1:+I6LnTMkm/uV5ew/0nsulNjL16SK4+C8yDmRUzHR17I=
-gopkg.in/mgo.v2 v2.0.0-20190816093944-a6b53ec6cb22 h1:VpOs+IwYnYBaFnrNAeB8UUWtL3vEUnzSCL1nVjPhqrw=
-gopkg.in/mgo.v2 v2.0.0-20190816093944-a6b53ec6cb22/go.mod h1:yeKp02qBN3iKW1OzL3MGk2IdtZzaj7SFntXj72NppTA=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
-gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU=
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
@@ -361,7 +155,3 @@ gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776 h1:tQIYjPdBoyREyB9XMu+nnTclp
gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
-honnef.co/go/tools v0.0.1-2019.2.3 h1:3JgtbtFHMiCmsznwGVTUWbgGov+pVqnlf1dEJTNAXeM=
-honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
-sigs.k8s.io/yaml v1.1.0 h1:4A07+ZFc2wgJwo8YNlQpr1rVlgUDlxXHhPJciaPY5gs=
-sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=
diff --git a/lnd/Dockerfile b/lnd/Dockerfile
deleted file mode 100644
index d66220a3..00000000
--- a/lnd/Dockerfile
+++ /dev/null
@@ -1,43 +0,0 @@
-FROM golang:1.14.5-alpine as builder
-
-# Force Go to use the cgo based DNS resolver. This is required to ensure DNS
-# queries required to connect to linked containers succeed.
-ENV GODEBUG netdns=cgo
-
-# Pass a tag, branch or a commit using build-arg. This allows a docker
-# image to be built from a specified Git state. The default image
-# will use the Git tip of master by default.
-ARG checkout="master"
-
-# Install dependencies and build the binaries.
-RUN apk add --no-cache --update alpine-sdk \
- git \
- make \
- gcc \
-&& git clone https://github.com/lightningnetwork/lnd /go/src/github.com/lightningnetwork/lnd \
-&& cd /go/src/github.com/lightningnetwork/lnd \
-&& git checkout $checkout \
-&& make \
-&& make install tags="signrpc walletrpc chainrpc invoicesrpc"
-
-# Start a new, final image.
-FROM alpine as final
-
-# Define a root volume for data persistence.
-VOLUME /root/.lnd
-
-# Add bash, jq and ca-certs, for quality of life and SSL-related reasons.
-RUN apk --no-cache add \
- bash \
- jq \
- ca-certificates
-
-# Copy the binaries from the builder image.
-COPY --from=builder /go/bin/lncli /bin/
-COPY --from=builder /go/bin/lnd /bin/
-
-# Expose lnd ports (p2p, rpc).
-EXPOSE 9735 10009
-
-# Specify the start command and entrypoint as the lnd daemon.
-ENTRYPOINT ["lnd"]
diff --git a/lnd/LICENSE b/lnd/LICENSE
deleted file mode 100644
index 70f1f0df..00000000
--- a/lnd/LICENSE
+++ /dev/null
@@ -1,19 +0,0 @@
-Copyright (C) 2015-2018 Lightning Labs and The Lightning Network Developers
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
diff --git a/lnd/Makefile b/lnd/Makefile
deleted file mode 100644
index 22d7b9d8..00000000
--- a/lnd/Makefile
+++ /dev/null
@@ -1,343 +0,0 @@
-PKG := github.com/lightningnetwork/lnd
-ESCPKG := github.com\/lightningnetwork\/lnd
-MOBILE_PKG := $(PKG)/mobile
-
-BTCD_PKG := github.com/btcsuite/btcd
-GOVERALLS_PKG := github.com/mattn/goveralls
-LINT_PKG := github.com/golangci/golangci-lint/cmd/golangci-lint
-GOACC_PKG := github.com/ory/go-acc
-FALAFEL_PKG := github.com/lightninglabs/falafel
-GOIMPORTS_PKG := golang.org/x/tools/cmd/goimports
-GOFUZZ_BUILD_PKG := github.com/dvyukov/go-fuzz/go-fuzz-build
-GOFUZZ_PKG := github.com/dvyukov/go-fuzz/go-fuzz
-
-GO_BIN := ${GOPATH}/bin
-BTCD_BIN := $(GO_BIN)/btcd
-GOMOBILE_BIN := GO111MODULE=off $(GO_BIN)/gomobile
-GOVERALLS_BIN := $(GO_BIN)/goveralls
-LINT_BIN := $(GO_BIN)/golangci-lint
-GOACC_BIN := $(GO_BIN)/go-acc
-GOFUZZ_BUILD_BIN := $(GO_BIN)/go-fuzz-build
-GOFUZZ_BIN := $(GO_BIN)/go-fuzz
-
-BTCD_DIR :=${GOPATH}/src/$(BTCD_PKG)
-MOBILE_BUILD_DIR :=${GOPATH}/src/$(MOBILE_PKG)/build
-IOS_BUILD_DIR := $(MOBILE_BUILD_DIR)/ios
-IOS_BUILD := $(IOS_BUILD_DIR)/Lndmobile.framework
-ANDROID_BUILD_DIR := $(MOBILE_BUILD_DIR)/android
-ANDROID_BUILD := $(ANDROID_BUILD_DIR)/Lndmobile.aar
-
-COMMIT := $(shell git describe --abbrev=40 --dirty)
-COMMIT_HASH := $(shell git rev-parse HEAD)
-
-BTCD_COMMIT := $(shell cat go.mod | \
- grep $(BTCD_PKG) | \
- tail -n1 | \
- awk -F " " '{ print $$2 }' | \
- awk -F "/" '{ print $$1 }')
-
-LINT_COMMIT := v1.18.0
-GOACC_COMMIT := ddc355013f90fea78d83d3a6c71f1d37ac07ecd5
-FALAFEL_COMMIT := v0.7.1
-GOFUZZ_COMMIT := 21309f307f61
-
-DEPGET := cd /tmp && GO111MODULE=on go get -v
-GOBUILD := GO111MODULE=on go build -v
-GOINSTALL := GO111MODULE=on go install -v
-GOTEST := GO111MODULE=on go test
-
-GOVERSION := $(shell go version | awk '{print $$3}')
-GOFILES_NOVENDOR = $(shell find . -type f -name '*.go' -not -path "./vendor/*")
-
-RM := rm -f
-CP := cp
-MAKE := make
-XARGS := xargs -L 1
-
-include make/testing_flags.mk
-include make/release_flags.mk
-include make/fuzz_flags.mk
-
-DEV_TAGS := $(if ${tags},$(DEV_TAGS) ${tags},$(DEV_TAGS))
-
-# We only return the part inside the double quote here to avoid escape issues
-# when calling the external release script. The second parameter can be used to
-# add additional ldflags if needed (currently only used for the release).
-make_ldflags = $(2) -X $(PKG)/build.Commit=$(COMMIT) \
- -X $(PKG)/build.CommitHash=$(COMMIT_HASH) \
- -X $(PKG)/build.GoVersion=$(GOVERSION) \
- -X $(PKG)/build.RawTags=$(shell echo $(1) | sed -e 's/ /,/g')
-
-LDFLAGS := -ldflags "$(call make_ldflags, ${tags}, -s -w)"
-DEV_LDFLAGS := -ldflags "$(call make_ldflags, $(DEV_TAGS))"
-ITEST_LDFLAGS := -ldflags "$(call make_ldflags, $(ITEST_TAGS))"
-
-# For the release, we want to remove the symbol table and debug information (-s)
-# and omit the DWARF symbol table (-w). Also we clear the build ID.
-RELEASE_LDFLAGS := $(call make_ldflags, $(RELEASE_TAGS), -s -w -buildid=)
-
-# Linting uses a lot of memory, so keep it under control by limiting the number
-# of workers if requested.
-ifneq ($(workers),)
-LINT_WORKERS = --concurrency=$(workers)
-endif
-
-LINT = $(LINT_BIN) run -v $(LINT_WORKERS)
-
-GREEN := "\\033[0;32m"
-NC := "\\033[0m"
-define print
- echo $(GREEN)$1$(NC)
-endef
-
-default: scratch
-
-all: scratch check install
-
-# ============
-# DEPENDENCIES
-# ============
-
-$(GOVERALLS_BIN):
- @$(call print, "Fetching goveralls.")
- go get -u $(GOVERALLS_PKG)
-
-$(LINT_BIN):
- @$(call print, "Fetching linter")
- $(DEPGET) $(LINT_PKG)@$(LINT_COMMIT)
-
-$(GOACC_BIN):
- @$(call print, "Fetching go-acc")
- $(DEPGET) $(GOACC_PKG)@$(GOACC_COMMIT)
-
-btcd:
- @$(call print, "Installing btcd.")
- $(DEPGET) $(BTCD_PKG)@$(BTCD_COMMIT)
-
-falafel:
- @$(call print, "Installing falafel.")
- $(DEPGET) $(FALAFEL_PKG)@$(FALAFEL_COMMIT)
-
-goimports:
- @$(call print, "Installing goimports.")
- $(DEPGET) $(GOIMPORTS_PKG)
-
-$(GOFUZZ_BIN):
- @$(call print, "Fetching go-fuzz")
- $(DEPGET) $(GOFUZZ_PKG)@$(GOFUZZ_COMMIT)
-
-$(GOFUZZ_BUILD_BIN):
- @$(call print, "Fetching go-fuzz-build")
- $(DEPGET) $(GOFUZZ_BUILD_PKG)@$(GOFUZZ_COMMIT)
-
-# ============
-# INSTALLATION
-# ============
-
-build:
- @$(call print, "Building debug lnd and lncli.")
- $(GOBUILD) -tags="$(DEV_TAGS)" -o lnd-debug $(DEV_LDFLAGS) $(PKG)/cmd/lnd
- $(GOBUILD) -tags="$(DEV_TAGS)" -o lncli-debug $(DEV_LDFLAGS) $(PKG)/cmd/lncli
-
-build-itest:
- @$(call print, "Building itest lnd and lncli.")
- $(GOBUILD) -tags="$(ITEST_TAGS)" -o lnd-itest $(ITEST_LDFLAGS) $(PKG)/cmd/lnd
- $(GOBUILD) -tags="$(ITEST_TAGS)" -o lncli-itest $(ITEST_LDFLAGS) $(PKG)/cmd/lncli
-
-build-itest-windows:
- @$(call print, "Building itest lnd and lncli.")
- $(GOBUILD) -tags="$(ITEST_TAGS)" -o lnd-itest.exe $(ITEST_LDFLAGS) $(PKG)/cmd/lnd
- $(GOBUILD) -tags="$(ITEST_TAGS)" -o lncli-itest.exe $(ITEST_LDFLAGS) $(PKG)/cmd/lncli
-
-install:
- @$(call print, "Installing lnd and lncli.")
- $(GOINSTALL) -tags="${tags}" $(LDFLAGS) $(PKG)/cmd/lnd
- $(GOINSTALL) -tags="${tags}" $(LDFLAGS) $(PKG)/cmd/lncli
-
-release:
- @$(call print, "Releasing lnd and lncli binaries.")
- $(VERSION_CHECK)
- ./scripts/release.sh build-release "$(VERSION_TAG)" "$(BUILD_SYSTEM)" "$(RELEASE_TAGS)" "$(RELEASE_LDFLAGS)"
-
-scratch: build
-
-
-# =======
-# TESTING
-# =======
-
-check: unit itest
-
-itest-only:
- @$(call print, "Running integration tests with ${backend} backend.")
- $(ITEST)
- lntest/itest/log_check_errors.sh
-
-itest: btcd build-itest itest-only
-
-itest-parallel: btcd
- @$(call print, "Building lnd binary")
- CGO_ENABLED=0 $(GOBUILD) -tags="$(ITEST_TAGS)" -o lntest/itest/lnd-itest $(ITEST_LDFLAGS) $(PKG)/cmd/lnd
-
- @$(call print, "Building itest binary for $(backend) backend")
- CGO_ENABLED=0 $(GOTEST) -v ./lntest/itest -tags="$(DEV_TAGS) $(RPC_TAGS) rpctest $(backend)" -logoutput -goroutinedump -c -o lntest/itest/itest.test
-
- @$(call print, "Running tests")
- rm -rf lntest/itest/*.log lntest/itest/.logs-*
- echo "$$(seq 0 $$(expr $(ITEST_PARALLELISM) - 1))" | xargs -P $(ITEST_PARALLELISM) -n 1 -I {} scripts/itest_part.sh {} $(NUM_ITEST_TRANCHES) $(TEST_FLAGS)
-
-itest-parallel-windows: btcd
- @$(call print, "Building lnd binary")
- CGO_ENABLED=0 $(GOBUILD) -tags="$(ITEST_TAGS)" -o lntest/itest/lnd-itest.exe $(ITEST_LDFLAGS) $(PKG)/cmd/lnd
-
- @$(call print, "Building itest binary for $(backend) backend")
- CGO_ENABLED=0 $(GOTEST) -v ./lntest/itest -tags="$(DEV_TAGS) $(RPC_TAGS) rpctest $(backend)" -logoutput -goroutinedump -c -o lntest/itest/itest.test.exe
-
- @$(call print, "Running tests")
- EXEC_SUFFIX=".exe" echo "$$(seq 0 $$(expr $(ITEST_PARALLELISM) - 1))" | xargs -P $(ITEST_PARALLELISM) -n 1 -I {} scripts/itest_part.sh {} $(NUM_ITEST_TRANCHES) $(TEST_FLAGS)
-
-itest-windows: btcd build-itest-windows itest-only
-
-unit: btcd
- @$(call print, "Running unit tests.")
- $(UNIT)
-
-unit-cover: $(GOACC_BIN)
- @$(call print, "Running unit coverage tests.")
- $(GOACC_BIN) $(COVER_PKG) -- -tags="$(DEV_TAGS) $(LOG_TAGS)"
-
-
-unit-race:
- @$(call print, "Running unit race tests.")
- env CGO_ENABLED=1 GORACE="history_size=7 halt_on_errors=1" $(UNIT_RACE)
-
-goveralls: $(GOVERALLS_BIN)
- @$(call print, "Sending coverage report.")
- $(GOVERALLS_BIN) -coverprofile=coverage.txt -service=travis-ci
-
-
-travis-race: btcd unit-race
-
-travis-cover: btcd unit-cover goveralls
-
-# =============
-# FLAKE HUNTING
-# =============
-
-flakehunter: build-itest
- @$(call print, "Flake hunting ${backend} integration tests.")
- while [ $$? -eq 0 ]; do $(ITEST); done
-
-flake-unit:
- @$(call print, "Flake hunting unit tests.")
- while [ $$? -eq 0 ]; do GOTRACEBACK=all $(UNIT) -count=1; done
-
-flakehunter-parallel:
- @$(call print, "Flake hunting ${backend} integration tests in parallel.")
- while [ $$? -eq 0 ]; do make itest-parallel tranches=1 parallel=${ITEST_PARALLELISM} icase='${icase}' backend='${backend}'; done
-
-# =============
-# FUZZING
-# =============
-fuzz-build: $(GOFUZZ_BUILD_BIN)
- @$(call print, "Creating fuzz harnesses for packages '$(FUZZPKG)'.")
- scripts/fuzz.sh build "$(FUZZPKG)"
-
-fuzz-run: $(GOFUZZ_BIN)
- @$(call print, "Fuzzing packages '$(FUZZPKG)'.")
- scripts/fuzz.sh run "$(FUZZPKG)" "$(FUZZ_TEST_RUN_TIME)" "$(FUZZ_TEST_TIMEOUT)" "$(FUZZ_NUM_PROCESSES)" "$(FUZZ_BASE_WORKDIR)"
-
-# =========
-# UTILITIES
-# =========
-
-fmt:
- @$(call print, "Formatting source.")
- gofmt -l -w -s $(GOFILES_NOVENDOR)
-
-lint: $(LINT_BIN)
- @$(call print, "Linting source.")
- $(LINT)
-
-list:
- @$(call print, "Listing commands.")
- @$(MAKE) -qp | \
- awk -F':' '/^[a-zA-Z0-9][^$$#\/\t=]*:([^=]|$$)/ {split($$1,A,/ /);for(i in A)print A[i]}' | \
- grep -v Makefile | \
- sort
-
-rpc:
- @$(call print, "Compiling protos.")
- cd ./lnrpc; ./gen_protos.sh
-
-rpc-format:
- @$(call print, "Formatting protos.")
- cd ./lnrpc; find . -name "*.proto" | xargs clang-format --style=file -i
-
-rpc-check: rpc
- @$(call print, "Verifying protos.")
- for rpc in $$(find lnrpc/ -name "*.proto" | $(XARGS) awk '/ rpc /{print $$2}'); do if ! grep -q $$rpc lnrpc/rest-annotations.yaml; then echo "RPC $$rpc not added to lnrpc/rest-annotations.yaml"; exit 1; fi; done
- if test -n "$$(git describe --dirty | grep dirty)"; then echo "Protos not properly formatted or not compiled with v3.4.0"; git status; git diff; exit 1; fi
-
-sample-conf-check:
- @$(call print, "Making sure every flag has an example in the sample-lnd.conf file")
- for flag in $$(GO_FLAGS_COMPLETION=1 go run -tags="$(RELEASE_TAGS)" $(PKG)/cmd/lnd -- | grep -v help | cut -c3-); do if ! grep -q $$flag sample-lnd.conf; then echo "Command line flag --$$flag not added to sample-lnd.conf"; exit 1; fi; done
-
-mobile-rpc: falafel goimports
- @$(call print, "Creating mobile RPC from protos.")
- cd ./mobile; ./gen_bindings.sh $(FALAFEL_COMMIT)
-
-vendor:
- @$(call print, "Re-creating vendor directory.")
- rm -r vendor/; GO111MODULE=on go mod vendor
-
-ios: vendor mobile-rpc
- @$(call print, "Building iOS framework ($(IOS_BUILD)).")
- mkdir -p $(IOS_BUILD_DIR)
- $(GOMOBILE_BIN) bind -target=ios -tags="mobile $(DEV_TAGS) autopilotrpc" $(LDFLAGS) -v -o $(IOS_BUILD) $(MOBILE_PKG)
-
-android: vendor mobile-rpc
- @$(call print, "Building Android library ($(ANDROID_BUILD)).")
- mkdir -p $(ANDROID_BUILD_DIR)
- $(GOMOBILE_BIN) bind -target=android -tags="mobile $(DEV_TAGS) autopilotrpc" $(LDFLAGS) -v -o $(ANDROID_BUILD) $(MOBILE_PKG)
-
-mobile: ios android
-
-clean:
- @$(call print, "Cleaning source.$(NC)")
- $(RM) ./lnd-debug ./lncli-debug
- $(RM) ./lnd-itest ./lncli-itest
- $(RM) -r ./vendor .vendor-new
-
-
-.PHONY: all \
- btcd \
- default \
- build \
- install \
- scratch \
- check \
- itest-only \
- itest \
- unit \
- unit-cover \
- unit-race \
- falafel \
- goveralls \
- travis-race \
- travis-cover \
- travis-itest \
- flakehunter \
- flake-unit \
- fmt \
- lint \
- list \
- rpc \
- rpc-format \
- rpc-check \
- mobile-rpc \
- vendor \
- ios \
- android \
- mobile \
- clean
diff --git a/lnd/README.md b/lnd/README.md
deleted file mode 100644
index 6d882390..00000000
--- a/lnd/README.md
+++ /dev/null
@@ -1,97 +0,0 @@
-## Lightning Network Daemon
-
-[![Build Status](https://img.shields.io/travis/lightningnetwork/lnd.svg)](https://travis-ci.org/lightningnetwork/lnd)
-[![MIT licensed](https://img.shields.io/badge/license-MIT-blue.svg)](https://github.com/lightningnetwork/lnd/blob/master/LICENSE)
-[![Irc](https://img.shields.io/badge/chat-on%20freenode-brightgreen.svg)](https://webchat.freenode.net/?channels=lnd)
-[![Godoc](https://godoc.org/github.com/lightningnetwork/lnd?status.svg)](https://godoc.org/github.com/lightningnetwork/lnd)
-
-
-
-The Lightning Network Daemon (`lnd`) - is a complete implementation of a
-[Lightning Network](https://lightning.network) node. `lnd` has several pluggable back-end
-chain services including [`btcd`](https://github.com/btcsuite/btcd) (a
-full-node), [`bitcoind`](https://github.com/bitcoin/bitcoin), and
-[`neutrino`](https://github.com/pkt-cash/pktd/neutrino) (a new experimental light client). The project's codebase uses the
-[btcsuite](https://github.com/btcsuite/) set of Bitcoin libraries, and also
-exports a large set of isolated re-usable Lightning Network related libraries
-within it. In the current state `lnd` is capable of:
-* Creating channels.
-* Closing channels.
-* Completely managing all channel states (including the exceptional ones!).
-* Maintaining a fully authenticated+validated channel graph.
-* Performing path finding within the network, passively forwarding incoming payments.
-* Sending outgoing [onion-encrypted payments](https://github.com/lightningnetwork/lightning-onion)
-through the network.
-* Updating advertised fee schedules.
-* Automatic channel management ([`autopilot`](https://github.com/lightningnetwork/lnd/tree/master/autopilot)).
-
-## Lightning Network Specification Compliance
-`lnd` _fully_ conforms to the [Lightning Network specification
-(BOLTs)](https://github.com/lightningnetwork/lightning-rfc). BOLT stands for:
-Basis of Lightning Technology. The specifications are currently being drafted
-by several groups of implementers based around the world including the
-developers of `lnd`. The set of specification documents as well as our
-implementation of the specification are still a work-in-progress. With that
-said, the current status of `lnd`'s BOLT compliance is:
-
- - [X] BOLT 1: Base Protocol
- - [X] BOLT 2: Peer Protocol for Channel Management
- - [X] BOLT 3: Bitcoin Transaction and Script Formats
- - [X] BOLT 4: Onion Routing Protocol
- - [X] BOLT 5: Recommendations for On-chain Transaction Handling
- - [X] BOLT 7: P2P Node and Channel Discovery
- - [X] BOLT 8: Encrypted and Authenticated Transport
- - [X] BOLT 9: Assigned Feature Flags
- - [X] BOLT 10: DNS Bootstrap and Assisted Node Location
- - [X] BOLT 11: Invoice Protocol for Lightning Payments
-
-## Developer Resources
-
-The daemon has been designed to be as developer friendly as possible in order
-to facilitate application development on top of `lnd`. Two primary RPC
-interfaces are exported: an HTTP REST API, and a [gRPC](https://grpc.io/)
-service. The exported API's are not yet stable, so be warned: they may change
-drastically in the near future.
-
-An automatically generated set of documentation for the RPC APIs can be found
-at [api.lightning.community](https://api.lightning.community). A set of developer
-resources including talks, articles, and example applications can be found at:
-[dev.lightning.community](https://dev.lightning.community).
-
-Finally, we also have an active
-[Slack](https://lightning.engineering/slack.html) where protocol developers, application developers, testers and users gather to
-discuss various aspects of `lnd` and also Lightning in general.
-
-## Installation
- In order to build from source, please see [the installation
- instructions](docs/INSTALL.md).
-
-## Docker
- To run lnd from Docker, please see the main [Docker instructions](docs/DOCKER.md)
-
-## IRC
- * irc.freenode.net
- * channel #lnd
- * [webchat](https://webchat.freenode.net/?channels=lnd)
-
-## Safety
-
-When operating a mainnet `lnd` node, please refer to our [operational safety
-guildelines](docs/safety.md). It is important to note that `lnd` is still
-**beta** software and that ignoring these operational guidelines can lead to
-loss of funds.
-
-## Security
-
-The developers of `lnd` take security _very_ seriously. The disclosure of
-security vulnerabilities helps us secure the health of `lnd`, privacy of our
-users, and also the health of the Lightning Network as a whole. If you find
-any issues regarding security or privacy, please disclose the information
-responsibly by sending an email to security at lightning dot engineering,
-preferably encrypted using our designated PGP key
-(`91FE464CD75101DA6B6BAB60555C6465E5BCB3AF`) which can be found
-[here](https://gist.githubusercontent.com/Roasbeef/6fb5b52886183239e4aa558f83d085d3/raw/5fa96010af201628bcfa61e9309d9b13d23d220f/security@lightning.engineering).
-
-## Further reading
-* [Step-by-step send payment guide with docker](https://github.com/lightningnetwork/lnd/tree/master/docker)
-* [Contribution guide](https://github.com/lightningnetwork/lnd/blob/master/docs/code_contribution_guidelines.md)
diff --git a/lnd/autopilot/agent.go b/lnd/autopilot/agent.go
deleted file mode 100644
index 4cb6d16a..00000000
--- a/lnd/autopilot/agent.go
+++ /dev/null
@@ -1,876 +0,0 @@
-package autopilot
-
-import (
- "bytes"
- "math/rand"
- "net"
- "sync"
- "time"
-
- "github.com/davecgh/go-spew/spew"
- "github.com/pkt-cash/pktd/btcec"
- "github.com/pkt-cash/pktd/btcutil"
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/lnd/lnwire"
- "github.com/pkt-cash/pktd/pktlog/log"
-)
-
-// Config couples all the items that an autopilot agent needs to function.
-// All items within the struct MUST be populated for the Agent to be able to
-// carry out its duties.
-type Config struct {
- // Self is the identity public key of the Lightning Network node that
- // is being driven by the agent. This is used to ensure that we don't
- // accidentally attempt to open a channel with ourselves.
- Self *btcec.PublicKey
-
- // Heuristic is an attachment heuristic which will govern to whom we
- // open channels to, and also what those channels look like in terms of
- // desired capacity. The Heuristic will take into account the current
- // state of the graph, our set of open channels, and the amount of
- // available funds when determining how channels are to be opened.
- // Additionally, a heuristic make also factor in extra-graph
- // information in order to make more pertinent recommendations.
- Heuristic AttachmentHeuristic
-
- // ChanController is an interface that is able to directly manage the
- // creation, closing and update of channels within the network.
- ChanController ChannelController
-
- // ConnectToPeer attempts to connect to the peer using one of its
- // advertised addresses. The boolean returned signals whether the peer
- // was already connected.
- ConnectToPeer func(*btcec.PublicKey, []net.Addr) (bool, er.R)
-
- // DisconnectPeer attempts to disconnect the peer with the given public
- // key.
- DisconnectPeer func(*btcec.PublicKey) er.R
-
- // WalletBalance is a function closure that should return the current
- // available balance of the backing wallet.
- WalletBalance func() (btcutil.Amount, er.R)
-
- // Graph is an abstract channel graph that the Heuristic and the Agent
- // will use to make decisions w.r.t channel allocation and placement
- // within the graph.
- Graph ChannelGraph
-
- // Constraints is the set of constraints the autopilot must adhere to
- // when opening channels.
- Constraints AgentConstraints
-
- // TODO(roasbeef): add additional signals from fee rates and revenue of
- // currently opened channels
-}
-
-// channelState is a type that represents the set of active channels of the
-// backing LN node that the Agent should be aware of. This type contains a few
-// helper utility methods.
-type channelState map[lnwire.ShortChannelID]LocalChannel
-
-// Channels returns a slice of all the active channels.
-func (c channelState) Channels() []LocalChannel {
- chans := make([]LocalChannel, 0, len(c))
- for _, channel := range c {
- chans = append(chans, channel)
- }
- return chans
-}
-
-// ConnectedNodes returns the set of nodes we currently have a channel with.
-// This information is needed as we want to avoid making repeated channels with
-// any node.
-func (c channelState) ConnectedNodes() map[NodeID]struct{} {
- nodes := make(map[NodeID]struct{})
- for _, channels := range c {
- nodes[channels.Node] = struct{}{}
- }
-
- // TODO(roasbeef): add outgoing, nodes, allow incoming and outgoing to
- // per node
- // * only add node is chan as funding amt set
-
- return nodes
-}
-
-// Agent implements a closed-loop control system which seeks to autonomously
-// optimize the allocation of satoshis within channels throughput the network's
-// channel graph. An agent is configurable by swapping out different
-// AttachmentHeuristic strategies. The agent uses external signals such as the
-// wallet balance changing, or new channels being opened/closed for the local
-// node as an indicator to re-examine its internal state, and the amount of
-// available funds in order to make updated decisions w.r.t the channel graph.
-// The Agent will automatically open, close, and splice in/out channel as
-// necessary for it to step closer to its optimal state.
-//
-// TODO(roasbeef): prob re-word
-type Agent struct {
- started sync.Once
- stopped sync.Once
-
- // cfg houses the configuration state of the Ant.
- cfg Config
-
- // chanState tracks the current set of open channels.
- chanState channelState
- chanStateMtx sync.Mutex
-
- // stateUpdates is a channel that any external state updates that may
- // affect the heuristics of the agent will be sent over.
- stateUpdates chan interface{}
-
- // balanceUpdates is a channel where notifications about updates to the
- // wallet's balance will be sent. This channel will be buffered to
- // ensure we have at most one pending update of this type to handle at
- // a given time.
- balanceUpdates chan *balanceUpdate
-
- // nodeUpdates is a channel that changes to the graph node landscape
- // will be sent over. This channel will be buffered to ensure we have
- // at most one pending update of this type to handle at a given time.
- nodeUpdates chan *nodeUpdates
-
- // pendingOpenUpdates is a channel where updates about channel pending
- // opening will be sent. This channel will be buffered to ensure we
- // have at most one pending update of this type to handle at a given
- // time.
- pendingOpenUpdates chan *chanPendingOpenUpdate
-
- // chanOpenFailures is a channel where updates about channel open
- // failures will be sent. This channel will be buffered to ensure we
- // have at most one pending update of this type to handle at a given
- // time.
- chanOpenFailures chan *chanOpenFailureUpdate
-
- // heuristicUpdates is a channel where updates from active heurstics
- // will be sent.
- heuristicUpdates chan *heuristicUpdate
-
- // totalBalance is the total number of satoshis the backing wallet is
- // known to control at any given instance. This value will be updated
- // when the agent receives external balance update signals.
- totalBalance btcutil.Amount
-
- // failedNodes lists nodes that we've previously attempted to initiate
- // channels with, but didn't succeed.
- failedNodes map[NodeID]struct{}
-
- // pendingConns tracks the nodes that we are attempting to make
- // connections to. This prevents us from making duplicate connection
- // requests to the same node.
- pendingConns map[NodeID]struct{}
-
- // pendingOpens tracks the channels that we've requested to be
- // initiated, but haven't yet been confirmed as being fully opened.
- // This state is required as otherwise, we may go over our allotted
- // channel limit, or open multiple channels to the same node.
- pendingOpens map[NodeID]LocalChannel
- pendingMtx sync.Mutex
-
- quit chan struct{}
- wg sync.WaitGroup
-}
-
-// New creates a new instance of the Agent instantiated using the passed
-// configuration and initial channel state. The initial channel state slice
-// should be populated with the set of Channels that are currently opened by
-// the backing Lightning Node.
-func New(cfg Config, initialState []LocalChannel) (*Agent, er.R) {
- a := &Agent{
- cfg: cfg,
- chanState: make(map[lnwire.ShortChannelID]LocalChannel),
- quit: make(chan struct{}),
- stateUpdates: make(chan interface{}),
- balanceUpdates: make(chan *balanceUpdate, 1),
- nodeUpdates: make(chan *nodeUpdates, 1),
- chanOpenFailures: make(chan *chanOpenFailureUpdate, 1),
- heuristicUpdates: make(chan *heuristicUpdate, 1),
- pendingOpenUpdates: make(chan *chanPendingOpenUpdate, 1),
- failedNodes: make(map[NodeID]struct{}),
- pendingConns: make(map[NodeID]struct{}),
- pendingOpens: make(map[NodeID]LocalChannel),
- }
-
- for _, c := range initialState {
- a.chanState[c.ChanID] = c
- }
-
- return a, nil
-}
-
-// Start starts the agent along with any goroutines it needs to perform its
-// normal duties.
-func (a *Agent) Start() er.R {
- var err er.R
- a.started.Do(func() {
- err = a.start()
- })
- return err
-}
-
-func (a *Agent) start() er.R {
- rand.Seed(time.Now().Unix())
- log.Infof("Autopilot Agent starting")
-
- a.wg.Add(1)
- go a.controller()
-
- return nil
-}
-
-// Stop signals the Agent to gracefully shutdown. This function will block
-// until all goroutines have exited.
-func (a *Agent) Stop() er.R {
- var err er.R
- a.stopped.Do(func() {
- err = a.stop()
- })
- return err
-}
-
-func (a *Agent) stop() er.R {
- log.Infof("Autopilot Agent stopping")
-
- close(a.quit)
- a.wg.Wait()
-
- return nil
-}
-
-// balanceUpdate is a type of external state update that reflects an
-// increase/decrease in the funds currently available to the wallet.
-type balanceUpdate struct {
-}
-
-// nodeUpdates is a type of external state update that reflects an addition or
-// modification in channel graph node membership.
-type nodeUpdates struct{}
-
-// chanOpenUpdate is a type of external state update that indicates a new
-// channel has been opened, either by the Agent itself (within the main
-// controller loop), or by an external user to the system.
-type chanOpenUpdate struct {
- newChan LocalChannel
-}
-
-// chanPendingOpenUpdate is a type of external state update that indicates a new
-// channel has been opened, either by the agent itself or an external subsystem,
-// but is still pending.
-type chanPendingOpenUpdate struct{}
-
-// chanOpenFailureUpdate is a type of external state update that indicates
-// a previous channel open failed, and that it might be possible to try again.
-type chanOpenFailureUpdate struct{}
-
-// heuristicUpdate is an update sent when one of the autopilot heuristics has
-// changed, and prompts the agent to make a new attempt at opening more
-// channels.
-type heuristicUpdate struct {
- heuristic AttachmentHeuristic
-}
-
-// chanCloseUpdate is a type of external state update that indicates that the
-// backing Lightning Node has closed a previously open channel.
-type chanCloseUpdate struct {
- closedChans []lnwire.ShortChannelID
-}
-
-// OnBalanceChange is a callback that should be executed each time the balance
-// of the backing wallet changes.
-func (a *Agent) OnBalanceChange() {
- select {
- case a.balanceUpdates <- &balanceUpdate{}:
- default:
- }
-}
-
-// OnNodeUpdates is a callback that should be executed each time our channel
-// graph has new nodes or their node announcements are updated.
-func (a *Agent) OnNodeUpdates() {
- select {
- case a.nodeUpdates <- &nodeUpdates{}:
- default:
- }
-}
-
-// OnChannelOpen is a callback that should be executed each time a new channel
-// is manually opened by the user or any system outside the autopilot agent.
-func (a *Agent) OnChannelOpen(c LocalChannel) {
- a.wg.Add(1)
- go func() {
- defer a.wg.Done()
-
- select {
- case a.stateUpdates <- &chanOpenUpdate{newChan: c}:
- case <-a.quit:
- }
- }()
-}
-
-// OnChannelPendingOpen is a callback that should be executed each time a new
-// channel is opened, either by the agent or an external subsystems, but is
-// still pending.
-func (a *Agent) OnChannelPendingOpen() {
- select {
- case a.pendingOpenUpdates <- &chanPendingOpenUpdate{}:
- default:
- }
-}
-
-// OnChannelOpenFailure is a callback that should be executed when the
-// autopilot has attempted to open a channel, but failed. In this case we can
-// retry channel creation with a different node.
-func (a *Agent) OnChannelOpenFailure() {
- select {
- case a.chanOpenFailures <- &chanOpenFailureUpdate{}:
- default:
- }
-}
-
-// OnChannelClose is a callback that should be executed each time a prior
-// channel has been closed for any reason. This includes regular
-// closes, force closes, and channel breaches.
-func (a *Agent) OnChannelClose(closedChans ...lnwire.ShortChannelID) {
- a.wg.Add(1)
- go func() {
- defer a.wg.Done()
-
- select {
- case a.stateUpdates <- &chanCloseUpdate{closedChans: closedChans}:
- case <-a.quit:
- }
- }()
-}
-
-// OnHeuristicUpdate is a method called when a heuristic has been updated, to
-// trigger the agent to do a new state assessment.
-func (a *Agent) OnHeuristicUpdate(h AttachmentHeuristic) {
- select {
- case a.heuristicUpdates <- &heuristicUpdate{
- heuristic: h,
- }:
- default:
- }
-}
-
-// mergeNodeMaps merges the Agent's set of nodes that it already has active
-// channels open to, with the other sets of nodes that should be removed from
-// consideration during heuristic selection. This ensures that the Agent doesn't
-// attempt to open any "duplicate" channels to the same node.
-func mergeNodeMaps(c map[NodeID]LocalChannel,
- skips ...map[NodeID]struct{}) map[NodeID]struct{} {
-
- numNodes := len(c)
- for _, skip := range skips {
- numNodes += len(skip)
- }
-
- res := make(map[NodeID]struct{}, numNodes)
- for nodeID := range c {
- res[nodeID] = struct{}{}
- }
- for _, skip := range skips {
- for nodeID := range skip {
- res[nodeID] = struct{}{}
- }
- }
-
- return res
-}
-
-// mergeChanState merges the Agent's set of active channels, with the set of
-// channels awaiting confirmation. This ensures that the agent doesn't go over
-// the prescribed channel limit or fund allocation limit.
-func mergeChanState(pendingChans map[NodeID]LocalChannel,
- activeChans channelState) []LocalChannel {
-
- numChans := len(pendingChans) + len(activeChans)
- totalChans := make([]LocalChannel, 0, numChans)
-
- totalChans = append(totalChans, activeChans.Channels()...)
-
- for _, pendingChan := range pendingChans {
- totalChans = append(totalChans, pendingChan)
- }
-
- return totalChans
-}
-
-// controller implements the closed-loop control system of the Agent. The
-// controller will make a decision w.r.t channel placement within the graph
-// based on: its current internal state of the set of active channels open,
-// and external state changes as a result of decisions it makes w.r.t channel
-// allocation, or attributes affecting its control loop being updated by the
-// backing Lightning Node.
-func (a *Agent) controller() {
- defer a.wg.Done()
-
- // We'll start off by assigning our starting balance, and injecting
- // that amount as an initial wake up to the main controller goroutine.
- a.OnBalanceChange()
-
- // TODO(roasbeef): do we in fact need to maintain order?
- // * use sync.Cond if so
- updateBalance := func() {
- newBalance, err := a.cfg.WalletBalance()
- if err != nil {
- log.Warnf("unable to update wallet balance: %v", err)
- return
- }
-
- a.totalBalance = newBalance
- }
-
- // TODO(roasbeef): add 10-minute wake up timer
- for {
- select {
- // A new external signal has arrived. We'll use this to update
- // our internal state, then determine if we should trigger a
- // channel state modification (open/close, splice in/out).
- case signal := <-a.stateUpdates:
- log.Infof("Processing new external signal")
-
- switch update := signal.(type) {
- // A new channel has been opened successfully. This was
- // either opened by the Agent, or an external system
- // that is able to drive the Lightning Node.
- case *chanOpenUpdate:
- log.Debugf("New channel successfully opened, "+
- "updating state with: %v",
- spew.Sdump(update.newChan))
-
- newChan := update.newChan
- a.chanStateMtx.Lock()
- a.chanState[newChan.ChanID] = newChan
- a.chanStateMtx.Unlock()
-
- a.pendingMtx.Lock()
- delete(a.pendingOpens, newChan.Node)
- a.pendingMtx.Unlock()
-
- updateBalance()
- // A channel has been closed, this may free up an
- // available slot, triggering a new channel update.
- case *chanCloseUpdate:
- log.Debugf("Applying closed channel "+
- "updates: %v",
- spew.Sdump(update.closedChans))
-
- a.chanStateMtx.Lock()
- for _, closedChan := range update.closedChans {
- delete(a.chanState, closedChan)
- }
- a.chanStateMtx.Unlock()
-
- updateBalance()
- }
-
- // A new channel has been opened by the agent or an external
- // subsystem, but is still pending confirmation.
- case <-a.pendingOpenUpdates:
- updateBalance()
-
- // The balance of the backing wallet has changed, if more funds
- // are now available, we may attempt to open up an additional
- // channel, or splice in funds to an existing one.
- case <-a.balanceUpdates:
- log.Debug("Applying external balance state update")
-
- updateBalance()
-
- // The channel we tried to open previously failed for whatever
- // reason.
- case <-a.chanOpenFailures:
- log.Debug("Retrying after previous channel open " +
- "failure.")
-
- updateBalance()
-
- // New nodes have been added to the graph or their node
- // announcements have been updated. We will consider opening
- // channels to these nodes if we haven't stabilized.
- case <-a.nodeUpdates:
- log.Debugf("Node updates received, assessing " +
- "need for more channels")
-
- // Any of the deployed heuristics has been updated, check
- // whether we have new channel candidates available.
- case upd := <-a.heuristicUpdates:
- log.Debugf("Heuristic %v updated, assessing need for "+
- "more channels", upd.heuristic.Name())
-
- // The agent has been signalled to exit, so we'll bail out
- // immediately.
- case <-a.quit:
- return
- }
-
- a.pendingMtx.Lock()
- log.Debugf("Pending channels: %v", spew.Sdump(a.pendingOpens))
- a.pendingMtx.Unlock()
-
- // With all the updates applied, we'll obtain a set of the
- // current active channels (confirmed channels), and also
- // factor in our set of unconfirmed channels.
- a.chanStateMtx.Lock()
- a.pendingMtx.Lock()
- totalChans := mergeChanState(a.pendingOpens, a.chanState)
- a.pendingMtx.Unlock()
- a.chanStateMtx.Unlock()
-
- // Now that we've updated our internal state, we'll consult our
- // channel attachment heuristic to determine if we can open
- // up any additional channels while staying within our
- // constraints.
- availableFunds, numChans := a.cfg.Constraints.ChannelBudget(
- totalChans, a.totalBalance,
- )
- switch {
- case numChans == 0:
- continue
-
- // If the amount is too small, we don't want to attempt opening
- // another channel.
- case availableFunds == 0:
- continue
- case availableFunds < a.cfg.Constraints.MinChanSize():
- continue
- }
-
- log.Infof("Triggering attachment directive dispatch, "+
- "total_funds=%v", a.totalBalance)
-
- err := a.openChans(availableFunds, numChans, totalChans)
- if err != nil {
- log.Errorf("Unable to open channels: %v", err)
- }
- }
-}
-
-// openChans queries the agent's heuristic for a set of channel candidates, and
-// attempts to open channels to them.
-func (a *Agent) openChans(availableFunds btcutil.Amount, numChans uint32,
- totalChans []LocalChannel) er.R {
-
- // As channel size we'll use the maximum channel size available.
- chanSize := a.cfg.Constraints.MaxChanSize()
- if availableFunds < chanSize {
- chanSize = availableFunds
- }
-
- if chanSize < a.cfg.Constraints.MinChanSize() {
- return er.Errorf("not enough funds available to open a " +
- "single channel")
- }
-
- // We're to attempt an attachment so we'll obtain the set of
- // nodes that we currently have channels with so we avoid
- // duplicate edges.
- a.chanStateMtx.Lock()
- connectedNodes := a.chanState.ConnectedNodes()
- a.chanStateMtx.Unlock()
-
- for nID := range connectedNodes {
- log.Tracef("Skipping node %x with open channel", nID[:])
- }
-
- a.pendingMtx.Lock()
-
- for nID := range a.pendingOpens {
- log.Tracef("Skipping node %x with pending channel open", nID[:])
- }
-
- for nID := range a.pendingConns {
- log.Tracef("Skipping node %x with pending connection", nID[:])
- }
-
- for nID := range a.failedNodes {
- log.Tracef("Skipping failed node %v", nID[:])
- }
-
- nodesToSkip := mergeNodeMaps(a.pendingOpens,
- a.pendingConns, connectedNodes, a.failedNodes,
- )
-
- a.pendingMtx.Unlock()
-
- // Gather the set of all nodes in the graph, except those we
- // want to skip.
- selfPubBytes := a.cfg.Self.SerializeCompressed()
- nodes := make(map[NodeID]struct{})
- addresses := make(map[NodeID][]net.Addr)
- if err := a.cfg.Graph.ForEachNode(func(node Node) er.R {
- nID := NodeID(node.PubKey())
-
- // If we come across ourselves, them we'll continue in
- // order to avoid attempting to make a channel with
- // ourselves.
- if bytes.Equal(nID[:], selfPubBytes) {
- log.Tracef("Skipping self node %x", nID[:])
- return nil
- }
-
- // If the node has no known addresses, we cannot connect to it,
- // so we'll skip it.
- addrs := node.Addrs()
- if len(addrs) == 0 {
- log.Tracef("Skipping node %x since no addresses known",
- nID[:])
- return nil
- }
- addresses[nID] = addrs
-
- // Additionally, if this node is in the blacklist, then
- // we'll skip it.
- if _, ok := nodesToSkip[nID]; ok {
- log.Tracef("Skipping blacklisted node %x", nID[:])
- return nil
- }
-
- nodes[nID] = struct{}{}
- return nil
- }); err != nil {
- return er.Errorf("unable to get graph nodes: %v", err)
- }
-
- // Use the heuristic to calculate a score for each node in the
- // graph.
- log.Debugf("Scoring %d nodes for chan_size=%v", len(nodes), chanSize)
- scores, err := a.cfg.Heuristic.NodeScores(
- a.cfg.Graph, totalChans, chanSize, nodes,
- )
- if err != nil {
- return er.Errorf("unable to calculate node scores : %v", err)
- }
-
- log.Debugf("Got scores for %d nodes", len(scores))
-
- // Now use the score to make a weighted choice which nodes to attempt
- // to open channels to.
- scores, err = chooseN(numChans, scores)
- if err != nil {
- return er.Errorf("unable to make weighted choice: %v",
- err)
- }
-
- chanCandidates := make(map[NodeID]*AttachmentDirective)
- for nID := range scores {
- log.Tracef("Creating attachment directive for chosen node %x",
- nID[:])
-
- // Track the available funds we have left.
- if availableFunds < chanSize {
- chanSize = availableFunds
- }
- availableFunds -= chanSize
-
- // If we run out of funds, we can break early.
- if chanSize < a.cfg.Constraints.MinChanSize() {
- log.Tracef("Chan size %v too small to satisfy min "+
- "channel size %v, breaking", chanSize,
- a.cfg.Constraints.MinChanSize())
- break
- }
-
- chanCandidates[nID] = &AttachmentDirective{
- NodeID: nID,
- ChanAmt: chanSize,
- Addrs: addresses[nID],
- }
- }
-
- if len(chanCandidates) == 0 {
- log.Infof("No eligible candidates to connect to")
- return nil
- }
-
- log.Infof("Attempting to execute channel attachment "+
- "directives: %v", spew.Sdump(chanCandidates))
-
- // Before proceeding, check to see if we have any slots
- // available to open channels. If there are any, we will attempt
- // to dispatch the retrieved directives since we can't be
- // certain which ones may actually succeed. If too many
- // connections succeed, they will be ignored and made
- // available to future heuristic selections.
- a.pendingMtx.Lock()
- defer a.pendingMtx.Unlock()
- if uint16(len(a.pendingOpens)) >= a.cfg.Constraints.MaxPendingOpens() {
- log.Debugf("Reached cap of %v pending "+
- "channel opens, will retry "+
- "after success/failure",
- a.cfg.Constraints.MaxPendingOpens())
- return nil
- }
-
- // For each recommended attachment directive, we'll launch a
- // new goroutine to attempt to carry out the directive. If any
- // of these succeed, then we'll receive a new state update,
- // taking us back to the top of our controller loop.
- for _, chanCandidate := range chanCandidates {
- // Skip candidates which we are already trying
- // to establish a connection with.
- nodeID := chanCandidate.NodeID
- if _, ok := a.pendingConns[nodeID]; ok {
- continue
- }
- a.pendingConns[nodeID] = struct{}{}
-
- a.wg.Add(1)
- go a.executeDirective(*chanCandidate)
- }
- return nil
-}
-
-// executeDirective attempts to connect to the channel candidate specified by
-// the given attachment directive, and open a channel of the given size.
-//
-// NOTE: MUST be run as a goroutine.
-func (a *Agent) executeDirective(directive AttachmentDirective) {
- defer a.wg.Done()
-
- // We'll start out by attempting to connect to the peer in order to
- // begin the funding workflow.
- nodeID := directive.NodeID
- pub, err := btcec.ParsePubKey(nodeID[:], btcec.S256())
- if err != nil {
- log.Errorf("Unable to parse pubkey %x: %v", nodeID, err)
- return
- }
-
- connected := make(chan bool)
- errChan := make(chan er.R)
-
- // To ensure a call to ConnectToPeer doesn't block the agent from
- // shutting down, we'll launch it in a non-waitgrouped goroutine, that
- // will signal when a result is returned.
- // TODO(halseth): use DialContext to cancel on transport level.
- go func() {
- alreadyConnected, err := a.cfg.ConnectToPeer(
- pub, directive.Addrs,
- )
- if err != nil {
- select {
- case errChan <- err:
- case <-a.quit:
- }
- return
- }
-
- select {
- case connected <- alreadyConnected:
- case <-a.quit:
- return
- }
- }()
-
- var alreadyConnected bool
- select {
- case alreadyConnected = <-connected:
- case err = <-errChan:
- case <-a.quit:
- return
- }
-
- if err != nil {
- log.Warnf("Unable to connect to %x: %v",
- pub.SerializeCompressed(), err)
-
- // Since we failed to connect to them, we'll mark them as
- // failed so that we don't attempt to connect to them again.
- a.pendingMtx.Lock()
- delete(a.pendingConns, nodeID)
- a.failedNodes[nodeID] = struct{}{}
- a.pendingMtx.Unlock()
-
- // Finally, we'll trigger the agent to select new peers to
- // connect to.
- a.OnChannelOpenFailure()
-
- return
- }
-
- // The connection was successful, though before progressing we must
- // check that we have not already met our quota for max pending open
- // channels. This can happen if multiple directives were spawned but
- // fewer slots were available, and other successful attempts finished
- // first.
- a.pendingMtx.Lock()
- if uint16(len(a.pendingOpens)) >= a.cfg.Constraints.MaxPendingOpens() {
- // Since we've reached our max number of pending opens, we'll
- // disconnect this peer and exit. However, if we were
- // previously connected to them, then we'll make sure to
- // maintain the connection alive.
- if alreadyConnected {
- // Since we succeeded in connecting, we won't add this
- // peer to the failed nodes map, but we will remove it
- // from a.pendingConns so that it can be retried in the
- // future.
- delete(a.pendingConns, nodeID)
- a.pendingMtx.Unlock()
- return
- }
-
- err = a.cfg.DisconnectPeer(pub)
- if err != nil {
- log.Warnf("Unable to disconnect peer %x: %v",
- pub.SerializeCompressed(), err)
- }
-
- // Now that we have disconnected, we can remove this node from
- // our pending conns map, permitting subsequent connection
- // attempts.
- delete(a.pendingConns, nodeID)
- a.pendingMtx.Unlock()
- return
- }
-
- // If we were successful, we'll track this peer in our set of pending
- // opens. We do this here to ensure we don't stall on selecting new
- // peers if the connection attempt happens to take too long.
- delete(a.pendingConns, nodeID)
- a.pendingOpens[nodeID] = LocalChannel{
- Balance: directive.ChanAmt,
- Node: nodeID,
- }
- a.pendingMtx.Unlock()
-
- // We can then begin the funding workflow with this peer.
- err = a.cfg.ChanController.OpenChannel(pub, directive.ChanAmt)
- if err != nil {
- log.Warnf("Unable to open channel to %x of %v: %v",
- pub.SerializeCompressed(), directive.ChanAmt, err)
-
- // As the attempt failed, we'll clear the peer from the set of
- // pending opens and mark them as failed so we don't attempt to
- // open a channel to them again.
- a.pendingMtx.Lock()
- delete(a.pendingOpens, nodeID)
- a.failedNodes[nodeID] = struct{}{}
- a.pendingMtx.Unlock()
-
- // Trigger the agent to re-evaluate everything and possibly
- // retry with a different node.
- a.OnChannelOpenFailure()
-
- // Finally, we should also disconnect the peer if we weren't
- // already connected to them beforehand by an external
- // subsystem.
- if alreadyConnected {
- return
- }
-
- err = a.cfg.DisconnectPeer(pub)
- if err != nil {
- log.Warnf("Unable to disconnect peer %x: %v",
- pub.SerializeCompressed(), err)
- }
- }
-
- // Since the channel open was successful and is currently pending,
- // we'll trigger the autopilot agent to query for more peers.
- // TODO(halseth): this triggers a new loop before all the new channels
- // are added to the pending channels map. Should add before executing
- // directive in goroutine?
- a.OnChannelPendingOpen()
-}
diff --git a/lnd/autopilot/agent_constraints.go b/lnd/autopilot/agent_constraints.go
deleted file mode 100644
index 31053e45..00000000
--- a/lnd/autopilot/agent_constraints.go
+++ /dev/null
@@ -1,151 +0,0 @@
-package autopilot
-
-import (
- "github.com/pkt-cash/pktd/btcutil"
-)
-
-// AgentConstraints is an interface the agent will query to determine what
-// limits it will need to stay inside when opening channels.
-type AgentConstraints interface {
- // ChannelBudget should, given the passed parameters, return whether
- // more channels can be opened while still staying within the set
- // constraints. If the constraints allow us to open more channels, then
- // the first return value will represent the amount of additional funds
- // available towards creating channels. The second return value is the
- // exact *number* of additional channels available.
- ChannelBudget(chans []LocalChannel, balance btcutil.Amount) (
- btcutil.Amount, uint32)
-
- // MaxPendingOpens returns the maximum number of pending channel
- // establishment goroutines that can be lingering. We cap this value in
- // order to control the level of parallelism caused by the autopilot
- // agent.
- MaxPendingOpens() uint16
-
- // MinChanSize returns the smallest channel that the autopilot agent
- // should create.
- MinChanSize() btcutil.Amount
-
- // MaxChanSize returns largest channel that the autopilot agent should
- // create.
- MaxChanSize() btcutil.Amount
-}
-
-// agenConstraints is an implementation of the AgentConstraints interface that
-// indicate the constraints the autopilot agent must adhere to when opening
-// channels.
-type agentConstraints struct {
- // minChanSize is the smallest channel that the autopilot agent should
- // create.
- minChanSize btcutil.Amount
-
- // maxChanSize is the largest channel that the autopilot agent should
- // create.
- maxChanSize btcutil.Amount
-
- // chanLimit is the maximum number of channels that should be created.
- chanLimit uint16
-
- // allocation is the percentage of total funds that should be committed
- // to automatic channel establishment.
- allocation float64
-
- // maxPendingOpens is the maximum number of pending channel
- // establishment goroutines that can be lingering. We cap this value in
- // order to control the level of parallelism caused by the autopilot
- // agent.
- maxPendingOpens uint16
-}
-
-// A compile time assertion to ensure agentConstraints satisfies the
-// AgentConstraints interface.
-var _ AgentConstraints = (*agentConstraints)(nil)
-
-// NewConstraints returns a new AgentConstraints with the given limits.
-func NewConstraints(minChanSize, maxChanSize btcutil.Amount, chanLimit,
- maxPendingOpens uint16, allocation float64) AgentConstraints {
-
- return &agentConstraints{
- minChanSize: minChanSize,
- maxChanSize: maxChanSize,
- chanLimit: chanLimit,
- allocation: allocation,
- maxPendingOpens: maxPendingOpens,
- }
-}
-
-// ChannelBudget should, given the passed parameters, return whether more
-// channels can be be opened while still staying within the set constraints.
-// If the constraints allow us to open more channels, then the first return
-// value will represent the amount of additional funds available towards
-// creating channels. The second return value is the exact *number* of
-// additional channels available.
-//
-// Note: part of the AgentConstraints interface.
-func (h *agentConstraints) ChannelBudget(channels []LocalChannel,
- funds btcutil.Amount) (btcutil.Amount, uint32) {
-
- // If we're already over our maximum allowed number of channels, then
- // we'll instruct the controller not to create any more channels.
- if len(channels) >= int(h.chanLimit) {
- return 0, 0
- }
-
- // The number of additional channels that should be opened is the
- // difference between the channel limit, and the number of channels we
- // already have open.
- numAdditionalChans := uint32(h.chanLimit) - uint32(len(channels))
-
- // First, we'll tally up the total amount of funds that are currently
- // present within the set of active channels.
- var totalChanAllocation btcutil.Amount
- for _, channel := range channels {
- totalChanAllocation += channel.Balance
- }
-
- // With this value known, we'll now compute the total amount of fund
- // allocated across regular utxo's and channel utxo's.
- totalFunds := funds + totalChanAllocation
-
- // Once the total amount has been computed, we then calculate the
- // fraction of funds currently allocated to channels.
- fundsFraction := float64(totalChanAllocation) / float64(totalFunds)
-
- // If this fraction is below our threshold, then we'll return true, to
- // indicate the controller should call Select to obtain a candidate set
- // of channels to attempt to open.
- needMore := fundsFraction < h.allocation
- if !needMore {
- return 0, 0
- }
-
- // Now that we know we need more funds, we'll compute the amount of
- // additional funds we should allocate towards channels.
- targetAllocation := btcutil.Amount(float64(totalFunds) * h.allocation)
- fundsAvailable := targetAllocation - totalChanAllocation
- return fundsAvailable, numAdditionalChans
-}
-
-// MaxPendingOpens returns the maximum number of pending channel establishment
-// goroutines that can be lingering. We cap this value in order to control the
-// level of parallelism caused by the autopilot agent.
-//
-// Note: part of the AgentConstraints interface.
-func (h *agentConstraints) MaxPendingOpens() uint16 {
- return h.maxPendingOpens
-}
-
-// MinChanSize returns the smallest channel that the autopilot agent should
-// create.
-//
-// Note: part of the AgentConstraints interface.
-func (h *agentConstraints) MinChanSize() btcutil.Amount {
- return h.minChanSize
-}
-
-// MaxChanSize returns largest channel that the autopilot agent should create.
-//
-// Note: part of the AgentConstraints interface.
-func (h *agentConstraints) MaxChanSize() btcutil.Amount {
- return h.maxChanSize
-}
diff --git a/lnd/autopilot/agent_constraints_test.go b/lnd/autopilot/agent_constraints_test.go
deleted file mode 100644
index ac0e006a..00000000
--- a/lnd/autopilot/agent_constraints_test.go
+++ /dev/null
@@ -1,166 +0,0 @@
-package autopilot
-
-import (
- "testing"
- "time"
-
- prand "math/rand"
-
- "github.com/pkt-cash/pktd/btcutil"
- "github.com/pkt-cash/pktd/lnd/lnwire"
-)
-
-func TestConstraintsChannelBudget(t *testing.T) {
- t.Parallel()
-
- prand.Seed(time.Now().Unix())
-
- maxChanSize := btcutil.Amount(btcutil.UnitsPerCoin())
- const (
- minChanSize = 0
-
- chanLimit = 3
-
- threshold = 0.5
- )
-
- constraints := NewConstraints(
- minChanSize,
- maxChanSize,
- chanLimit,
- 0,
- threshold,
- )
-
- randChanID := func() lnwire.ShortChannelID {
- return lnwire.NewShortChanIDFromInt(uint64(prand.Int63()))
- }
-
- testCases := []struct {
- channels []LocalChannel
- walletAmt btcutil.Amount
-
- needMore bool
- amtAvailable btcutil.Amount
- numMore uint32
- }{
- // Many available funds, but already have too many active open
- // channels.
- {
- []LocalChannel{
- {
- ChanID: randChanID(),
- Balance: btcutil.Amount(prand.Int31()),
- },
- {
- ChanID: randChanID(),
- Balance: btcutil.Amount(prand.Int31()),
- },
- {
- ChanID: randChanID(),
- Balance: btcutil.Amount(prand.Int31()),
- },
- },
- btcutil.Amount(btcutil.UnitsPerCoin() * 10),
- false,
- 0,
- 0,
- },
-
- // Ratio of funds in channels and total funds meets the
- // threshold.
- {
- []LocalChannel{
- {
- ChanID: randChanID(),
- Balance: btcutil.Amount(btcutil.UnitsPerCoin()),
- },
- {
- ChanID: randChanID(),
- Balance: btcutil.Amount(btcutil.UnitsPerCoin()),
- },
- },
- btcutil.Amount(btcutil.UnitsPerCoin() * 2),
- false,
- 0,
- 0,
- },
-
- // Ratio of funds in channels and total funds is below the
- // threshold. We have 10 BTC allocated amongst channels and
- // funds, atm. We're targeting 50%, so 5 BTC should be
- // allocated. Only 1 BTC is atm, so 4 BTC should be
- // recommended. We should also request 2 more channels as the
- // limit is 3.
- {
- []LocalChannel{
- {
- ChanID: randChanID(),
- Balance: btcutil.Amount(btcutil.UnitsPerCoin()),
- },
- },
- btcutil.Amount(btcutil.UnitsPerCoin() * 9),
- true,
- btcutil.Amount(btcutil.UnitsPerCoin() * 4),
- 2,
- },
-
- // Ratio of funds in channels and total funds is below the
- // threshold. We have 14 BTC total amongst the wallet's
- // balance, and our currently opened channels. Since we're
- // targeting a 50% allocation, we should commit 7 BTC. The
- // current channels commit 4 BTC, so we should expected 3 BTC
- // to be committed. We should only request a single additional
- // channel as the limit is 3.
- {
- []LocalChannel{
- {
- ChanID: randChanID(),
- Balance: btcutil.Amount(btcutil.UnitsPerCoin()),
- },
- {
- ChanID: randChanID(),
- Balance: btcutil.Amount(btcutil.UnitsPerCoin() * 3),
- },
- },
- btcutil.Amount(btcutil.UnitsPerCoin() * 10),
- true,
- btcutil.Amount(btcutil.UnitsPerCoin() * 3),
- 1,
- },
-
- // Ratio of funds in channels and total funds is above the
- // threshold.
- {
- []LocalChannel{
- {
- ChanID: randChanID(),
- Balance: btcutil.Amount(btcutil.UnitsPerCoin()),
- },
- {
- ChanID: randChanID(),
- Balance: btcutil.Amount(btcutil.UnitsPerCoin()),
- },
- },
- btcutil.Amount(btcutil.UnitsPerCoin()),
- false,
- 0,
- 0,
- },
- }
-
- for i, testCase := range testCases {
- amtToAllocate, numMore := constraints.ChannelBudget(
- testCase.channels, testCase.walletAmt,
- )
-
- if amtToAllocate != testCase.amtAvailable {
- t.Fatalf("test #%v: expected %v, got %v",
- i, testCase.amtAvailable, amtToAllocate)
- }
- if numMore != testCase.numMore {
- t.Fatalf("test #%v: expected %v, got %v",
- i, testCase.numMore, numMore)
- }
- }
-}
diff --git a/lnd/autopilot/agent_test.go b/lnd/autopilot/agent_test.go
deleted file mode 100644
index 3fb09001..00000000
--- a/lnd/autopilot/agent_test.go
+++ /dev/null
@@ -1,1377 +0,0 @@
-package autopilot
-
-import (
- "net"
- "sync"
- "testing"
- "time"
-
- "github.com/pkt-cash/pktd/btcec"
- "github.com/pkt-cash/pktd/btcutil"
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/wire"
-)
-
-type moreChansResp struct {
- numMore uint32
- amt btcutil.Amount
-}
-
-type moreChanArg struct {
- chans []LocalChannel
- balance btcutil.Amount
-}
-
-type mockConstraints struct {
- moreChansResps chan moreChansResp
- moreChanArgs chan moreChanArg
- quit chan struct{}
-}
-
-func (m *mockConstraints) ChannelBudget(chans []LocalChannel,
- balance btcutil.Amount) (btcutil.Amount, uint32) {
-
- if m.moreChanArgs != nil {
- moreChan := moreChanArg{
- chans: chans,
- balance: balance,
- }
-
- select {
- case m.moreChanArgs <- moreChan:
- case <-m.quit:
- return 0, 0
- }
- }
-
- select {
- case resp := <-m.moreChansResps:
- return resp.amt, resp.numMore
- case <-m.quit:
- return 0, 0
- }
-}
-
-func (m *mockConstraints) MaxPendingOpens() uint16 {
- return 10
-}
-
-func (m *mockConstraints) MinChanSize() btcutil.Amount {
- return 1e7
-}
-func (m *mockConstraints) MaxChanSize() btcutil.Amount {
- return 1e8
-}
-
-var _ AgentConstraints = (*mockConstraints)(nil)
-
-type mockHeuristic struct {
- nodeScoresResps chan map[NodeID]*NodeScore
- nodeScoresArgs chan directiveArg
-
- quit chan struct{}
-}
-
-type directiveArg struct {
- graph ChannelGraph
- amt btcutil.Amount
- chans []LocalChannel
- nodes map[NodeID]struct{}
-}
-
-func (m *mockHeuristic) Name() string {
- return "mock"
-}
-
-func (m *mockHeuristic) NodeScores(g ChannelGraph, chans []LocalChannel,
- chanSize btcutil.Amount, nodes map[NodeID]struct{}) (
- map[NodeID]*NodeScore, er.R) {
-
- if m.nodeScoresArgs != nil {
- directive := directiveArg{
- graph: g,
- amt: chanSize,
- chans: chans,
- nodes: nodes,
- }
-
- select {
- case m.nodeScoresArgs <- directive:
- case <-m.quit:
- return nil, er.New("exiting")
- }
- }
-
- select {
- case resp := <-m.nodeScoresResps:
- return resp, nil
- case <-m.quit:
- return nil, er.New("exiting")
- }
-}
-
-var _ AttachmentHeuristic = (*mockHeuristic)(nil)
-
-type openChanIntent struct {
- target *btcec.PublicKey
- amt btcutil.Amount
- private bool
-}
-
-type mockChanController struct {
- openChanSignals chan openChanIntent
- private bool
-}
-
-func (m *mockChanController) OpenChannel(target *btcec.PublicKey,
- amt btcutil.Amount) er.R {
-
- m.openChanSignals <- openChanIntent{
- target: target,
- amt: amt,
- private: m.private,
- }
-
- return nil
-}
-
-func (m *mockChanController) CloseChannel(chanPoint *wire.OutPoint) er.R {
- return nil
-}
-
-var _ ChannelController = (*mockChanController)(nil)
-
-type testContext struct {
- constraints *mockConstraints
- heuristic *mockHeuristic
- chanController ChannelController
- graph testGraph
- agent *Agent
- walletBalance btcutil.Amount
-
- quit chan struct{}
- sync.Mutex
-}
-
-func setup(t *testing.T, initialChans []LocalChannel) (*testContext, func()) {
- t.Helper()
-
- // First, we'll create all the dependencies that we'll need in order to
- // create the autopilot agent.
- self, err := randKey()
- if err != nil {
- t.Fatalf("unable to generate key: %v", err)
- }
-
- quit := make(chan struct{})
- heuristic := &mockHeuristic{
- nodeScoresArgs: make(chan directiveArg),
- nodeScoresResps: make(chan map[NodeID]*NodeScore),
- quit: quit,
- }
- constraints := &mockConstraints{
- moreChansResps: make(chan moreChansResp),
- moreChanArgs: make(chan moreChanArg),
- quit: quit,
- }
-
- chanController := &mockChanController{
- openChanSignals: make(chan openChanIntent, 10),
- }
- memGraph, _, _ := newMemChanGraph()
-
- // We'll keep track of the funds available to the agent, to make sure
- // it correctly uses this value when querying the ChannelBudget.
- var availableFunds btcutil.Amount = 10 * btcutil.UnitsPerCoin()
-
- ctx := &testContext{
- constraints: constraints,
- heuristic: heuristic,
- chanController: chanController,
- graph: memGraph,
- walletBalance: availableFunds,
- quit: quit,
- }
-
- // With the dependencies we created, we can now create the initial
- // agent itself.
- testCfg := Config{
- Self: self,
- Heuristic: heuristic,
- ChanController: chanController,
- WalletBalance: func() (btcutil.Amount, er.R) {
- ctx.Lock()
- defer ctx.Unlock()
- return ctx.walletBalance, nil
- },
- ConnectToPeer: func(*btcec.PublicKey, []net.Addr) (bool, er.R) {
- return false, nil
- },
- DisconnectPeer: func(*btcec.PublicKey) er.R {
- return nil
- },
- Graph: memGraph,
- Constraints: constraints,
- }
-
- agent, err := New(testCfg, initialChans)
- if err != nil {
- t.Fatalf("unable to create agent: %v", err)
- }
- ctx.agent = agent
-
- // With the autopilot agent and all its dependencies we'll start the
- // primary controller goroutine.
- if err := agent.Start(); err != nil {
- t.Fatalf("unable to start agent: %v", err)
- }
-
- cleanup := func() {
- // We must close quit before agent.Stop(), to make sure
- // ChannelBudget won't block preventing the agent from exiting.
- close(quit)
- agent.Stop()
- }
-
- return ctx, cleanup
-}
-
-// respondMoreChans consumes the moreChanArgs element and responds to the agent
-// with the given moreChansResp.
-func respondMoreChans(t *testing.T, testCtx *testContext, resp moreChansResp) {
- t.Helper()
-
- // The agent should now query the heuristic.
- select {
- case <-testCtx.constraints.moreChanArgs:
- case <-time.After(time.Second * 3):
- t.Fatalf("heuristic wasn't queried in time")
- }
-
- // We'll send the response.
- select {
- case testCtx.constraints.moreChansResps <- resp:
- case <-time.After(time.Second * 10):
- t.Fatalf("response wasn't sent in time")
- }
-}
-
-// respondMoreChans consumes the nodeScoresArgs element and responds to the
-// agent with the given node scores.
-func respondNodeScores(t *testing.T, testCtx *testContext,
- resp map[NodeID]*NodeScore) {
- t.Helper()
-
- // Send over an empty list of attachment directives, which should cause
- // the agent to return to waiting on a new signal.
- select {
- case <-testCtx.heuristic.nodeScoresArgs:
- case <-time.After(time.Second * 3):
- t.Fatalf("node scores weren't queried in time")
- }
- select {
- case testCtx.heuristic.nodeScoresResps <- resp:
- case <-time.After(time.Second * 10):
- t.Fatalf("node scores were not sent in time")
- }
-}
-
-// TestAgentChannelOpenSignal tests that upon receipt of a chanOpenUpdate, then
-// agent modifies its local state accordingly, and reconsults the heuristic.
-func TestAgentChannelOpenSignal(t *testing.T) {
- t.Parallel()
-
- testCtx, cleanup := setup(t, nil)
- defer cleanup()
-
- // We'll send an initial "no" response to advance the agent past its
- // initial check.
- respondMoreChans(t, testCtx, moreChansResp{0, 0})
-
- // Next we'll signal a new channel being opened by the backing LN node,
- // with a capacity of 1 BTC.
- newChan := LocalChannel{
- ChanID: randChanID(),
- Balance: btcutil.UnitsPerCoin(),
- }
- testCtx.agent.OnChannelOpen(newChan)
-
- // The agent should now query the heuristic in order to determine its
- // next action as it local state has now been modified.
- respondMoreChans(t, testCtx, moreChansResp{0, 0})
-
- // At this point, the local state of the agent should
- // have also been updated to reflect that the LN node
- // now has an additional channel with one BTC.
- if _, ok := testCtx.agent.chanState[newChan.ChanID]; !ok {
- t.Fatalf("internal channel state wasn't updated")
- }
-
- // There shouldn't be a call to the Select method as we've returned
- // "false" for NeedMoreChans above.
- select {
-
- // If this send success, then Select was erroneously called and the
- // test should be failed.
- case testCtx.heuristic.nodeScoresResps <- map[NodeID]*NodeScore{}:
- t.Fatalf("Select was called but shouldn't have been")
-
- // This is the correct path as Select should've be called.
- default:
- }
-}
-
-// TestAgentHeuristicUpdateSignal tests that upon notification about a
-// heuristic update, the agent reconsults the heuristic.
-func TestAgentHeuristicUpdateSignal(t *testing.T) {
- t.Parallel()
-
- testCtx, cleanup := setup(t, nil)
- defer cleanup()
-
- pub, err := testCtx.graph.addRandNode()
- if err != nil {
- t.Fatalf("unable to generate key: %v", err)
- }
-
- // We'll send an initial "no" response to advance the agent past its
- // initial check.
- respondMoreChans(t, testCtx, moreChansResp{0, 0})
-
- // Next we'll signal that one of the heuristcs have been updated.
- testCtx.agent.OnHeuristicUpdate(testCtx.heuristic)
-
- // The update should trigger the agent to ask for a channel budget.so
- // we'll respond that there is a budget for opening 1 more channel.
- respondMoreChans(t, testCtx,
- moreChansResp{
- numMore: 1,
- amt: 1 * btcutil.UnitsPerCoin(),
- },
- )
-
- // At this point, the agent should now be querying the heuristic for
- // scores. We'll respond.
- nodeID := NewNodeID(pub)
- scores := map[NodeID]*NodeScore{
- nodeID: {
- NodeID: nodeID,
- Score: 0.5,
- },
- }
- respondNodeScores(t, testCtx, scores)
-
- // Finally, this should result in the agent opening a channel.
- chanController := testCtx.chanController.(*mockChanController)
- select {
- case <-chanController.openChanSignals:
- case <-time.After(time.Second * 10):
- t.Fatalf("channel not opened in time")
- }
-}
-
-// A mockFailingChanController always fails to open a channel.
-type mockFailingChanController struct {
-}
-
-func (m *mockFailingChanController) OpenChannel(target *btcec.PublicKey,
- amt btcutil.Amount) er.R {
- return er.New("failure")
-}
-
-func (m *mockFailingChanController) CloseChannel(chanPoint *wire.OutPoint) er.R {
- return nil
-}
-
-var _ ChannelController = (*mockFailingChanController)(nil)
-
-// TestAgentChannelFailureSignal tests that if an autopilot channel fails to
-// open, the agent is signalled to make a new decision.
-func TestAgentChannelFailureSignal(t *testing.T) {
- t.Parallel()
-
- testCtx, cleanup := setup(t, nil)
- defer cleanup()
-
- testCtx.chanController = &mockFailingChanController{}
-
- node, err := testCtx.graph.addRandNode()
- if err != nil {
- t.Fatalf("unable to add node: %v", err)
- }
-
- // First ensure the agent will attempt to open a new channel. Return
- // that we need more channels, and have 5BTC to use.
- respondMoreChans(t, testCtx, moreChansResp{1, 5 * btcutil.UnitsPerCoin()})
-
- // At this point, the agent should now be querying the heuristic to
- // request attachment directives, return a fake so the agent will
- // attempt to open a channel.
- var fakeDirective = &NodeScore{
- NodeID: NewNodeID(node),
- Score: 0.5,
- }
-
- respondNodeScores(
- t, testCtx, map[NodeID]*NodeScore{
- NewNodeID(node): fakeDirective,
- },
- )
-
- // At this point the agent will attempt to create a channel and fail.
-
- // Now ensure that the controller loop is re-executed.
- respondMoreChans(t, testCtx, moreChansResp{1, 5 * btcutil.UnitsPerCoin()})
- respondNodeScores(t, testCtx, map[NodeID]*NodeScore{})
-}
-
-// TestAgentChannelCloseSignal ensures that once the agent receives an outside
-// signal of a channel belonging to the backing LN node being closed, then it
-// will query the heuristic to make its next decision.
-func TestAgentChannelCloseSignal(t *testing.T) {
- t.Parallel()
- // We'll start the agent with two channels already being active.
- initialChans := []LocalChannel{
- {
- ChanID: randChanID(),
- Balance: btcutil.UnitsPerCoin(),
- },
- {
- ChanID: randChanID(),
- Balance: btcutil.UnitsPerCoin() * 2,
- },
- }
-
- testCtx, cleanup := setup(t, initialChans)
- defer cleanup()
-
- // We'll send an initial "no" response to advance the agent past its
- // initial check.
- respondMoreChans(t, testCtx, moreChansResp{0, 0})
-
- // Next, we'll close both channels which should force the agent to
- // re-query the heuristic.
- testCtx.agent.OnChannelClose(initialChans[0].ChanID, initialChans[1].ChanID)
-
- // The agent should now query the heuristic in order to determine its
- // next action as it local state has now been modified.
- respondMoreChans(t, testCtx, moreChansResp{0, 0})
-
- // At this point, the local state of the agent should
- // have also been updated to reflect that the LN node
- // has no existing open channels.
- if len(testCtx.agent.chanState) != 0 {
- t.Fatalf("internal channel state wasn't updated")
- }
-
- // There shouldn't be a call to the Select method as we've returned
- // "false" for NeedMoreChans above.
- select {
-
- // If this send success, then Select was erroneously called and the
- // test should be failed.
- case testCtx.heuristic.nodeScoresResps <- map[NodeID]*NodeScore{}:
- t.Fatalf("Select was called but shouldn't have been")
-
- // This is the correct path as Select should've be called.
- default:
- }
-}
-
-// TestAgentBalanceUpdateIncrease ensures that once the agent receives an
-// outside signal concerning a balance update, then it will re-query the
-// heuristic to determine its next action.
-func TestAgentBalanceUpdate(t *testing.T) {
- t.Parallel()
-
- testCtx, cleanup := setup(t, nil)
- defer cleanup()
-
- // We'll send an initial "no" response to advance the agent past its
- // initial check.
- respondMoreChans(t, testCtx, moreChansResp{0, 0})
-
- // Next we'll send a new balance update signal to the agent, adding 5
- // BTC to the amount of available funds.
- testCtx.Lock()
- testCtx.walletBalance += btcutil.UnitsPerCoin() * 5
- testCtx.Unlock()
-
- testCtx.agent.OnBalanceChange()
-
- // The agent should now query the heuristic in order to determine its
- // next action as it local state has now been modified.
- respondMoreChans(t, testCtx, moreChansResp{0, 0})
-
- // At this point, the local state of the agent should
- // have also been updated to reflect that the LN node
- // now has an additional 5BTC available.
- if testCtx.agent.totalBalance != testCtx.walletBalance {
- t.Fatalf("expected %v wallet balance "+
- "instead have %v", testCtx.agent.totalBalance,
- testCtx.walletBalance)
- }
-
- // There shouldn't be a call to the Select method as we've returned
- // "false" for NeedMoreChans above.
- select {
-
- // If this send success, then Select was erroneously called and the
- // test should be failed.
- case testCtx.heuristic.nodeScoresResps <- map[NodeID]*NodeScore{}:
- t.Fatalf("Select was called but shouldn't have been")
-
- // This is the correct path as Select should've be called.
- default:
- }
-}
-
-// TestAgentImmediateAttach tests that if an autopilot agent is created, and it
-// has enough funds available to create channels, then it does so immediately.
-func TestAgentImmediateAttach(t *testing.T) {
- t.Parallel()
-
- testCtx, cleanup := setup(t, nil)
- defer cleanup()
-
- const numChans = 5
-
- // We'll generate 5 mock directives so it can progress within its loop.
- directives := make(map[NodeID]*NodeScore)
- nodeKeys := make(map[NodeID]struct{})
- for i := 0; i < numChans; i++ {
- pub, err := testCtx.graph.addRandNode()
- if err != nil {
- t.Fatalf("unable to generate key: %v", err)
- }
- nodeID := NewNodeID(pub)
- directives[nodeID] = &NodeScore{
- NodeID: nodeID,
- Score: 0.5,
- }
- nodeKeys[nodeID] = struct{}{}
- }
- // The very first thing the agent should do is query the NeedMoreChans
- // method on the passed heuristic. So we'll provide it with a response
- // that will kick off the main loop.
- respondMoreChans(t, testCtx,
- moreChansResp{
- numMore: numChans,
- amt: 5 * btcutil.UnitsPerCoin(),
- },
- )
-
- // At this point, the agent should now be querying the heuristic to
- // requests attachment directives. With our fake directives created,
- // we'll now send then to the agent as a return value for the Select
- // function.
- respondNodeScores(t, testCtx, directives)
-
- // Finally, we should receive 5 calls to the OpenChannel method with
- // the exact same parameters that we specified within the attachment
- // directives.
- chanController := testCtx.chanController.(*mockChanController)
- for i := 0; i < numChans; i++ {
- select {
- case openChan := <-chanController.openChanSignals:
- if openChan.amt != btcutil.UnitsPerCoin() {
- t.Fatalf("invalid chan amt: expected %v, got %v",
- btcutil.UnitsPerCoin(), openChan.amt)
- }
- nodeID := NewNodeID(openChan.target)
- _, ok := nodeKeys[nodeID]
- if !ok {
- t.Fatalf("unexpected key: %v, not found",
- nodeID)
- }
- delete(nodeKeys, nodeID)
-
- case <-time.After(time.Second * 10):
- t.Fatalf("channel not opened in time")
- }
- }
-}
-
-// TestAgentPrivateChannels ensure that only requests for private channels are
-// sent if set.
-func TestAgentPrivateChannels(t *testing.T) {
- t.Parallel()
-
- testCtx, cleanup := setup(t, nil)
- defer cleanup()
-
- // The chanController should be initialized such that all of its open
- // channel requests are for private channels.
- testCtx.chanController.(*mockChanController).private = true
-
- const numChans = 5
-
- // We'll generate 5 mock directives so the pubkeys will be found in the
- // agent's graph, and it can progress within its loop.
- directives := make(map[NodeID]*NodeScore)
- for i := 0; i < numChans; i++ {
- pub, err := testCtx.graph.addRandNode()
- if err != nil {
- t.Fatalf("unable to generate key: %v", err)
- }
- directives[NewNodeID(pub)] = &NodeScore{
- NodeID: NewNodeID(pub),
- Score: 0.5,
- }
- }
-
- // The very first thing the agent should do is query the NeedMoreChans
- // method on the passed heuristic. So we'll provide it with a response
- // that will kick off the main loop. We'll send over a response
- // indicating that it should establish more channels, and give it a
- // budget of 5 BTC to do so.
- resp := moreChansResp{
- numMore: numChans,
- amt: 5 * btcutil.UnitsPerCoin(),
- }
- respondMoreChans(t, testCtx, resp)
-
- // At this point, the agent should now be querying the heuristic to
- // requests attachment directives. With our fake directives created,
- // we'll now send then to the agent as a return value for the Select
- // function.
- respondNodeScores(t, testCtx, directives)
-
- // Finally, we should receive 5 calls to the OpenChannel method, each
- // specifying that it's for a private channel.
- chanController := testCtx.chanController.(*mockChanController)
- for i := 0; i < numChans; i++ {
- select {
- case openChan := <-chanController.openChanSignals:
- if !openChan.private {
- t.Fatal("expected open channel request to be private")
- }
- case <-time.After(10 * time.Second):
- t.Fatal("channel not opened in time")
- }
- }
-}
-
-// TestAgentPendingChannelState ensures that the agent properly factors in its
-// pending channel state when making decisions w.r.t if it needs more channels
-// or not, and if so, who is eligible to open new channels to.
-func TestAgentPendingChannelState(t *testing.T) {
- t.Parallel()
-
- testCtx, cleanup := setup(t, nil)
- defer cleanup()
-
- // We'll only return a single directive for a pre-chosen node.
- nodeKey, err := testCtx.graph.addRandNode()
- if err != nil {
- t.Fatalf("unable to generate key: %v", err)
- }
- nodeID := NewNodeID(nodeKey)
- nodeDirective := &NodeScore{
- NodeID: nodeID,
- Score: 0.5,
- }
-
- // Once again, we'll start by telling the agent as part of its first
- // query, that it needs more channels and has 3 BTC available for
- // attachment. We'll send over a response indicating that it should
- // establish more channels, and give it a budget of 1 BTC to do so.
- respondMoreChans(t, testCtx,
- moreChansResp{
- numMore: 1,
- amt: btcutil.UnitsPerCoin(),
- },
- )
-
- respondNodeScores(t, testCtx,
- map[NodeID]*NodeScore{
- nodeID: nodeDirective,
- },
- )
-
- // A request to open the channel should've also been sent.
- chanController := testCtx.chanController.(*mockChanController)
- select {
- case openChan := <-chanController.openChanSignals:
- chanAmt := testCtx.constraints.MaxChanSize()
- if openChan.amt != chanAmt {
- t.Fatalf("invalid chan amt: expected %v, got %v",
- chanAmt, openChan.amt)
- }
- if !openChan.target.IsEqual(nodeKey) {
- t.Fatalf("unexpected key: expected %x, got %x",
- nodeKey.SerializeCompressed(),
- openChan.target.SerializeCompressed())
- }
- case <-time.After(time.Second * 10):
- t.Fatalf("channel wasn't opened in time")
- }
-
- // Now, in order to test that the pending state was properly updated,
- // we'll trigger a balance update in order to trigger a query to the
- // heuristic.
- testCtx.Lock()
- testCtx.walletBalance += btcutil.Amount(0.4 * btcutil.UnitsPerCoinF())
- testCtx.Unlock()
-
- testCtx.agent.OnBalanceChange()
-
- // The heuristic should be queried, and the argument for the set of
- // channels passed in should include the pending channels that
- // should've been created above.
- select {
- // The request that we get should include a pending channel for the
- // one that we just created, otherwise the agent isn't properly
- // updating its internal state.
- case req := <-testCtx.constraints.moreChanArgs:
- chanAmt := testCtx.constraints.MaxChanSize()
- if len(req.chans) != 1 {
- t.Fatalf("should include pending chan in current "+
- "state, instead have %v chans", len(req.chans))
- }
- if req.chans[0].Balance != chanAmt {
- t.Fatalf("wrong chan balance: expected %v, got %v",
- req.chans[0].Balance, chanAmt)
- }
- if req.chans[0].Node != nodeID {
- t.Fatalf("wrong node ID: expected %x, got %x",
- nodeID, req.chans[0].Node[:])
- }
- case <-time.After(time.Second * 10):
- t.Fatalf("need more chans wasn't queried in time")
- }
-
- // We'll send across a response indicating that it *does* need more
- // channels.
- select {
- case testCtx.constraints.moreChansResps <- moreChansResp{1, btcutil.UnitsPerCoin()}:
- case <-time.After(time.Second * 10):
- t.Fatalf("need more chans wasn't queried in time")
- }
-
- // The response above should prompt the agent to make a query to the
- // Select method. The arguments passed should reflect the fact that the
- // node we have a pending channel to, should be ignored.
- select {
- case req := <-testCtx.heuristic.nodeScoresArgs:
- if len(req.chans) == 0 {
- t.Fatalf("expected to skip %v nodes, instead "+
- "skipping %v", 1, len(req.chans))
- }
- if req.chans[0].Node != nodeID {
- t.Fatalf("pending node not included in skip arguments")
- }
- case <-time.After(time.Second * 10):
- t.Fatalf("select wasn't queried in time")
- }
-}
-
-// TestAgentPendingOpenChannel ensures that the agent queries its heuristic once
-// it detects a channel is pending open. This allows the agent to use its own
-// change outputs that have yet to confirm for funding transactions.
-func TestAgentPendingOpenChannel(t *testing.T) {
- t.Parallel()
-
- testCtx, cleanup := setup(t, nil)
- defer cleanup()
-
- // We'll send an initial "no" response to advance the agent past its
- // initial check.
- respondMoreChans(t, testCtx, moreChansResp{0, 0})
-
- // Next, we'll signal that a new channel has been opened, but it is
- // still pending.
- testCtx.agent.OnChannelPendingOpen()
-
- // The agent should now query the heuristic in order to determine its
- // next action as its local state has now been modified.
- respondMoreChans(t, testCtx, moreChansResp{0, 0})
-
- // There shouldn't be a call to the Select method as we've returned
- // "false" for NeedMoreChans above.
- select {
- case testCtx.heuristic.nodeScoresResps <- map[NodeID]*NodeScore{}:
- t.Fatalf("Select was called but shouldn't have been")
- default:
- }
-}
-
-// TestAgentOnNodeUpdates tests that the agent will wake up in response to the
-// OnNodeUpdates signal. This is useful in ensuring that autopilot is always
-// pulling in the latest graph updates into its decision making. It also
-// prevents the agent from stalling after an initial attempt that finds no nodes
-// in the graph.
-func TestAgentOnNodeUpdates(t *testing.T) {
- t.Parallel()
-
- testCtx, cleanup := setup(t, nil)
- defer cleanup()
-
- // We'll send an initial "yes" response to advance the agent past its
- // initial check. This will cause it to try to get directives from an
- // empty graph.
- respondMoreChans(
- t, testCtx,
- moreChansResp{
- numMore: 2,
- amt: testCtx.walletBalance,
- },
- )
-
- // Send over an empty list of attachment directives, which should cause
- // the agent to return to waiting on a new signal.
- respondNodeScores(t, testCtx, map[NodeID]*NodeScore{})
-
- // Simulate more nodes being added to the graph by informing the agent
- // that we have node updates.
- testCtx.agent.OnNodeUpdates()
-
- // In response, the agent should wake up and see if it needs more
- // channels. Since we haven't done anything, we will send the same
- // response as before since we are still trying to open channels.
- respondMoreChans(
- t, testCtx,
- moreChansResp{
- numMore: 2,
- amt: testCtx.walletBalance,
- },
- )
-
- // Again the agent should pull in the next set of attachment directives.
- // It's not important that this list is also empty, so long as the node
- // updates signal is causing the agent to make this attempt.
- respondNodeScores(t, testCtx, map[NodeID]*NodeScore{})
-}
-
-// TestAgentSkipPendingConns asserts that the agent will not try to make
-// duplicate connection requests to the same node, even if the attachment
-// heuristic instructs the agent to do so. It also asserts that the agent
-// stops tracking the pending connection once it finishes. Note that in
-// practice, a failed connection would be inserted into the skip map passed to
-// the attachment heuristic, though this does not assert that case.
-func TestAgentSkipPendingConns(t *testing.T) {
- t.Parallel()
-
- testCtx, cleanup := setup(t, nil)
- defer cleanup()
-
- connect := make(chan chan er.R)
- testCtx.agent.cfg.ConnectToPeer = func(*btcec.PublicKey, []net.Addr) (bool, er.R) {
- errChan := make(chan er.R)
-
- select {
- case connect <- errChan:
- case <-testCtx.quit:
- return false, er.New("quit")
- }
-
- select {
- case err := <-errChan:
- return false, err
- case <-testCtx.quit:
- return false, er.New("quit")
- }
- }
-
- // We'll only return a single directive for a pre-chosen node.
- nodeKey, err := testCtx.graph.addRandNode()
- if err != nil {
- t.Fatalf("unable to generate key: %v", err)
- }
- nodeID := NewNodeID(nodeKey)
- nodeDirective := &NodeScore{
- NodeID: nodeID,
- Score: 0.5,
- }
-
- // We'll also add a second node to the graph, to keep the first one
- // company.
- nodeKey2, err := testCtx.graph.addRandNode()
- if err != nil {
- t.Fatalf("unable to generate key: %v", err)
- }
- nodeID2 := NewNodeID(nodeKey2)
-
- // We'll send an initial "yes" response to advance the agent past its
- // initial check. This will cause it to try to get directives from the
- // graph.
- respondMoreChans(t, testCtx,
- moreChansResp{
- numMore: 1,
- amt: testCtx.walletBalance,
- },
- )
-
- // Both nodes should be part of the arguments.
- select {
- case req := <-testCtx.heuristic.nodeScoresArgs:
- if len(req.nodes) != 2 {
- t.Fatalf("expected %v nodes, instead "+
- "had %v", 2, len(req.nodes))
- }
- if _, ok := req.nodes[nodeID]; !ok {
- t.Fatalf("node not included in arguments")
- }
- if _, ok := req.nodes[nodeID2]; !ok {
- t.Fatalf("node not included in arguments")
- }
- case <-time.After(time.Second * 10):
- t.Fatalf("select wasn't queried in time")
- }
-
- // Respond with a scored directive. We skip node2 for now, implicitly
- // giving it a zero-score.
- select {
- case testCtx.heuristic.nodeScoresResps <- map[NodeID]*NodeScore{
- NewNodeID(nodeKey): nodeDirective,
- }:
- case <-time.After(time.Second * 10):
- t.Fatalf("heuristic wasn't queried in time")
- }
-
- // The agent should attempt connection to the node.
- var errChan chan er.R
- select {
- case errChan = <-connect:
- case <-time.After(time.Second * 10):
- t.Fatalf("agent did not attempt connection")
- }
-
- // Signal the agent to go again, now that we've tried to connect.
- testCtx.agent.OnNodeUpdates()
-
- // The heuristic again informs the agent that we need more channels.
- respondMoreChans(t, testCtx,
- moreChansResp{
- numMore: 1,
- amt: testCtx.walletBalance,
- },
- )
-
- // Since the node now has a pending connection, it should be skipped
- // and not part of the nodes attempting to be scored.
- select {
- case req := <-testCtx.heuristic.nodeScoresArgs:
- if len(req.nodes) != 1 {
- t.Fatalf("expected %v nodes, instead "+
- "had %v", 1, len(req.nodes))
- }
- if _, ok := req.nodes[nodeID2]; !ok {
- t.Fatalf("node not included in arguments")
- }
- case <-time.After(time.Second * 10):
- t.Fatalf("select wasn't queried in time")
- }
-
- // Respond with an emtpty score set.
- select {
- case testCtx.heuristic.nodeScoresResps <- map[NodeID]*NodeScore{}:
- case <-time.After(time.Second * 10):
- t.Fatalf("heuristic wasn't queried in time")
- }
-
- // The agent should not attempt any connection, since no nodes were
- // scored.
- select {
- case <-connect:
- t.Fatalf("agent should not have attempted connection")
- case <-time.After(time.Second * 3):
- }
-
- // Now, timeout the original request, which should still be waiting for
- // a response.
- select {
- case errChan <- er.Errorf("connection timeout"):
- case <-time.After(time.Second * 10):
- t.Fatalf("agent did not receive connection timeout")
- }
-
- // The agent will now retry since the last connection attempt failed.
- // The heuristic again informs the agent that we need more channels.
- respondMoreChans(t, testCtx,
- moreChansResp{
- numMore: 1,
- amt: testCtx.walletBalance,
- },
- )
-
- // The node should now be marked as "failed", which should make it
- // being skipped during scoring. Again check that it won't be among the
- // score request.
- select {
- case req := <-testCtx.heuristic.nodeScoresArgs:
- if len(req.nodes) != 1 {
- t.Fatalf("expected %v nodes, instead "+
- "had %v", 1, len(req.nodes))
- }
- if _, ok := req.nodes[nodeID2]; !ok {
- t.Fatalf("node not included in arguments")
- }
- case <-time.After(time.Second * 10):
- t.Fatalf("select wasn't queried in time")
- }
-
- // Send a directive for the second node.
- nodeDirective2 := &NodeScore{
- NodeID: nodeID2,
- Score: 0.5,
- }
- select {
- case testCtx.heuristic.nodeScoresResps <- map[NodeID]*NodeScore{
- nodeID2: nodeDirective2,
- }:
- case <-time.After(time.Second * 10):
- t.Fatalf("heuristic wasn't queried in time")
- }
-
- // This time, the agent should try the connection to the second node.
- select {
- case <-connect:
- case <-time.After(time.Second * 10):
- t.Fatalf("agent should have attempted connection")
- }
-}
-
-// TestAgentQuitWhenPendingConns tests that we are able to stop the autopilot
-// agent even though there are pending connections to nodes.
-func TestAgentQuitWhenPendingConns(t *testing.T) {
- t.Parallel()
-
- testCtx, cleanup := setup(t, nil)
- defer cleanup()
-
- connect := make(chan chan er.R)
-
- testCtx.agent.cfg.ConnectToPeer = func(*btcec.PublicKey, []net.Addr) (bool, er.R) {
- errChan := make(chan er.R)
-
- select {
- case connect <- errChan:
- case <-testCtx.quit:
- return false, er.New("quit")
- }
-
- select {
- case err := <-errChan:
- return false, err
- case <-testCtx.quit:
- return false, er.New("quit")
- }
- }
-
- // We'll only return a single directive for a pre-chosen node.
- nodeKey, err := testCtx.graph.addRandNode()
- if err != nil {
- t.Fatalf("unable to generate key: %v", err)
- }
- nodeID := NewNodeID(nodeKey)
- nodeDirective := &NodeScore{
- NodeID: nodeID,
- Score: 0.5,
- }
-
- // We'll send an initial "yes" response to advance the agent past its
- // initial check. This will cause it to try to get directives from the
- // graph.
- respondMoreChans(t, testCtx,
- moreChansResp{
- numMore: 1,
- amt: testCtx.walletBalance,
- },
- )
-
- // Check the args.
- select {
- case req := <-testCtx.heuristic.nodeScoresArgs:
- if len(req.nodes) != 1 {
- t.Fatalf("expected %v nodes, instead "+
- "had %v", 1, len(req.nodes))
- }
- if _, ok := req.nodes[nodeID]; !ok {
- t.Fatalf("node not included in arguments")
- }
- case <-time.After(time.Second * 10):
- t.Fatalf("select wasn't queried in time")
- }
-
- // Respond with a scored directive.
- select {
- case testCtx.heuristic.nodeScoresResps <- map[NodeID]*NodeScore{
- NewNodeID(nodeKey): nodeDirective,
- }:
- case <-time.After(time.Second * 10):
- t.Fatalf("heuristic wasn't queried in time")
- }
-
- // The agent should attempt connection to the node.
- select {
- case <-connect:
- case <-time.After(time.Second * 10):
- t.Fatalf("agent did not attempt connection")
- }
-
- // Make sure that we are able to stop the agent, even though there is a
- // pending connection.
- stopped := make(chan er.R)
- go func() {
- stopped <- testCtx.agent.Stop()
- }()
-
- select {
- case err := <-stopped:
- if err != nil {
- t.Fatalf("error stopping agent: %v", err)
- }
- case <-time.After(2 * time.Second):
- t.Fatalf("unable to stop agent")
- }
-}
-
-// respondWithScores checks that the moreChansRequest contains what we expect,
-// and responds with the given node scores.
-func respondWithScores(t *testing.T, testCtx *testContext,
- channelBudget btcutil.Amount, existingChans, newChans int,
- nodeScores map[NodeID]*NodeScore) {
-
- t.Helper()
-
- select {
- case testCtx.constraints.moreChansResps <- moreChansResp{
- numMore: uint32(newChans),
- amt: channelBudget,
- }:
- case <-time.After(time.Second * 3):
- t.Fatalf("heuristic wasn't queried in time")
- }
-
- // The agent should query for scores using the constraints returned
- // above. We expect the agent to use the maximum channel size when
- // opening channels.
- chanSize := testCtx.constraints.MaxChanSize()
-
- select {
- case req := <-testCtx.heuristic.nodeScoresArgs:
- // All nodes in the graph should be potential channel
- // candidates.
- if len(req.nodes) != len(nodeScores) {
- t.Fatalf("expected %v nodes, instead had %v",
- len(nodeScores), len(req.nodes))
- }
-
- // 'existingChans' is already open.
- if len(req.chans) != existingChans {
- t.Fatalf("expected %d existing channel, got %v",
- existingChans, len(req.chans))
- }
- if req.amt != chanSize {
- t.Fatalf("expected channel size of %v, got %v",
- chanSize, req.amt)
- }
-
- case <-time.After(time.Second * 3):
- t.Fatalf("select wasn't queried in time")
- }
-
- // Respond with the given scores.
- select {
- case testCtx.heuristic.nodeScoresResps <- nodeScores:
- case <-time.After(time.Second * 3):
- t.Fatalf("NodeScores wasn't queried in time")
- }
-}
-
-// checkChannelOpens asserts that the channel controller attempts open the
-// number of channels we expect, and with the exact total allocation.
-func checkChannelOpens(t *testing.T, testCtx *testContext,
- allocation btcutil.Amount, numChans int) []NodeID {
-
- var nodes []NodeID
-
- // The agent should attempt to open channels, totaling what we expect.
- var totalAllocation btcutil.Amount
- chanController := testCtx.chanController.(*mockChanController)
- for i := 0; i < numChans; i++ {
- select {
- case openChan := <-chanController.openChanSignals:
- totalAllocation += openChan.amt
-
- testCtx.Lock()
- testCtx.walletBalance -= openChan.amt
- testCtx.Unlock()
-
- nodes = append(nodes, NewNodeID(openChan.target))
-
- case <-time.After(time.Second * 3):
- t.Fatalf("channel not opened in time")
- }
- }
-
- if totalAllocation != allocation {
- t.Fatalf("expected agent to open channels totalling %v, "+
- "instead was %v", allocation, totalAllocation)
- }
-
- // Finally, make sure the agent won't try opening more channels.
- select {
- case <-chanController.openChanSignals:
- t.Fatalf("agent unexpectedly opened channel")
-
- case <-time.After(50 * time.Millisecond):
- }
-
- return nodes
-}
-
-// TestAgentChannelSizeAllocation tests that the autopilot agent opens channel
-// of size that stays within the channel budget and size restrictions.
-func TestAgentChannelSizeAllocation(t *testing.T) {
- t.Parallel()
-
- // Total number of nodes in our mock graph.
- const numNodes = 20
-
- testCtx, cleanup := setup(t, nil)
- defer cleanup()
-
- nodeScores := make(map[NodeID]*NodeScore)
- for i := 0; i < numNodes; i++ {
- nodeKey, err := testCtx.graph.addRandNode()
- if err != nil {
- t.Fatalf("unable to generate key: %v", err)
- }
- nodeID := NewNodeID(nodeKey)
- nodeScores[nodeID] = &NodeScore{
- NodeID: nodeID,
- Score: 0.5,
- }
- }
-
- // The agent should now query the heuristic in order to determine its
- // next action as it local state has now been modified.
- select {
- case arg := <-testCtx.constraints.moreChanArgs:
- if len(arg.chans) != 0 {
- t.Fatalf("expected agent to have no channels open, "+
- "had %v", len(arg.chans))
- }
- if arg.balance != testCtx.walletBalance {
- t.Fatalf("expectd agent to have %v balance, had %v",
- testCtx.walletBalance, arg.balance)
- }
- case <-time.After(time.Second * 3):
- t.Fatalf("heuristic wasn't queried in time")
- }
-
- // We'll return a response telling the agent to open 5 channels, with a
- // total channel budget of 5 BTC.
- var channelBudget btcutil.Amount = 5 * btcutil.UnitsPerCoin()
- numExistingChannels := 0
- numNewChannels := 5
- respondWithScores(
- t, testCtx, channelBudget, numExistingChannels,
- numNewChannels, nodeScores,
- )
-
- // We expect the autopilot to have allocated all funds towards
- // channels.
- expectedAllocation := testCtx.constraints.MaxChanSize() * btcutil.Amount(numNewChannels)
- nodes := checkChannelOpens(
- t, testCtx, expectedAllocation, numNewChannels,
- )
-
- // Delete the selected nodes from our set of scores, to avoid scoring
- // nodes we already have channels to.
- for _, node := range nodes {
- delete(nodeScores, node)
- }
-
- // TODO(halseth): this loop is a hack to ensure all the attempted
- // channels are accounted for. This happens because the agent will
- // query the ChannelBudget before all the pending channels are added to
- // the map. Fix by adding them to the pending channels map before
- // executing directives in goroutines?
- waitForNumChans := func(expChans int) {
- t.Helper()
-
- var (
- numChans int
- balance btcutil.Amount
- )
-
- Loop:
- for {
- select {
- case arg := <-testCtx.constraints.moreChanArgs:
- numChans = len(arg.chans)
- balance = arg.balance
-
- // As long as the number of existing channels
- // is below our expected number of channels,
- // and the balance is not what we expect, we'll
- // keep responding with "no more channels".
- if numChans == expChans &&
- balance == testCtx.walletBalance {
- break Loop
- }
-
- select {
- case testCtx.constraints.moreChansResps <- moreChansResp{0, 0}:
- case <-time.After(time.Second * 3):
- t.Fatalf("heuristic wasn't queried " +
- "in time")
- }
-
- case <-time.After(time.Second * 3):
- t.Fatalf("did not receive expected "+
- "channels(%d) and balance(%d), "+
- "instead got %d and %d", expChans,
- testCtx.walletBalance, numChans,
- balance)
- }
- }
- }
-
- // Wait for the agent to have 5 channels.
- waitForNumChans(numNewChannels)
-
- // Set the channel budget to 1.5 BTC.
- channelBudget = btcutil.UnitsPerCoin() * 3 / 2
-
- // We'll return a response telling the agent to open 3 channels, with a
- // total channel budget of 1.5 BTC.
- numExistingChannels = 5
- numNewChannels = 3
- respondWithScores(
- t, testCtx, channelBudget, numExistingChannels,
- numNewChannels, nodeScores,
- )
-
- // To stay within the budget, we expect the autopilot to open 2
- // channels.
- expectedAllocation = channelBudget
- nodes = checkChannelOpens(t, testCtx, expectedAllocation, 2)
- numExistingChannels = 7
-
- for _, node := range nodes {
- delete(nodeScores, node)
- }
-
- waitForNumChans(numExistingChannels)
-
- // Finally check that we make maximum channels if we are well within
- // our budget.
- channelBudget = btcutil.UnitsPerCoin() * 5
- numNewChannels = 2
- respondWithScores(
- t, testCtx, channelBudget, numExistingChannels,
- numNewChannels, nodeScores,
- )
-
- // We now expect the autopilot to open 2 channels, and since it has
- // more than enough balance within the budget, they should both be of
- // maximum size.
- expectedAllocation = testCtx.constraints.MaxChanSize() *
- btcutil.Amount(numNewChannels)
-
- checkChannelOpens(t, testCtx, expectedAllocation, numNewChannels)
-}
diff --git a/lnd/autopilot/betweenness_centrality.go b/lnd/autopilot/betweenness_centrality.go
deleted file mode 100644
index e701b721..00000000
--- a/lnd/autopilot/betweenness_centrality.go
+++ /dev/null
@@ -1,266 +0,0 @@
-package autopilot
-
-import (
- "sync"
-
- "github.com/pkt-cash/pktd/btcutil/er"
-)
-
-// stack is a simple int stack to help with readability of Brandes'
-// betweenness centrality implementation below.
-type stack struct {
- stack []int
-}
-
-func (s *stack) push(v int) {
- s.stack = append(s.stack, v)
-}
-
-func (s *stack) top() int {
- return s.stack[len(s.stack)-1]
-}
-
-func (s *stack) pop() {
- s.stack = s.stack[:len(s.stack)-1]
-}
-
-func (s *stack) empty() bool {
- return len(s.stack) == 0
-}
-
-// queue is a simple int queue to help with readability of Brandes'
-// betweenness centrality implementation below.
-type queue struct {
- queue []int
-}
-
-func (q *queue) push(v int) {
- q.queue = append(q.queue, v)
-}
-
-func (q *queue) front() int {
- return q.queue[0]
-}
-
-func (q *queue) pop() {
- q.queue = q.queue[1:]
-}
-
-func (q *queue) empty() bool {
- return len(q.queue) == 0
-}
-
-// BetweennessCentrality is a NodeMetric that calculates node betweenness
-// centrality using Brandes' algorithm. Betweenness centrality for each node
-// is the number of shortest paths passing trough that node, not counting
-// shortest paths starting or ending at that node. This is a useful metric
-// to measure control of individual nodes over the whole network.
-type BetweennessCentrality struct {
- // workers number of goroutines are used to parallelize
- // centrality calculation.
- workers int
-
- // centrality stores original (not normalized) centrality values for
- // each node in the graph.
- centrality map[NodeID]float64
-
- // min is the minimum centrality in the graph.
- min float64
-
- // max is the maximum centrality in the graph.
- max float64
-}
-
-// NewBetweennessCentralityMetric creates a new BetweennessCentrality instance.
-// Users can specify the number of workers to use for calculating centrality.
-func NewBetweennessCentralityMetric(workers int) (*BetweennessCentrality, er.R) {
- // There should be at least one worker.
- if workers < 1 {
- return nil, er.Errorf("workers must be positive")
- }
- return &BetweennessCentrality{
- workers: workers,
- }, nil
-}
-
-// Name returns the name of the metric.
-func (bc *BetweennessCentrality) Name() string {
- return "betweenness_centrality"
-}
-
-// betweennessCentrality is the core of Brandes' algorithm.
-// We first calculate the shortest paths from the start node s to all other
-// nodes with BFS, then update the betweenness centrality values by using
-// Brandes' dependency trick.
-// For detailed explanation please read:
-// https://www.cl.cam.ac.uk/teaching/1617/MLRD/handbook/brandes.html
-func betweennessCentrality(g *SimpleGraph, s int, centrality []float64) {
- // pred[w] is the list of nodes that immediately precede w on a
- // shortest path from s to t for each node t.
- pred := make([][]int, len(g.Nodes))
-
- // sigma[t] is the number of shortest paths between nodes s and t
- // for each node t.
- sigma := make([]int, len(g.Nodes))
- sigma[s] = 1
-
- // dist[t] holds the distance between s and t for each node t.
- // We initialize this to -1 (meaning infinity) for each t != s.
- dist := make([]int, len(g.Nodes))
- for i := range dist {
- dist[i] = -1
- }
-
- dist[s] = 0
-
- var (
- st stack
- q queue
- )
- q.push(s)
-
- // BFS to calculate the shortest paths (sigma and pred)
- // from s to t for each node t.
- for !q.empty() {
- v := q.front()
- q.pop()
- st.push(v)
-
- for _, w := range g.Adj[v] {
- // If distance from s to w is infinity (-1)
- // then set it and enqueue w.
- if dist[w] < 0 {
- dist[w] = dist[v] + 1
- q.push(w)
- }
-
- // If w is on a shortest path the update
- // sigma and add v to w's predecessor list.
- if dist[w] == dist[v]+1 {
- sigma[w] += sigma[v]
- pred[w] = append(pred[w], v)
- }
- }
- }
-
- // delta[v] is the ratio of the shortest paths between s and t that go
- // through v and the total number of shortest paths between s and t.
- // If we have delta then the betweenness centrality is simply the sum
- // of delta[w] for each w != s.
- delta := make([]float64, len(g.Nodes))
-
- for !st.empty() {
- w := st.top()
- st.pop()
-
- // pred[w] is the list of nodes that immediately precede w on a
- // shortest path from s.
- for _, v := range pred[w] {
- // Update delta using Brandes' equation.
- delta[v] += (float64(sigma[v]) / float64(sigma[w])) * (1.0 + delta[w])
- }
-
- if w != s {
- // As noted above centrality is simply the sum
- // of delta[w] for each w != s.
- centrality[w] += delta[w]
- }
- }
-}
-
-// Refresh recaculates and stores centrality values.
-func (bc *BetweennessCentrality) Refresh(graph ChannelGraph) er.R {
- cache, err := NewSimpleGraph(graph)
- if err != nil {
- return err
- }
-
- var wg sync.WaitGroup
- work := make(chan int)
- partials := make(chan []float64, bc.workers)
-
- // Each worker will compute a partial result.
- // This partial result is a sum of centrality updates
- // on roughly N / workers nodes.
- worker := func() {
- defer wg.Done()
- partial := make([]float64, len(cache.Nodes))
-
- // Consume the next node, update centrality
- // parital to avoid unnecessary synchronizaton.
- for node := range work {
- betweennessCentrality(cache, node, partial)
- }
- partials <- partial
- }
-
- // Now start the N workers.
- wg.Add(bc.workers)
- for i := 0; i < bc.workers; i++ {
- go worker()
- }
-
- // Distribute work amongst workers.
- // Should be fair when the graph is sufficiently large.
- for node := range cache.Nodes {
- work <- node
- }
-
- close(work)
- wg.Wait()
- close(partials)
-
- // Collect and sum partials for final result.
- centrality := make([]float64, len(cache.Nodes))
- for partial := range partials {
- for i := 0; i < len(partial); i++ {
- centrality[i] += partial[i]
- }
- }
-
- // Get min/max to be able to normalize
- // centrality values between 0 and 1.
- bc.min = 0
- bc.max = 0
- if len(centrality) > 0 {
- for _, v := range centrality {
- if v < bc.min {
- bc.min = v
- } else if v > bc.max {
- bc.max = v
- }
- }
- }
-
- // Divide by two as this is an undirected graph.
- bc.min /= 2.0
- bc.max /= 2.0
-
- bc.centrality = make(map[NodeID]float64)
- for u, value := range centrality {
- // Divide by two as this is an undirected graph.
- bc.centrality[cache.Nodes[u]] = value / 2.0
- }
-
- return nil
-}
-
-// GetMetric returns the current centrality values for each node indexed
-// by node id.
-func (bc *BetweennessCentrality) GetMetric(normalize bool) map[NodeID]float64 {
- // Normalization factor.
- var z float64
- if (bc.max - bc.min) > 0 {
- z = 1.0 / (bc.max - bc.min)
- }
-
- centrality := make(map[NodeID]float64)
- for k, v := range bc.centrality {
- if normalize {
- v = (v - bc.min) * z
- }
- centrality[k] = v
- }
-
- return centrality
-}
diff --git a/lnd/autopilot/betweenness_centrality_test.go b/lnd/autopilot/betweenness_centrality_test.go
deleted file mode 100644
index 0391ef09..00000000
--- a/lnd/autopilot/betweenness_centrality_test.go
+++ /dev/null
@@ -1,146 +0,0 @@
-package autopilot
-
-import (
- "fmt"
- "os"
- "testing"
-
- "github.com/pkt-cash/pktd/btcutil/util"
- "github.com/pkt-cash/pktd/chaincfg/globalcfg"
- "github.com/stretchr/testify/require"
-)
-
-func TestBetweennessCentralityMetricConstruction(t *testing.T) {
- failing := []int{-1, 0}
- ok := []int{1, 10}
-
- for _, workers := range failing {
- m, err := NewBetweennessCentralityMetric(workers)
- util.RequireErr(
- t, err, "construction must fail with <= 0 workers",
- )
- require.Nil(t, m)
- }
-
- for _, workers := range ok {
- m, err := NewBetweennessCentralityMetric(workers)
- util.RequireNoErr(
- t, err, "construction must succeed with >= 1 workers",
- )
- require.NotNil(t, m)
- }
-}
-
-// Tests that empty graph results in empty centrality result.
-func TestBetweennessCentralityEmptyGraph(t *testing.T) {
- centralityMetric, err := NewBetweennessCentralityMetric(1)
- util.RequireNoErr(
- t, err,
- "construction must succeed with positive number of workers",
- )
-
- for _, chanGraph := range chanGraphs {
- graph, cleanup, err := chanGraph.genFunc()
- success := t.Run(chanGraph.name, func(t1 *testing.T) {
- util.RequireNoErr(t, err, "unable to create graph")
-
- if cleanup != nil {
- defer cleanup()
- }
-
- err := centralityMetric.Refresh(graph)
- util.RequireNoErr(t, err)
-
- centrality := centralityMetric.GetMetric(false)
- require.Equal(t, 0, len(centrality))
-
- centrality = centralityMetric.GetMetric(true)
- require.Equal(t, 0, len(centrality))
- })
- if !success {
- break
- }
- }
-}
-
-// Test betweenness centrality calculating using an example graph.
-func TestBetweennessCentralityWithNonEmptyGraph(t *testing.T) {
- workers := []int{1, 3, 9, 100}
-
- tests := []struct {
- normalize bool
- centrality []float64
- }{
- {
- normalize: true,
- centrality: normalizedTestGraphCentrality,
- },
- {
- normalize: false,
- centrality: testGraphCentrality,
- },
- }
-
- for _, numWorkers := range workers {
- for _, chanGraph := range chanGraphs {
- numWorkers := numWorkers
- graph, cleanup, err := chanGraph.genFunc()
- util.RequireNoErr(t, err, "unable to create graph")
-
- if cleanup != nil {
- defer cleanup()
- }
-
- testName := fmt.Sprintf(
- "%v %d workers", chanGraph.name, numWorkers,
- )
-
- success := t.Run(testName, func(t1 *testing.T) {
- metric, err := NewBetweennessCentralityMetric(
- numWorkers,
- )
- util.RequireNoErr(
- t, err,
- "construction must succeed with "+
- "positive number of workers",
- )
-
- graphNodes := buildTestGraph(
- t1, graph, centralityTestGraph,
- )
-
- err = metric.Refresh(graph)
- util.RequireNoErr(t, err)
-
- for _, expected := range tests {
- expected := expected
- centrality := metric.GetMetric(
- expected.normalize,
- )
-
- require.Equal(t,
- centralityTestGraph.nodes,
- len(centrality),
- )
-
- for i, c := range expected.centrality {
- nodeID := NewNodeID(
- graphNodes[i],
- )
- result, ok := centrality[nodeID]
- require.True(t, ok)
- require.Equal(t, c, result)
- }
- }
- })
- if !success {
- break
- }
- }
- }
-}
-
-func TestMain(m *testing.M) {
- globalcfg.SelectConfig(globalcfg.BitcoinDefaults())
- os.Exit(m.Run())
-}
diff --git a/lnd/autopilot/centrality_testdata_test.go b/lnd/autopilot/centrality_testdata_test.go
deleted file mode 100644
index 829a6a5d..00000000
--- a/lnd/autopilot/centrality_testdata_test.go
+++ /dev/null
@@ -1,68 +0,0 @@
-package autopilot
-
-import (
- "testing"
-
- "github.com/pkt-cash/pktd/btcec"
- "github.com/pkt-cash/pktd/btcutil"
- "github.com/pkt-cash/pktd/btcutil/util"
-)
-
-// testGraphDesc is a helper type to describe a test graph.
-type testGraphDesc struct {
- nodes int
- edges map[int][]int
-}
-
-var centralityTestGraph = testGraphDesc{
- nodes: 9,
- edges: map[int][]int{
- 0: {1, 2, 3},
- 1: {2},
- 2: {3},
- 3: {4, 5},
- 4: {5, 6, 7},
- 5: {6, 7},
- 6: {7, 8},
- },
-}
-
-var testGraphCentrality = []float64{
- 3.0, 0.0, 3.0, 15.0, 6.0, 6.0, 7.0, 0.0, 0.0,
-}
-
-var normalizedTestGraphCentrality = []float64{
- 0.2, 0.0, 0.2, 1.0, 0.4, 0.4, 7.0 / 15.0, 0.0, 0.0,
-}
-
-// buildTestGraph builds a test graph from a passed graph desriptor.
-func buildTestGraph(t *testing.T,
- graph testGraph, desc testGraphDesc) map[int]*btcec.PublicKey {
-
- nodes := make(map[int]*btcec.PublicKey)
-
- for i := 0; i < desc.nodes; i++ {
- key, err := graph.addRandNode()
- util.RequireNoErr(t, err, "cannot create random node")
-
- nodes[i] = key
- }
-
- chanCapacity := btcutil.UnitsPerCoin()
- for u, neighbors := range desc.edges {
- for _, v := range neighbors {
- _, _, err := graph.addRandChannel(
- nodes[u], nodes[v], chanCapacity,
- )
- util.RequireNoErr(t, err,
- "unexpected error adding random channel",
- )
- if err != nil {
- t.Fatalf("unexpected error adding"+
- "random channel: %v", err)
- }
- }
- }
-
- return nodes
-}
diff --git a/lnd/autopilot/choice.go b/lnd/autopilot/choice.go
deleted file mode 100644
index f52654e0..00000000
--- a/lnd/autopilot/choice.go
+++ /dev/null
@@ -1,89 +0,0 @@
-package autopilot
-
-import (
- "math/rand"
-
- "github.com/pkt-cash/pktd/btcutil/er"
-)
-
-var Err = er.NewErrorType("lnd.autopilot")
-
-// ErrNoPositive is returned from weightedChoice when there are no positive
-// weights left to choose from.
-var ErrNoPositive = Err.CodeWithDetail("ErrNoPositive", "no positive weights left")
-
-// weightedChoice draws a random index from the slice of weights, with a
-// probability propotional to the weight at the given index.
-func weightedChoice(w []float64) (int, er.R) {
- // Calculate the sum of weights.
- var sum float64
- for _, v := range w {
- sum += v
- }
-
- if sum <= 0 {
- return 0, ErrNoPositive.Default()
- }
-
- // Pick a random number in the range [0.0, 1.0) and multiply it with
- // the sum of weights. Then we'll iterate the weights until the number
- // goes below 0. This means that each index is picked with a probablity
- // equal to their normalized score.
- //
- // Example:
- // Items with scores [1, 5, 2, 2]
- // Normalized scores [0.1, 0.5, 0.2, 0.2]
- // Imagine they each occupy a "range" equal to their normalized score
- // in [0, 1.0]:
- // [|-0.1-||-----0.5-----||--0.2--||--0.2--|]
- // The following loop is now equivalent to "hitting" the intervals.
- r := rand.Float64() * sum
- for i := range w {
- r -= w[i]
- if r <= 0 {
- return i, nil
- }
- }
-
- return 0, er.Errorf("unable to make choice")
-}
-
-// chooseN picks at random min[n, len(s)] nodes if from the NodeScore map, with
-// a probability weighted by their score.
-func chooseN(n uint32, s map[NodeID]*NodeScore) (
- map[NodeID]*NodeScore, er.R) {
-
- // Keep track of the number of nodes not yet chosen, in addition to
- // their scores and NodeIDs.
- rem := len(s)
- scores := make([]float64, len(s))
- nodeIDs := make([]NodeID, len(s))
- i := 0
- for k, v := range s {
- scores[i] = v.Score
- nodeIDs[i] = k
- i++
- }
-
- // Pick a weighted choice from the remaining nodes as long as there are
- // nodes left, and we haven't already picked n.
- chosen := make(map[NodeID]*NodeScore)
- for len(chosen) < int(n) && rem > 0 {
- choice, err := weightedChoice(scores)
- if ErrNoPositive.Is(err) {
- return chosen, nil
- } else if err != nil {
- return nil, err
- }
-
- nID := nodeIDs[choice]
-
- chosen[nID] = s[nID]
-
- // We set the score of the chosen node to 0, so it won't be
- // picked the next iteration.
- scores[choice] = 0
- }
-
- return chosen, nil
-}
diff --git a/lnd/autopilot/choice_test.go b/lnd/autopilot/choice_test.go
deleted file mode 100644
index 44e50eb6..00000000
--- a/lnd/autopilot/choice_test.go
+++ /dev/null
@@ -1,338 +0,0 @@
-package autopilot
-
-import (
- "encoding/binary"
- "math/rand"
- "reflect"
- "testing"
- "testing/quick"
-)
-
-// TestWeightedChoiceEmptyMap tests that passing in an empty slice of weights
-// returns an error.
-func TestWeightedChoiceEmptyMap(t *testing.T) {
- t.Parallel()
-
- var w []float64
- _, err := weightedChoice(w)
- if !ErrNoPositive.Is(err) {
- t.Fatalf("expected ErrNoPositive when choosing in "+
- "empty map, instead got %v", err)
- }
-}
-
-// singeNonZero is a type used to generate float64 slices with one non-zero
-// element.
-type singleNonZero []float64
-
-// Generate generates a value of type sinelNonZero to be used during
-// QuickTests.
-func (singleNonZero) Generate(rand *rand.Rand, size int) reflect.Value {
- w := make([]float64, size)
-
- // Pick a random index and set it to a random float.
- i := rand.Intn(size)
- w[i] = rand.Float64()
-
- return reflect.ValueOf(w)
-}
-
-// TestWeightedChoiceSingleIndex tests that choosing randomly in a slice with
-// one positive element always returns that one index.
-func TestWeightedChoiceSingleIndex(t *testing.T) {
- t.Parallel()
-
- // Helper that returns the index of the non-zero element.
- allButOneZero := func(weights []float64) (bool, int) {
- var (
- numZero uint32
- nonZeroEl int
- )
-
- for i, w := range weights {
- if w != 0 {
- numZero++
- nonZeroEl = i
- }
- }
-
- return numZero == 1, nonZeroEl
- }
-
- property := func(weights singleNonZero) bool {
- // Make sure the generated slice has exactly one non-zero
- // element.
- conditionMet, nonZeroElem := allButOneZero(weights[:])
- if !conditionMet {
- return false
- }
-
- // Call weightedChoice and assert it picks the non-zero
- // element.
- choice, err := weightedChoice(weights[:])
- if err != nil {
- return false
- }
- return choice == nonZeroElem
- }
-
- if err := quick.Check(property, nil); err != nil {
- t.Fatal(err)
- }
-}
-
-// nonNegative is a type used to generate float64 slices with non-negative
-// elements.
-type nonNegative []float64
-
-// Generate generates a value of type nonNegative to be used during
-// QuickTests.
-func (nonNegative) Generate(rand *rand.Rand, size int) reflect.Value {
- w := make([]float64, size)
-
- for i := range w {
- r := rand.Float64()
-
- // For very small weights it won't work to check deviation from
- // expected value, so we set them to zero.
- if r < 0.01*float64(size) {
- r = 0
- }
- w[i] = float64(r)
- }
- return reflect.ValueOf(w)
-}
-
-func assertChoice(w []float64, iterations int) bool {
- var sum float64
- for _, v := range w {
- sum += v
- }
-
- // Calculate the expected frequency of each choice.
- expFrequency := make([]float64, len(w))
- for i, ww := range w {
- expFrequency[i] = ww / sum
- }
-
- chosen := make(map[int]int)
- for i := 0; i < iterations; i++ {
- res, err := weightedChoice(w)
- if err != nil {
- return false
- }
- chosen[res]++
- }
-
- // Since this is random we check that the number of times chosen is
- // within 20% of the expected value.
- totalChoices := 0
- for i, f := range expFrequency {
- exp := float64(iterations) * f
- v := float64(chosen[i])
- totalChoices += chosen[i]
- expHigh := exp + exp/5
- expLow := exp - exp/5
- if v < expLow || v > expHigh {
- return false
- }
- }
-
- // The sum of choices must be exactly iterations of course.
- return totalChoices == iterations
-
-}
-
-// TestWeightedChoiceDistribution asserts that the weighted choice algorithm
-// chooses among indexes according to their scores.
-func TestWeightedChoiceDistribution(t *testing.T) {
- const iterations = 100000
-
- property := func(weights nonNegative) bool {
- return assertChoice(weights, iterations)
- }
-
- if err := quick.Check(property, nil); err != nil {
- t.Fatal(err)
- }
-}
-
-// TestChooseNEmptyMap checks that chooseN returns an empty result when no
-// nodes are chosen among.
-func TestChooseNEmptyMap(t *testing.T) {
- t.Parallel()
-
- nodes := map[NodeID]*NodeScore{}
- property := func(n uint32) bool {
- res, err := chooseN(n, nodes)
- if err != nil {
- return false
- }
-
- // Result should always be empty.
- return len(res) == 0
- }
-
- if err := quick.Check(property, nil); err != nil {
- t.Fatal(err)
- }
-}
-
-// candidateMapVarLen is a type we'll use to generate maps of various lengths
-// up to 255 to be used during QuickTests.
-type candidateMapVarLen map[NodeID]*NodeScore
-
-// Generate generates a value of type candidateMapVarLen to be used during
-// QuickTests.
-func (candidateMapVarLen) Generate(rand *rand.Rand, size int) reflect.Value {
- nodes := make(map[NodeID]*NodeScore)
-
- // To avoid creating huge maps, we restrict them to max uint8 len.
- n := uint8(rand.Uint32())
-
- for i := uint8(0); i < n; i++ {
- s := rand.Float64()
-
- // We set small values to zero, to ensure we handle these
- // correctly.
- if s < 0.01 {
- s = 0
- }
-
- var nID [33]byte
- binary.BigEndian.PutUint32(nID[:], uint32(i))
- nodes[nID] = &NodeScore{
- Score: s,
- }
- }
-
- return reflect.ValueOf(nodes)
-}
-
-// TestChooseNMinimum test that chooseN returns the minimum of the number of
-// nodes we request and the number of positively scored nodes in the given map.
-func TestChooseNMinimum(t *testing.T) {
- t.Parallel()
-
- // Helper to count the number of positive scores in the given map.
- numPositive := func(nodes map[NodeID]*NodeScore) int {
- cnt := 0
- for _, v := range nodes {
- if v.Score > 0 {
- cnt++
- }
- }
- return cnt
- }
-
- // We use let the type of n be uint8 to avoid generating huge numbers.
- property := func(nodes candidateMapVarLen, n uint8) bool {
- res, err := chooseN(uint32(n), nodes)
- if err != nil {
- return false
- }
-
- positive := numPositive(nodes)
-
- // Result should always be the minimum of the number of nodes
- // we wanted to select and the number of positively scored
- // nodes in the map.
- min := positive
- if int(n) < min {
- min = int(n)
- }
-
- if len(res) != min {
- return false
-
- }
- return true
- }
-
- if err := quick.Check(property, nil); err != nil {
- t.Fatal(err)
- }
-}
-
-// TestChooseNSample sanity checks that nodes are picked by chooseN according
-// to their scores.
-func TestChooseNSample(t *testing.T) {
- t.Parallel()
-
- const numNodes = 500
- const maxIterations = 100000
- fifth := uint32(numNodes / 5)
-
- nodes := make(map[NodeID]*NodeScore)
-
- // we make 5 buckets of nodes: 0, 0.1, 0.2, 0.4 and 0.8 score. We want
- // to check that zero scores never gets chosen, while a doubling the
- // score makes a node getting chosen about double the amount (this is
- // true only when n <<< numNodes).
- j := 2 * fifth
- score := 0.1
- for i := uint32(0); i < numNodes; i++ {
-
- // Each time i surpasses j we double the score we give to the
- // next fifth of nodes.
- if i >= j {
- score *= 2
- j += fifth
- }
- s := score
-
- // The first 1/5 of nodes we give a score of 0.
- if i < fifth {
- s = 0
- }
-
- var nID [33]byte
- binary.BigEndian.PutUint32(nID[:], i)
- nodes[nID] = &NodeScore{
- Score: s,
- }
- }
-
- // For each value of N we'll check that the nodes are picked the
- // expected number of times over time.
- for _, n := range []uint32{1, 5, 10, 20, 50} {
- // Since choosing more nodes will result in chooseN getting
- // slower we decrease the number of iterations. This is okay
- // since the variance in the total picks for a node will be
- // lower when choosing more nodes each time.
- iterations := maxIterations / n
- count := make(map[NodeID]int)
- for i := 0; i < int(iterations); i++ {
- res, err := chooseN(n, nodes)
- if err != nil {
- t.Fatalf("failed choosing nodes: %v", err)
- }
-
- for nID := range res {
- count[nID]++
- }
- }
-
- // Sum the number of times a node in each score bucket was
- // picked.
- sums := make(map[float64]int)
- for nID, s := range nodes {
- sums[s.Score] += count[nID]
- }
-
- // The count of each bucket should be about double of the
- // previous bucket. Since this is all random, we check that
- // the result is within 20% of the expected value.
- for _, score := range []float64{0.2, 0.4, 0.8} {
- cnt := sums[score]
- half := cnt / 2
- expLow := half - half/5
- expHigh := half + half/5
- if sums[score/2] < expLow || sums[score/2] > expHigh {
- t.Fatalf("expected the nodes with score %v "+
- "to be chosen about %v times, instead "+
- "was %v", score/2, half, sums[score/2])
- }
- }
- }
-}
diff --git a/lnd/autopilot/combinedattach.go b/lnd/autopilot/combinedattach.go
deleted file mode 100644
index 39c7e0f1..00000000
--- a/lnd/autopilot/combinedattach.go
+++ /dev/null
@@ -1,174 +0,0 @@
-package autopilot
-
-import (
- "github.com/pkt-cash/pktd/btcutil"
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/pktlog/log"
-)
-
-// WeightedHeuristic is a tuple that associates a weight to an
-// AttachmentHeuristic. This is used to determining a node's final score when
-// querying several heuristics for scores.
-type WeightedHeuristic struct {
- // Weight is this AttachmentHeuristic's relative weight factor. It
- // should be between 0.0 and 1.0.
- Weight float64
-
- AttachmentHeuristic
-}
-
-// WeightedCombAttachment is an implementation of the AttachmentHeuristic
-// interface that combines the scores given by several sub-heuristics into one.
-type WeightedCombAttachment struct {
- heuristics []*WeightedHeuristic
-}
-
-// NewWeightedCombAttachment creates a new instance of a WeightedCombAttachment.
-func NewWeightedCombAttachment(h ...*WeightedHeuristic) (
- *WeightedCombAttachment, er.R) {
-
- // The sum of weights given to the sub-heuristics must sum to exactly
- // 1.0.
- var sum float64
- for _, w := range h {
- sum += w.Weight
- }
-
- if sum != 1.0 {
- return nil, er.Errorf("weights MUST sum to 1.0 (was %v)", sum)
- }
-
- return &WeightedCombAttachment{
- heuristics: h,
- }, nil
-}
-
-// A compile time assertion to ensure WeightedCombAttachment meets the
-// AttachmentHeuristic and ScoreSettable interfaces.
-var _ AttachmentHeuristic = (*WeightedCombAttachment)(nil)
-var _ ScoreSettable = (*WeightedCombAttachment)(nil)
-
-// Name returns the name of this heuristic.
-//
-// NOTE: This is a part of the AttachmentHeuristic interface.
-func (c *WeightedCombAttachment) Name() string {
- return "weightedcomb"
-}
-
-// NodeScores is a method that given the current channel graph, current set of
-// local channels and funds available, scores the given nodes according to the
-// preference of opening a channel with them. The returned channel candidates
-// maps the NodeID to an attachment directive containing a score and a channel
-// size.
-//
-// The scores is determined by quering the set of sub-heuristics, then
-// combining these scores into a final score according to the active
-// configuration.
-//
-// The returned scores will be in the range [0, 1.0], where 0 indicates no
-// improvement in connectivity if a channel is opened to this node, while 1.0
-// is the maximum possible improvement in connectivity.
-//
-// NOTE: This is a part of the AttachmentHeuristic interface.
-func (c *WeightedCombAttachment) NodeScores(g ChannelGraph, chans []LocalChannel,
- chanSize btcutil.Amount, nodes map[NodeID]struct{}) (
- map[NodeID]*NodeScore, er.R) {
-
- // We now query each heuristic to determine the score they give to the
- // nodes for the given channel size.
- var subScores []map[NodeID]*NodeScore
- for _, h := range c.heuristics {
- log.Tracef("Getting scores from sub heuristic %v", h.Name())
-
- s, err := h.NodeScores(
- g, chans, chanSize, nodes,
- )
- if err != nil {
- return nil, er.Errorf("unable to get sub score: %v",
- err)
- }
-
- subScores = append(subScores, s)
- }
-
- // We combine the scores given by the sub-heuristics by using the
- // heruistics' given weight factor.
- scores := make(map[NodeID]*NodeScore)
- for nID := range nodes {
- score := &NodeScore{
- NodeID: nID,
- }
-
- // Each sub-heuristic should have scored the node, if not it is
- // implicitly given a zero score by that heuristic.
- for i, h := range c.heuristics {
- sub, ok := subScores[i][nID]
- if !ok {
- log.Tracef("No score given to node %x by sub "+
- "heuristic %v", nID[:], h.Name())
- continue
- }
- // Use the heuristic's weight factor to determine of
- // how much weight we should give to this particular
- // score.
- subScore := h.Weight * sub.Score
- log.Tracef("Giving node %x a sub score of %v "+
- "(%v * %v) from sub heuristic %v", nID[:],
- subScore, h.Weight, sub.Score, h.Name())
-
- score.Score += subScore
- }
-
- log.Tracef("Node %x got final combined score %v", nID[:],
- score.Score)
-
- switch {
- // Instead of adding a node with score 0 to the returned set,
- // we just skip it.
- case score.Score == 0:
- continue
-
- // Sanity check the new score.
- case score.Score < 0 || score.Score > 1.0:
- return nil, er.Errorf("invalid node score from "+
- "combination: %v", score.Score)
- }
-
- scores[nID] = score
- }
-
- return scores, nil
-}
-
-// SetNodeScores is used to set the internal map from NodeIDs to scores. The
-// passed scores must be in the range [0, 1.0]. The fist parameter is the name
-// of the targeted heuristic, to allow recursively target specific
-// sub-heuristics. The returned boolean indicates whether the targeted
-// heuristic was found.
-//
-// Since this heuristic doesn't keep any internal scores, it will recursively
-// apply the scores to its sub-heuristics.
-//
-// NOTE: This is a part of the ScoreSettable interface.
-func (c *WeightedCombAttachment) SetNodeScores(targetHeuristic string,
- newScores map[NodeID]float64) (bool, er.R) {
-
- found := false
- for _, h := range c.heuristics {
- // It must be ScoreSettable to be available for external
- // scores.
- s, ok := h.AttachmentHeuristic.(ScoreSettable)
- if !ok {
- continue
- }
-
- // Heuristic supports scoring, attempt to set them.
- applied, err := s.SetNodeScores(targetHeuristic, newScores)
- if err != nil {
- return false, err
- }
- found = found || applied
- }
-
- return found, nil
-}
diff --git a/lnd/autopilot/externalscoreattach.go b/lnd/autopilot/externalscoreattach.go
deleted file mode 100644
index 77b504b8..00000000
--- a/lnd/autopilot/externalscoreattach.go
+++ /dev/null
@@ -1,130 +0,0 @@
-package autopilot
-
-import (
- "sync"
-
- "github.com/pkt-cash/pktd/btcutil"
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/pktlog/log"
-)
-
-// ExternalScoreAttachment is an implementation of the AttachmentHeuristic
-// interface that allows an external source to provide it with node scores.
-type ExternalScoreAttachment struct {
- // TODO(halseth): persist across restarts.
- nodeScores map[NodeID]float64
-
- sync.Mutex
-}
-
-// NewExternalScoreAttachment creates a new instance of an
-// ExternalScoreAttachment.
-func NewExternalScoreAttachment() *ExternalScoreAttachment {
- return &ExternalScoreAttachment{}
-}
-
-// A compile time assertion to ensure ExternalScoreAttachment meets the
-// AttachmentHeuristic and ScoreSettable interfaces.
-var _ AttachmentHeuristic = (*ExternalScoreAttachment)(nil)
-var _ ScoreSettable = (*ExternalScoreAttachment)(nil)
-
-// Name returns the name of this heuristic.
-//
-// NOTE: This is a part of the AttachmentHeuristic interface.
-func (s *ExternalScoreAttachment) Name() string {
- return "externalscore"
-}
-
-// SetNodeScores is used to set the internal map from NodeIDs to scores. The
-// passed scores must be in the range [0, 1.0]. The fist parameter is the name
-// of the targeted heuristic, to allow recursively target specific
-// sub-heuristics. The returned boolean indicates whether the targeted
-// heuristic was found.
-//
-// NOTE: This is a part of the ScoreSettable interface.
-func (s *ExternalScoreAttachment) SetNodeScores(targetHeuristic string,
- newScores map[NodeID]float64) (bool, er.R) {
-
- // Return if this heuristic wasn't targeted.
- if targetHeuristic != s.Name() {
- return false, nil
- }
-
- // Since there's a requirement that all score are in the range [0,
- // 1.0], we validate them before setting the internal list.
- for nID, s := range newScores {
- if s < 0 || s > 1.0 {
- return false, er.Errorf("invalid score %v for "+
- "nodeID %v", s, nID)
- }
- }
-
- s.Lock()
- defer s.Unlock()
-
- s.nodeScores = newScores
- log.Tracef("Setting %v external scores", len(s.nodeScores))
-
- return true, nil
-}
-
-// NodeScores is a method that given the current channel graph and current set
-// of local channels, scores the given nodes according to the preference of
-// opening a channel of the given size with them. The returned channel
-// candidates maps the NodeID to a NodeScore for the node.
-//
-// The returned scores will be in the range [0, 1.0], where 0 indicates no
-// improvement in connectivity if a channel is opened to this node, while 1.0
-// is the maximum possible improvement in connectivity.
-//
-// The scores are determined by checking the internal node scores list. Nodes
-// not known will get a score of 0.
-//
-// NOTE: This is a part of the AttachmentHeuristic interface.
-func (s *ExternalScoreAttachment) NodeScores(g ChannelGraph, chans []LocalChannel,
- chanSize btcutil.Amount, nodes map[NodeID]struct{}) (
- map[NodeID]*NodeScore, er.R) {
-
- existingPeers := make(map[NodeID]struct{})
- for _, c := range chans {
- existingPeers[c.Node] = struct{}{}
- }
-
- s.Lock()
- defer s.Unlock()
-
- log.Tracef("External scoring %v nodes, from %v set scores",
- len(nodes), len(s.nodeScores))
-
- // Fill the map of candidates to return.
- candidates := make(map[NodeID]*NodeScore)
- for nID := range nodes {
- var score float64
- if nodeScore, ok := s.nodeScores[nID]; ok {
- score = nodeScore
- }
-
- // If the node is among or existing channel peers, we don't
- // need another channel.
- if _, ok := existingPeers[nID]; ok {
- log.Tracef("Skipping existing peer %x from external "+
- "score results", nID[:])
- continue
- }
-
- log.Tracef("External score %v given to node %x", score, nID[:])
-
- // Instead of adding a node with score 0 to the returned set,
- // we just skip it.
- if score == 0 {
- continue
- }
-
- candidates[nID] = &NodeScore{
- NodeID: nID,
- Score: score,
- }
- }
-
- return candidates, nil
-}
diff --git a/lnd/autopilot/externalscoreattach_test.go b/lnd/autopilot/externalscoreattach_test.go
deleted file mode 100644
index bae28f0e..00000000
--- a/lnd/autopilot/externalscoreattach_test.go
+++ /dev/null
@@ -1,101 +0,0 @@
-package autopilot_test
-
-import (
- "testing"
-
- "github.com/pkt-cash/pktd/btcec"
- "github.com/pkt-cash/pktd/btcutil"
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/lnd/autopilot"
-)
-
-// randKey returns a random public key.
-func randKey() (*btcec.PublicKey, er.R) {
- priv, err := btcec.NewPrivateKey(btcec.S256())
- if err != nil {
- return nil, err
- }
-
- return priv.PubKey(), nil
-}
-
-// TestSetNodeScores tests that the scores returned by the
-// ExternalScoreAttachment correctly reflects the scores we set last.
-func TestSetNodeScores(t *testing.T) {
- t.Parallel()
-
- const name = "externalscore"
-
- h := autopilot.NewExternalScoreAttachment()
-
- // Create a list of random node IDs.
- const numKeys = 20
- var pubkeys []autopilot.NodeID
- for i := 0; i < numKeys; i++ {
- k, err := randKey()
- if err != nil {
- t.Fatal(err)
- }
-
- nID := autopilot.NewNodeID(k)
- pubkeys = append(pubkeys, nID)
- }
-
- // Set the score of half of the nodes.
- scores := make(map[autopilot.NodeID]float64)
- for i := 0; i < numKeys/2; i++ {
- nID := pubkeys[i]
- scores[nID] = 0.05 * float64(i)
- }
-
- applied, err := h.SetNodeScores(name, scores)
- if err != nil {
- t.Fatal(err)
- }
-
- if !applied {
- t.Fatalf("scores were not applied")
- }
-
- // Query all scores, half should be set, half should be zero.
- q := make(map[autopilot.NodeID]struct{})
- for _, nID := range pubkeys {
- q[nID] = struct{}{}
- }
- resp, err := h.NodeScores(
- nil, nil, btcutil.Amount(btcutil.UnitsPerCoin()), q,
- )
- if err != nil {
- t.Fatal(err)
- }
-
- for i := 0; i < numKeys; i++ {
- var expected float64
- if i < numKeys/2 {
- expected = 0.05 * float64(i)
- }
- nID := pubkeys[i]
-
- var score float64
- if s, ok := resp[nID]; ok {
- score = s.Score
- }
-
- if score != expected {
- t.Fatalf("expected score %v, got %v",
- expected, score)
- }
-
- }
-
- // Try to apply scores with bogus name, should not be applied.
- applied, err = h.SetNodeScores("dummy", scores)
- if err != nil {
- t.Fatal(err)
- }
-
- if applied {
- t.Fatalf("scores were applied")
- }
-
-}
diff --git a/lnd/autopilot/graph.go b/lnd/autopilot/graph.go
deleted file mode 100644
index 5bd7f17e..00000000
--- a/lnd/autopilot/graph.go
+++ /dev/null
@@ -1,525 +0,0 @@
-package autopilot
-
-import (
- "bytes"
- "math/big"
- "net"
- "sort"
- "sync/atomic"
- "time"
-
- "github.com/pkt-cash/pktd/btcec"
- "github.com/pkt-cash/pktd/btcutil"
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/lnd/channeldb"
- "github.com/pkt-cash/pktd/lnd/channeldb/kvdb"
- "github.com/pkt-cash/pktd/lnd/lnwire"
- "github.com/pkt-cash/pktd/lnd/routing/route"
-)
-
-var (
- testSig = &btcec.Signature{
- R: new(big.Int),
- S: new(big.Int),
- }
- _, _ = testSig.R.SetString("63724406601629180062774974542967536251589935445068131219452686511677818569431", 10)
- _, _ = testSig.S.SetString("18801056069249825825291287104931333862866033135609736119018462340006816851118", 10)
-
- chanIDCounter uint64 // To be used atomically.
-)
-
-// databaseChannelGraph wraps a channeldb.ChannelGraph instance with the
-// necessary API to properly implement the autopilot.ChannelGraph interface.
-//
-// TODO(roasbeef): move inmpl to main package?
-type databaseChannelGraph struct {
- db *channeldb.ChannelGraph
-}
-
-// A compile time assertion to ensure databaseChannelGraph meets the
-// autopilot.ChannelGraph interface.
-var _ ChannelGraph = (*databaseChannelGraph)(nil)
-
-// ChannelGraphFromDatabase returns an instance of the autopilot.ChannelGraph
-// backed by a live, open channeldb instance.
-func ChannelGraphFromDatabase(db *channeldb.ChannelGraph) ChannelGraph {
- return &databaseChannelGraph{
- db: db,
- }
-}
-
-// type dbNode is a wrapper struct around a database transaction an
-// channeldb.LightningNode. The wrapper method implement the autopilot.Node
-// interface.
-type dbNode struct {
- tx kvdb.RTx
-
- node *channeldb.LightningNode
-}
-
-// A compile time assertion to ensure dbNode meets the autopilot.Node
-// interface.
-var _ Node = (*dbNode)(nil)
-
-// PubKey is the identity public key of the node. This will be used to attempt
-// to target a node for channel opening by the main autopilot agent. The key
-// will be returned in serialized compressed format.
-//
-// NOTE: Part of the autopilot.Node interface.
-func (d dbNode) PubKey() [33]byte {
- return d.node.PubKeyBytes
-}
-
-// Addrs returns a slice of publicly reachable public TCP addresses that the
-// peer is known to be listening on.
-//
-// NOTE: Part of the autopilot.Node interface.
-func (d dbNode) Addrs() []net.Addr {
- return d.node.Addresses
-}
-
-// ForEachChannel is a higher-order function that will be used to iterate
-// through all edges emanating from/to the target node. For each active
-// channel, this function should be called with the populated ChannelEdge that
-// describes the active channel.
-//
-// NOTE: Part of the autopilot.Node interface.
-func (d dbNode) ForEachChannel(cb func(ChannelEdge) er.R) er.R {
- return d.node.ForEachChannel(d.tx, func(tx kvdb.RTx,
- ei *channeldb.ChannelEdgeInfo, ep, _ *channeldb.ChannelEdgePolicy) er.R {
-
- // Skip channels for which no outgoing edge policy is available.
- //
- // TODO(joostjager): Ideally the case where channels have a nil
- // policy should be supported, as autopilot is not looking at
- // the policies. For now, it is not easily possible to get a
- // reference to the other end LightningNode object without
- // retrieving the policy.
- if ep == nil {
- return nil
- }
-
- edge := ChannelEdge{
- ChanID: lnwire.NewShortChanIDFromInt(ep.ChannelID),
- Capacity: ei.Capacity,
- Peer: dbNode{
- tx: tx,
- node: ep.Node,
- },
- }
-
- return cb(edge)
- })
-}
-
-// ForEachNode is a higher-order function that should be called once for each
-// connected node within the channel graph. If the passed callback returns an
-// error, then execution should be terminated.
-//
-// NOTE: Part of the autopilot.ChannelGraph interface.
-func (d *databaseChannelGraph) ForEachNode(cb func(Node) er.R) er.R {
- return d.db.ForEachNode(func(tx kvdb.RTx, n *channeldb.LightningNode) er.R {
- // We'll skip over any node that doesn't have any advertised
- // addresses. As we won't be able to reach them to actually
- // open any channels.
- if len(n.Addresses) == 0 {
- return nil
- }
-
- node := dbNode{
- tx: tx,
- node: n,
- }
- return cb(node)
- })
-}
-
-// addRandChannel creates a new channel two target nodes. This function is
-// meant to aide in the generation of random graphs for use within test cases
-// the exercise the autopilot package.
-func (d *databaseChannelGraph) addRandChannel(node1, node2 *btcec.PublicKey,
- capacity btcutil.Amount) (*ChannelEdge, *ChannelEdge, er.R) {
-
- fetchNode := func(pub *btcec.PublicKey) (*channeldb.LightningNode, er.R) {
- if pub != nil {
- vertex, err := route.NewVertexFromBytes(
- pub.SerializeCompressed(),
- )
- if err != nil {
- return nil, err
- }
-
- dbNode, err := d.db.FetchLightningNode(nil, vertex)
- switch {
- case channeldb.ErrGraphNodeNotFound.Is(err):
- fallthrough
- case channeldb.ErrGraphNotFound.Is(err):
- graphNode := &channeldb.LightningNode{
- HaveNodeAnnouncement: true,
- Addresses: []net.Addr{
- &net.TCPAddr{
- IP: bytes.Repeat([]byte("a"), 16),
- },
- },
- Features: lnwire.NewFeatureVector(
- nil, lnwire.Features,
- ),
- AuthSigBytes: testSig.Serialize(),
- }
- graphNode.AddPubKey(pub)
- if err := d.db.AddLightningNode(graphNode); err != nil {
- return nil, err
- }
- case err != nil:
- return nil, err
- }
-
- return dbNode, nil
- }
-
- nodeKey, err := randKey()
- if err != nil {
- return nil, err
- }
- dbNode := &channeldb.LightningNode{
- HaveNodeAnnouncement: true,
- Addresses: []net.Addr{
- &net.TCPAddr{
- IP: bytes.Repeat([]byte("a"), 16),
- },
- },
- Features: lnwire.NewFeatureVector(
- nil, lnwire.Features,
- ),
- AuthSigBytes: testSig.Serialize(),
- }
- dbNode.AddPubKey(nodeKey)
- if err := d.db.AddLightningNode(dbNode); err != nil {
- return nil, err
- }
-
- return dbNode, nil
- }
-
- vertex1, err := fetchNode(node1)
- if err != nil {
- return nil, nil, err
- }
-
- vertex2, err := fetchNode(node2)
- if err != nil {
- return nil, nil, err
- }
-
- var lnNode1, lnNode2 *btcec.PublicKey
- if bytes.Compare(vertex1.PubKeyBytes[:], vertex2.PubKeyBytes[:]) == -1 {
- lnNode1, _ = vertex1.PubKey()
- lnNode2, _ = vertex2.PubKey()
- } else {
- lnNode1, _ = vertex2.PubKey()
- lnNode2, _ = vertex1.PubKey()
- }
-
- chanID := randChanID()
- edge := &channeldb.ChannelEdgeInfo{
- ChannelID: chanID.ToUint64(),
- Capacity: capacity,
- }
- edge.AddNodeKeys(lnNode1, lnNode2, lnNode1, lnNode2)
- if err := d.db.AddChannelEdge(edge); err != nil {
- return nil, nil, err
- }
- edgePolicy := &channeldb.ChannelEdgePolicy{
- SigBytes: testSig.Serialize(),
- ChannelID: chanID.ToUint64(),
- LastUpdate: time.Now(),
- TimeLockDelta: 10,
- MinHTLC: 1,
- MaxHTLC: lnwire.NewMSatFromSatoshis(capacity),
- FeeBaseMSat: 10,
- FeeProportionalMillionths: 10000,
- MessageFlags: 1,
- ChannelFlags: 0,
- }
-
- if err := d.db.UpdateEdgePolicy(edgePolicy); err != nil {
- return nil, nil, err
- }
- edgePolicy = &channeldb.ChannelEdgePolicy{
- SigBytes: testSig.Serialize(),
- ChannelID: chanID.ToUint64(),
- LastUpdate: time.Now(),
- TimeLockDelta: 10,
- MinHTLC: 1,
- MaxHTLC: lnwire.NewMSatFromSatoshis(capacity),
- FeeBaseMSat: 10,
- FeeProportionalMillionths: 10000,
- MessageFlags: 1,
- ChannelFlags: 1,
- }
- if err := d.db.UpdateEdgePolicy(edgePolicy); err != nil {
- return nil, nil, err
- }
-
- return &ChannelEdge{
- ChanID: chanID,
- Capacity: capacity,
- Peer: dbNode{
- node: vertex1,
- },
- },
- &ChannelEdge{
- ChanID: chanID,
- Capacity: capacity,
- Peer: dbNode{
- node: vertex2,
- },
- },
- nil
-}
-
-func (d *databaseChannelGraph) addRandNode() (*btcec.PublicKey, er.R) {
- nodeKey, err := randKey()
- if err != nil {
- return nil, err
- }
- dbNode := &channeldb.LightningNode{
- HaveNodeAnnouncement: true,
- Addresses: []net.Addr{
- &net.TCPAddr{
- IP: bytes.Repeat([]byte("a"), 16),
- },
- },
- Features: lnwire.NewFeatureVector(
- nil, lnwire.Features,
- ),
- AuthSigBytes: testSig.Serialize(),
- }
- dbNode.AddPubKey(nodeKey)
- if err := d.db.AddLightningNode(dbNode); err != nil {
- return nil, err
- }
-
- return nodeKey, nil
-
-}
-
-// memChannelGraph is an implementation of the autopilot.ChannelGraph backed by
-// an in-memory graph.
-type memChannelGraph struct {
- graph map[NodeID]*memNode
-}
-
-// A compile time assertion to ensure memChannelGraph meets the
-// autopilot.ChannelGraph interface.
-var _ ChannelGraph = (*memChannelGraph)(nil)
-
-// newMemChannelGraph creates a new blank in-memory channel graph
-// implementation.
-func newMemChannelGraph() *memChannelGraph {
- return &memChannelGraph{
- graph: make(map[NodeID]*memNode),
- }
-}
-
-// ForEachNode is a higher-order function that should be called once for each
-// connected node within the channel graph. If the passed callback returns an
-// error, then execution should be terminated.
-//
-// NOTE: Part of the autopilot.ChannelGraph interface.
-func (m memChannelGraph) ForEachNode(cb func(Node) er.R) er.R {
- for _, node := range m.graph {
- if err := cb(node); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-// randChanID generates a new random channel ID.
-func randChanID() lnwire.ShortChannelID {
- id := atomic.AddUint64(&chanIDCounter, 1)
- return lnwire.NewShortChanIDFromInt(id)
-}
-
-// randKey returns a random public key.
-func randKey() (*btcec.PublicKey, er.R) {
- priv, err := btcec.NewPrivateKey(btcec.S256())
- if err != nil {
- return nil, err
- }
-
- return priv.PubKey(), nil
-}
-
-// addRandChannel creates a new channel two target nodes. This function is
-// meant to aide in the generation of random graphs for use within test cases
-// the exercise the autopilot package.
-func (m *memChannelGraph) addRandChannel(node1, node2 *btcec.PublicKey,
- capacity btcutil.Amount) (*ChannelEdge, *ChannelEdge, er.R) {
-
- var (
- vertex1, vertex2 *memNode
- ok bool
- )
-
- if node1 != nil {
- vertex1, ok = m.graph[NewNodeID(node1)]
- if !ok {
- vertex1 = &memNode{
- pub: node1,
- addrs: []net.Addr{
- &net.TCPAddr{
- IP: bytes.Repeat([]byte("a"), 16),
- },
- },
- }
- }
- } else {
- newPub, err := randKey()
- if err != nil {
- return nil, nil, err
- }
- vertex1 = &memNode{
- pub: newPub,
- addrs: []net.Addr{
- &net.TCPAddr{
- IP: bytes.Repeat([]byte("a"), 16),
- },
- },
- }
- }
-
- if node2 != nil {
- vertex2, ok = m.graph[NewNodeID(node2)]
- if !ok {
- vertex2 = &memNode{
- pub: node2,
- addrs: []net.Addr{
- &net.TCPAddr{
- IP: bytes.Repeat([]byte("a"), 16),
- },
- },
- }
- }
- } else {
- newPub, err := randKey()
- if err != nil {
- return nil, nil, err
- }
- vertex2 = &memNode{
- pub: newPub,
- addrs: []net.Addr{
- &net.TCPAddr{
- IP: bytes.Repeat([]byte("a"), 16),
- },
- },
- }
- }
-
- edge1 := ChannelEdge{
- ChanID: randChanID(),
- Capacity: capacity,
- Peer: vertex2,
- }
- vertex1.chans = append(vertex1.chans, edge1)
-
- edge2 := ChannelEdge{
- ChanID: randChanID(),
- Capacity: capacity,
- Peer: vertex1,
- }
- vertex2.chans = append(vertex2.chans, edge2)
-
- m.graph[NewNodeID(vertex1.pub)] = vertex1
- m.graph[NewNodeID(vertex2.pub)] = vertex2
-
- return &edge1, &edge2, nil
-}
-
-func (m *memChannelGraph) addRandNode() (*btcec.PublicKey, er.R) {
- newPub, err := randKey()
- if err != nil {
- return nil, err
- }
- vertex := &memNode{
- pub: newPub,
- addrs: []net.Addr{
- &net.TCPAddr{
- IP: bytes.Repeat([]byte("a"), 16),
- },
- },
- }
- m.graph[NewNodeID(newPub)] = vertex
-
- return newPub, nil
-}
-
-// memNode is a purely in-memory implementation of the autopilot.Node
-// interface.
-type memNode struct {
- pub *btcec.PublicKey
-
- chans []ChannelEdge
-
- addrs []net.Addr
-}
-
-// A compile time assertion to ensure memNode meets the autopilot.Node
-// interface.
-var _ Node = (*memNode)(nil)
-
-// PubKey is the identity public key of the node. This will be used to attempt
-// to target a node for channel opening by the main autopilot agent.
-//
-// NOTE: Part of the autopilot.Node interface.
-func (m memNode) PubKey() [33]byte {
- var n [33]byte
- copy(n[:], m.pub.SerializeCompressed())
-
- return n
-}
-
-// Addrs returns a slice of publicly reachable public TCP addresses that the
-// peer is known to be listening on.
-//
-// NOTE: Part of the autopilot.Node interface.
-func (m memNode) Addrs() []net.Addr {
- return m.addrs
-}
-
-// ForEachChannel is a higher-order function that will be used to iterate
-// through all edges emanating from/to the target node. For each active
-// channel, this function should be called with the populated ChannelEdge that
-// describes the active channel.
-//
-// NOTE: Part of the autopilot.Node interface.
-func (m memNode) ForEachChannel(cb func(ChannelEdge) er.R) er.R {
- for _, channel := range m.chans {
- if err := cb(channel); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-// Median returns the median value in the slice of Amounts.
-func Median(vals []btcutil.Amount) btcutil.Amount {
- sort.Slice(vals, func(i, j int) bool {
- return vals[i] < vals[j]
- })
-
- num := len(vals)
- switch {
- case num == 0:
- return 0
-
- case num%2 == 0:
- return (vals[num/2-1] + vals[num/2]) / 2
-
- default:
- return vals[num/2]
- }
-}
diff --git a/lnd/autopilot/graph_test.go b/lnd/autopilot/graph_test.go
deleted file mode 100644
index 725a33bf..00000000
--- a/lnd/autopilot/graph_test.go
+++ /dev/null
@@ -1,50 +0,0 @@
-package autopilot_test
-
-import (
- "testing"
-
- "github.com/pkt-cash/pktd/btcutil"
- "github.com/pkt-cash/pktd/lnd/autopilot"
-)
-
-// TestMedian tests the Median method.
-func TestMedian(t *testing.T) {
- t.Parallel()
-
- testCases := []struct {
- values []btcutil.Amount
- median btcutil.Amount
- }{
- {
- values: []btcutil.Amount{},
- median: 0,
- },
- {
- values: []btcutil.Amount{10},
- median: 10,
- },
- {
- values: []btcutil.Amount{10, 20},
- median: 15,
- },
- {
- values: []btcutil.Amount{10, 20, 30},
- median: 20,
- },
- {
- values: []btcutil.Amount{30, 10, 20},
- median: 20,
- },
- {
- values: []btcutil.Amount{10, 10, 10, 10, 5000000},
- median: 10,
- },
- }
-
- for _, test := range testCases {
- res := autopilot.Median(test.values)
- if res != test.median {
- t.Fatalf("expected median %v, got %v", test.median, res)
- }
- }
-}
diff --git a/lnd/autopilot/interface.go b/lnd/autopilot/interface.go
deleted file mode 100644
index 7c9ddfc1..00000000
--- a/lnd/autopilot/interface.go
+++ /dev/null
@@ -1,219 +0,0 @@
-package autopilot
-
-import (
- "net"
-
- "github.com/pkt-cash/pktd/btcec"
- "github.com/pkt-cash/pktd/btcutil"
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/lnd/lnwire"
- "github.com/pkt-cash/pktd/wire"
-)
-
-// DefaultConfTarget is the default confirmation target for autopilot channels.
-// TODO(halseth): possibly make dynamic, going aggressive->lax as more channels
-// are opened.
-const DefaultConfTarget = 3
-
-// Node is an interface which represents n abstract vertex within the
-// channel graph. All nodes should have at least a single edge to/from them
-// within the graph.
-//
-// TODO(roasbeef): combine with routing.ChannelGraphSource
-type Node interface {
- // PubKey is the identity public key of the node. This will be used to
- // attempt to target a node for channel opening by the main autopilot
- // agent. The key will be returned in serialized compressed format.
- PubKey() [33]byte
-
- // Addrs returns a slice of publicly reachable public TCP addresses
- // that the peer is known to be listening on.
- Addrs() []net.Addr
-
- // ForEachChannel is a higher-order function that will be used to
- // iterate through all edges emanating from/to the target node. For
- // each active channel, this function should be called with the
- // populated ChannelEdge that describes the active channel.
- ForEachChannel(func(ChannelEdge) er.R) er.R
-}
-
-// LocalChannel is a simple struct which contains relevant details of a
-// particular channel the local node has. The fields in this struct may be used
-// a signals for various AttachmentHeuristic implementations.
-type LocalChannel struct {
- // ChanID is the short channel ID for this channel as defined within
- // BOLT-0007.
- ChanID lnwire.ShortChannelID
-
- // Balance is the local balance of the channel expressed in satoshis.
- Balance btcutil.Amount
-
- // Node is the peer that this channel has been established with.
- Node NodeID
-
- // TODO(roasbeef): also add other traits?
- // * fee, timelock, etc
-}
-
-// ChannelEdge is a struct that holds details concerning a channel, but also
-// contains a reference to the Node that this channel connects to as a directed
-// edge within the graph. The existence of this reference to the connected node
-// will allow callers to traverse the graph in an object-oriented manner.
-type ChannelEdge struct {
- // ChanID is the short channel ID for this channel as defined within
- // BOLT-0007.
- ChanID lnwire.ShortChannelID
-
- // Capacity is the capacity of the channel expressed in satoshis.
- Capacity btcutil.Amount
-
- // Peer is the peer that this channel creates an edge to in the channel
- // graph.
- Peer Node
-}
-
-// ChannelGraph in an interface that represents a traversable channel graph.
-// The autopilot agent will use this interface as its source of graph traits in
-// order to make decisions concerning which channels should be opened, and to
-// whom.
-//
-// TODO(roasbeef): abstract??
-type ChannelGraph interface {
- // ForEachNode is a higher-order function that should be called once
- // for each connected node within the channel graph. If the passed
- // callback returns an error, then execution should be terminated.
- ForEachNode(func(Node) er.R) er.R
-}
-
-// NodeScore is a tuple mapping a NodeID to a score indicating the preference
-// of opening a channel with it.
-type NodeScore struct {
- // NodeID is the serialized compressed pubkey of the node that is being
- // scored.
- NodeID NodeID
-
- // Score is the score given by the heuristic for opening a channel of
- // the given size to this node.
- Score float64
-}
-
-// AttachmentDirective describes a channel attachment proscribed by an
-// AttachmentHeuristic. It details to which node a channel should be created
-// to, and also the parameters which should be used in the channel creation.
-type AttachmentDirective struct {
- // NodeID is the serialized compressed pubkey of the target node for
- // this attachment directive. It can be identified by its public key,
- // and therefore can be used along with a ChannelOpener implementation
- // to execute the directive.
- NodeID NodeID
-
- // ChanAmt is the size of the channel that should be opened, expressed
- // in satoshis.
- ChanAmt btcutil.Amount
-
- // Addrs is a list of addresses that the target peer may be reachable
- // at.
- Addrs []net.Addr
-}
-
-// AttachmentHeuristic is one of the primary interfaces within this package.
-// Implementations of this interface will be used to implement a control system
-// which automatically regulates channels of a particular agent, attempting to
-// optimize channels opened/closed based on various heuristics. The purpose of
-// the interface is to allow an auto-pilot agent to decide if it needs more
-// channels, and if so, which exact channels should be opened.
-type AttachmentHeuristic interface {
- // Name returns the name of this heuristic.
- Name() string
-
- // NodeScores is a method that given the current channel graph and
- // current set of local channels, scores the given nodes according to
- // the preference of opening a channel of the given size with them. The
- // returned channel candidates maps the NodeID to a NodeScore for the
- // node.
- //
- // The returned scores will be in the range [0, 1.0], where 0 indicates
- // no improvement in connectivity if a channel is opened to this node,
- // while 1.0 is the maximum possible improvement in connectivity. The
- // implementation of this interface must return scores in this range to
- // properly allow the autopilot agent to make a reasonable choice based
- // on the score from multiple heuristics.
- //
- // NOTE: A NodeID not found in the returned map is implicitly given a
- // score of 0.
- NodeScores(g ChannelGraph, chans []LocalChannel,
- chanSize btcutil.Amount, nodes map[NodeID]struct{}) (
- map[NodeID]*NodeScore, er.R)
-}
-
-// NodeMetric is a common interface for all graph metrics that are not
-// directly used as autopilot node scores but may be used in compositional
-// heuristics or statistical information exposed to users.
-type NodeMetric interface {
- // Name returns the unique name of this metric.
- Name() er.R
-
- // Refresh refreshes the metric values based on the current graph.
- Refresh(graph ChannelGraph) er.R
-
- // GetMetric returns the latest value of this metric. Values in the
- // map are per node and can be in arbitrary domain. If normalize is
- // set to true, then the returned values are normalized to either
- // [0, 1] or [-1, 1] depending on the metric.
- GetMetric(normalize bool) map[NodeID]float64
-}
-
-// ScoreSettable is an interface that indicates that the scores returned by the
-// heuristic can be mutated by an external caller. The ExternalScoreAttachment
-// currently implements this interface, and so should any heuristic that is
-// using the ExternalScoreAttachment as a sub-heuristic, or keeps their own
-// internal list of mutable scores, to allow access to setting the internal
-// scores.
-type ScoreSettable interface {
- // SetNodeScores is used to set the internal map from NodeIDs to
- // scores. The passed scores must be in the range [0, 1.0]. The fist
- // parameter is the name of the targeted heuristic, to allow
- // recursively target specific sub-heuristics. The returned boolean
- // indicates whether the targeted heuristic was found.
- SetNodeScores(string, map[NodeID]float64) (bool, er.R)
-}
-
-var (
- // availableHeuristics holds all heuristics possible to combine for use
- // with the autopilot agent.
- availableHeuristics = []AttachmentHeuristic{
- NewPrefAttachment(),
- NewExternalScoreAttachment(),
- NewTopCentrality(),
- }
-
- // AvailableHeuristics is a map that holds the name of available
- // heuristics to the actual heuristic for easy lookup. It will be
- // filled during init().
- AvailableHeuristics = make(map[string]AttachmentHeuristic)
-)
-
-func init() {
- // Fill the map from heuristic names to available heuristics for easy
- // lookup.
- for _, h := range availableHeuristics {
- AvailableHeuristics[h.Name()] = h
- }
-}
-
-// ChannelController is a simple interface that allows an auto-pilot agent to
-// open a channel within the graph to a target peer, close targeted channels,
-// or add/remove funds from existing channels via a splice in/out mechanisms.
-type ChannelController interface {
- // OpenChannel opens a channel to a target peer, using at most amt
- // funds. This means that the resulting channel capacity might be
- // slightly less to account for fees. This function should un-block
- // immediately after the funding transaction that marks the channel
- // open has been broadcast.
- OpenChannel(target *btcec.PublicKey, amt btcutil.Amount) er.R
-
- // CloseChannel attempts to close out the target channel.
- //
- // TODO(roasbeef): add force option?
- CloseChannel(chanPoint *wire.OutPoint) er.R
-}
diff --git a/lnd/autopilot/manager.go b/lnd/autopilot/manager.go
deleted file mode 100644
index fb7f1eee..00000000
--- a/lnd/autopilot/manager.go
+++ /dev/null
@@ -1,393 +0,0 @@
-package autopilot
-
-import (
- "sync"
-
- "github.com/pkt-cash/pktd/btcec"
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/lnd/lnwallet"
- "github.com/pkt-cash/pktd/lnd/lnwire"
- "github.com/pkt-cash/pktd/lnd/routing"
- "github.com/pkt-cash/pktd/pktlog/log"
- "github.com/pkt-cash/pktd/wire"
-)
-
-// ManagerCfg houses a set of values and methods that is passed to the Manager
-// for it to properly manage its autopilot agent.
-type ManagerCfg struct {
- // Self is the public key of the lnd instance. It is used to making
- // sure the autopilot is not opening channels to itself.
- Self *btcec.PublicKey
-
- // PilotCfg is the config of the autopilot agent managed by the
- // Manager.
- PilotCfg *Config
-
- // ChannelState is a function closure that returns the current set of
- // channels managed by this node.
- ChannelState func() ([]LocalChannel, er.R)
-
- // ChannelInfo is a function closure that returns the channel managed
- // by the node given by the passed channel point.
- ChannelInfo func(wire.OutPoint) (*LocalChannel, er.R)
-
- // SubscribeTransactions is used to get a subscription for transactions
- // relevant to this node's wallet.
- SubscribeTransactions func() (lnwallet.TransactionSubscription, er.R)
-
- // SubscribeTopology is used to get a subscription for topology changes
- // on the network.
- SubscribeTopology func() (*routing.TopologyClient, er.R)
-}
-
-// Manager is struct that manages an autopilot agent, making it possible to
-// enable and disable it at will, and hand it relevant external information.
-// It implements the autopilot grpc service, which is used to get data about
-// the running autopilot, and give it relevant information.
-type Manager struct {
- started sync.Once
- stopped sync.Once
-
- cfg *ManagerCfg
-
- // pilot is the current autopilot agent. It will be nil if the agent is
- // disabled.
- pilot *Agent
-
- quit chan struct{}
- wg sync.WaitGroup
- sync.Mutex
-}
-
-// NewManager creates a new instance of the Manager from the passed config.
-func NewManager(cfg *ManagerCfg) (*Manager, er.R) {
- return &Manager{
- cfg: cfg,
- quit: make(chan struct{}),
- }, nil
-}
-
-// Start starts the Manager.
-func (m *Manager) Start() er.R {
- m.started.Do(func() {})
- return nil
-}
-
-// Stop stops the Manager. If an autopilot agent is active, it will also be
-// stopped.
-func (m *Manager) Stop() er.R {
- m.stopped.Do(func() {
- if err := m.StopAgent(); err != nil {
- log.Errorf("Unable to stop pilot: %v", err)
- }
-
- close(m.quit)
- m.wg.Wait()
- })
- return nil
-}
-
-// IsActive returns whether the autopilot agent is currently active.
-func (m *Manager) IsActive() bool {
- m.Lock()
- defer m.Unlock()
-
- return m.pilot != nil
-}
-
-// StartAgent creates and starts an autopilot agent from the Manager's
-// config.
-func (m *Manager) StartAgent() er.R {
- m.Lock()
- defer m.Unlock()
-
- // Already active.
- if m.pilot != nil {
- return nil
- }
-
- // Next, we'll fetch the current state of open channels from the
- // database to use as initial state for the auto-pilot agent.
- initialChanState, err := m.cfg.ChannelState()
- if err != nil {
- return err
- }
-
- // Now that we have all the initial dependencies, we can create the
- // auto-pilot instance itself.
- pilot, err := New(*m.cfg.PilotCfg, initialChanState)
- if err != nil {
- return err
- }
-
- if err := pilot.Start(); err != nil {
- return err
- }
-
- // Finally, we'll need to subscribe to two things: incoming
- // transactions that modify the wallet's balance, and also any graph
- // topology updates.
- txnSubscription, err := m.cfg.SubscribeTransactions()
- if err != nil {
- pilot.Stop()
- return err
- }
- graphSubscription, err := m.cfg.SubscribeTopology()
- if err != nil {
- txnSubscription.Cancel()
- pilot.Stop()
- return err
- }
-
- m.pilot = pilot
-
- // We'll launch a goroutine to provide the agent with notifications
- // whenever the balance of the wallet changes.
- // TODO(halseth): can lead to panic if in process of shutting down.
- m.wg.Add(1)
- go func() {
- defer txnSubscription.Cancel()
- defer m.wg.Done()
-
- for {
- select {
- case <-txnSubscription.ConfirmedTransactions():
- pilot.OnBalanceChange()
-
- // We won't act upon new unconfirmed transaction, as
- // we'll only use confirmed outputs when funding.
- // However, we will still drain this request in order
- // to avoid goroutine leaks, and ensure we promptly
- // read from the channel if available.
- case <-txnSubscription.UnconfirmedTransactions():
- case <-pilot.quit:
- return
- case <-m.quit:
- return
- }
- }
-
- }()
-
- // We'll also launch a goroutine to provide the agent with
- // notifications for when the graph topology controlled by the node
- // changes.
- m.wg.Add(1)
- go func() {
- defer graphSubscription.Cancel()
- defer m.wg.Done()
-
- for {
- select {
- case topChange, ok := <-graphSubscription.TopologyChanges:
- // If the router is shutting down, then we will
- // as well.
- if !ok {
- return
- }
-
- for _, edgeUpdate := range topChange.ChannelEdgeUpdates {
- // If this isn't an advertisement by
- // the backing lnd node, then we'll
- // continue as we only want to add
- // channels that we've created
- // ourselves.
- if !edgeUpdate.AdvertisingNode.IsEqual(m.cfg.Self) {
- continue
- }
-
- // If this is indeed a channel we
- // opened, then we'll convert it to the
- // autopilot.Channel format, and notify
- // the pilot of the new channel.
- cp := edgeUpdate.ChanPoint
- edge, err := m.cfg.ChannelInfo(cp)
- if err != nil {
- log.Errorf("Unable to fetch "+
- "channel info for %v: "+
- "%v", cp, err)
- continue
- }
-
- pilot.OnChannelOpen(*edge)
- }
-
- // For each closed channel, we'll obtain
- // the chanID of the closed channel and send it
- // to the pilot.
- for _, chanClose := range topChange.ClosedChannels {
- chanID := lnwire.NewShortChanIDFromInt(
- chanClose.ChanID,
- )
-
- pilot.OnChannelClose(chanID)
- }
-
- // If new nodes were added to the graph, or nod
- // information has changed, we'll poke autopilot
- // to see if it can make use of them.
- if len(topChange.NodeUpdates) > 0 {
- pilot.OnNodeUpdates()
- }
-
- case <-pilot.quit:
- return
- case <-m.quit:
- return
- }
- }
- }()
-
- log.Debugf("Manager started autopilot agent")
-
- return nil
-}
-
-// StopAgent stops any active autopilot agent.
-func (m *Manager) StopAgent() er.R {
- m.Lock()
- defer m.Unlock()
-
- // Not active, so we can return early.
- if m.pilot == nil {
- return nil
- }
-
- if err := m.pilot.Stop(); err != nil {
- return err
- }
-
- // Make sure to nil the current agent, indicating it is no longer
- // active.
- m.pilot = nil
-
- log.Debugf("Manager stopped autopilot agent")
-
- return nil
-}
-
-// QueryHeuristics queries the available autopilot heuristics for node scores.
-func (m *Manager) QueryHeuristics(nodes []NodeID, localState bool) (
- HeuristicScores, er.R) {
-
- m.Lock()
- defer m.Unlock()
-
- n := make(map[NodeID]struct{})
- for _, node := range nodes {
- n[node] = struct{}{}
- }
-
- log.Debugf("Querying heuristics for %d nodes", len(n))
- return m.queryHeuristics(n, localState)
-}
-
-// HeuristicScores is an alias for a map that maps heuristic names to a map of
-// scores for pubkeys.
-type HeuristicScores map[string]map[NodeID]float64
-
-// queryHeuristics gets node scores from all available simple heuristics, and
-// the agent's current active heuristic.
-//
-// NOTE: Must be called with the manager's lock.
-func (m *Manager) queryHeuristics(nodes map[NodeID]struct{}, localState bool) (
- HeuristicScores, er.R) {
-
- // If we want to take the local state into action when querying the
- // heuristics, we fetch it. If not we'll just pass an emply slice to
- // the heuristic.
- var totalChans []LocalChannel
- var err er.R
- if localState {
- // Fetch the current set of channels.
- totalChans, err = m.cfg.ChannelState()
- if err != nil {
- return nil, err
- }
-
- // If the agent is active, we can merge the channel state with
- // the channels pending open.
- if m.pilot != nil {
- m.pilot.chanStateMtx.Lock()
- m.pilot.pendingMtx.Lock()
- totalChans = mergeChanState(
- m.pilot.pendingOpens, m.pilot.chanState,
- )
- m.pilot.pendingMtx.Unlock()
- m.pilot.chanStateMtx.Unlock()
- }
- }
-
- // As channel size we'll use the maximum size.
- chanSize := m.cfg.PilotCfg.Constraints.MaxChanSize()
-
- // We'll start by getting the scores from each available sub-heuristic,
- // in addition the current agent heuristic.
- var heuristics []AttachmentHeuristic
- heuristics = append(heuristics, availableHeuristics...)
- heuristics = append(heuristics, m.cfg.PilotCfg.Heuristic)
-
- report := make(HeuristicScores)
- for _, h := range heuristics {
- name := h.Name()
-
- // If the agent heuristic is among the simple heuristics it
- // might get queried more than once. As an optimization we'll
- // just skip it the second time.
- if _, ok := report[name]; ok {
- continue
- }
-
- s, err := h.NodeScores(
- m.cfg.PilotCfg.Graph, totalChans, chanSize, nodes,
- )
- if err != nil {
- return nil, er.Errorf("unable to get sub score: %v",
- err)
- }
-
- log.Debugf("Heuristic \"%v\" scored %d nodes", name, len(s))
-
- scores := make(map[NodeID]float64)
- for nID, score := range s {
- scores[nID] = score.Score
- }
-
- report[name] = scores
- }
-
- return report, nil
-}
-
-// SetNodeScores is used to set the scores of the given heuristic, if it is
-// active, and ScoreSettable.
-func (m *Manager) SetNodeScores(name string, scores map[NodeID]float64) er.R {
- m.Lock()
- defer m.Unlock()
-
- // It must be ScoreSettable to be available for external
- // scores.
- s, ok := m.cfg.PilotCfg.Heuristic.(ScoreSettable)
- if !ok {
- return er.Errorf("current heuristic doesn't support " +
- "external scoring")
- }
-
- // Heuristic was found, set its node scores.
- applied, err := s.SetNodeScores(name, scores)
- if err != nil {
- return err
- }
-
- if !applied {
- return er.Errorf("heuristic with name %v not found", name)
- }
-
- // If the autopilot agent is active, notify about the updated
- // heuristic.
- if m.pilot != nil {
- m.pilot.OnHeuristicUpdate(m.cfg.PilotCfg.Heuristic)
- }
-
- return nil
-}
diff --git a/lnd/autopilot/prefattach.go b/lnd/autopilot/prefattach.go
deleted file mode 100644
index 87498f7a..00000000
--- a/lnd/autopilot/prefattach.go
+++ /dev/null
@@ -1,212 +0,0 @@
-package autopilot
-
-import (
- prand "math/rand"
- "time"
-
- "github.com/pkt-cash/pktd/btcec"
- "github.com/pkt-cash/pktd/btcutil"
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/pktlog/log"
-)
-
-// minMedianChanSizeFraction determines the minimum size a channel must have to
-// count positively when calculating the scores using preferential attachment.
-// The minimum channel size is calculated as median/minMedianChanSizeFraction,
-// where median is the median channel size of the entire graph.
-const minMedianChanSizeFraction = 4
-
-// PrefAttachment is an implementation of the AttachmentHeuristic interface
-// that implement a non-linear preferential attachment heuristic. This means
-// that given a threshold to allocate to automatic channel establishment, the
-// heuristic will attempt to favor connecting to nodes which already have a set
-// amount of links, selected by sampling from a power law distribution. The
-// attachment is non-linear in that it favors nodes with a higher in-degree but
-// less so than regular linear preferential attachment. As a result, this
-// creates smaller and less clusters than regular linear preferential
-// attachment.
-//
-// TODO(roasbeef): BA, with k=-3
-type PrefAttachment struct {
-}
-
-// NewPrefAttachment creates a new instance of a PrefAttachment heuristic.
-func NewPrefAttachment() *PrefAttachment {
- prand.Seed(time.Now().Unix())
- return &PrefAttachment{}
-}
-
-// A compile time assertion to ensure PrefAttachment meets the
-// AttachmentHeuristic interface.
-var _ AttachmentHeuristic = (*PrefAttachment)(nil)
-
-// NodeID is a simple type that holds an EC public key serialized in compressed
-// format.
-type NodeID [33]byte
-
-// NewNodeID creates a new nodeID from a passed public key.
-func NewNodeID(pub *btcec.PublicKey) NodeID {
- var n NodeID
- copy(n[:], pub.SerializeCompressed())
- return n
-}
-
-// Name returns the name of this heuristic.
-//
-// NOTE: This is a part of the AttachmentHeuristic interface.
-func (p *PrefAttachment) Name() string {
- return "preferential"
-}
-
-// NodeScores is a method that given the current channel graph and current set
-// of local channels, scores the given nodes according to the preference of
-// opening a channel of the given size with them. The returned channel
-// candidates maps the NodeID to a NodeScore for the node.
-//
-// The heuristic employed by this method is one that attempts to promote a
-// scale-free network globally, via local attachment preferences for new nodes
-// joining the network with an amount of available funds to be allocated to
-// channels. Specifically, we consider the degree of each node (and the flow
-// in/out of the node available via its open channels) and utilize the
-// Barabási–Albert model to drive our recommended attachment heuristics. If
-// implemented globally for each new participant, this results in a channel
-// graph that is scale-free and follows a power law distribution with k=-3.
-//
-// To avoid assigning a high score to nodes with a large number of small
-// channels, we only count channels at least as large as a given fraction of
-// the graph's median channel size.
-//
-// The returned scores will be in the range [0.0, 1.0], where higher scores are
-// given to nodes already having high connectivity in the graph.
-//
-// NOTE: This is a part of the AttachmentHeuristic interface.
-func (p *PrefAttachment) NodeScores(g ChannelGraph, chans []LocalChannel,
- chanSize btcutil.Amount, nodes map[NodeID]struct{}) (
- map[NodeID]*NodeScore, er.R) {
-
- // We first run though the graph once in order to find the median
- // channel size.
- var (
- allChans []btcutil.Amount
- seenChans = make(map[uint64]struct{})
- )
- if err := g.ForEachNode(func(n Node) er.R {
- err := n.ForEachChannel(func(e ChannelEdge) er.R {
- if _, ok := seenChans[e.ChanID.ToUint64()]; ok {
- return nil
- }
- seenChans[e.ChanID.ToUint64()] = struct{}{}
- allChans = append(allChans, e.Capacity)
- return nil
- })
- if err != nil {
- return err
- }
-
- return nil
- }); err != nil {
- return nil, err
- }
-
- medianChanSize := Median(allChans)
- log.Tracef("Found channel median %v for preferential score heuristic",
- medianChanSize)
-
- // Count the number of large-ish channels for each particular node in
- // the graph.
- var maxChans int
- nodeChanNum := make(map[NodeID]int)
- if err := g.ForEachNode(func(n Node) er.R {
- var nodeChans int
- err := n.ForEachChannel(func(e ChannelEdge) er.R {
- // Since connecting to nodes with a lot of small
- // channels actually worsens our connectivity in the
- // graph (we will potentially waste time trying to use
- // these useless channels in path finding), we decrease
- // the counter for such channels.
- if e.Capacity < medianChanSize/minMedianChanSizeFraction {
- nodeChans--
- return nil
- }
-
- // Larger channels we count.
- nodeChans++
- return nil
- })
- if err != nil {
- return err
- }
-
- // We keep track of the highest-degree node we've seen, as this
- // will be given the max score.
- if nodeChans > maxChans {
- maxChans = nodeChans
- }
-
- // If this node is not among our nodes to score, we can return
- // early.
- nID := NodeID(n.PubKey())
- if _, ok := nodes[nID]; !ok {
- log.Tracef("Node %x not among nodes to score, "+
- "ignoring", nID[:])
- return nil
- }
-
- // Otherwise we'll record the number of channels.
- nodeChanNum[nID] = nodeChans
- log.Tracef("Counted %v channels for node %x", nodeChans, nID[:])
-
- return nil
- }); err != nil {
- return nil, err
- }
-
- // If there are no channels in the graph we cannot determine any
- // preferences, so we return, indicating all candidates get a score of
- // zero.
- if maxChans == 0 {
- log.Tracef("No channels in the graph")
- return nil, nil
- }
-
- existingPeers := make(map[NodeID]struct{})
- for _, c := range chans {
- existingPeers[c.Node] = struct{}{}
- }
-
- // For each node in the set of nodes, count their fraction of channels
- // in the graph, and use that as the score.
- candidates := make(map[NodeID]*NodeScore)
- for nID, nodeChans := range nodeChanNum {
-
- // If the node is among or existing channel peers, we don't
- // need another channel.
- if _, ok := existingPeers[nID]; ok {
- log.Tracef("Node %x among existing peers for pref "+
- "attach heuristic, giving zero score", nID[:])
- continue
- }
-
- // If the node had no large channels, we skip it, since it
- // would have gotten a zero score anyway.
- if nodeChans <= 0 {
- log.Tracef("Skipping node %x with channel count %v",
- nID[:], nodeChans)
- continue
- }
-
- // Otherwise we score the node according to its fraction of
- // channels in the graph, scaled such that the highest-degree
- // node will be given a score of 1.0.
- score := float64(nodeChans) / float64(maxChans)
- log.Tracef("Giving node %x a pref attach score of %v",
- nID[:], score)
-
- candidates[nID] = &NodeScore{
- NodeID: nID,
- Score: score,
- }
- }
-
- return candidates, nil
-}
diff --git a/lnd/autopilot/prefattach_test.go b/lnd/autopilot/prefattach_test.go
deleted file mode 100644
index 46ee3c29..00000000
--- a/lnd/autopilot/prefattach_test.go
+++ /dev/null
@@ -1,451 +0,0 @@
-package autopilot
-
-import (
- "bytes"
- "io/ioutil"
- "os"
- "testing"
- "time"
-
- prand "math/rand"
-
- "github.com/pkt-cash/pktd/btcec"
- "github.com/pkt-cash/pktd/btcutil"
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/lnd/channeldb"
-)
-
-type genGraphFunc func() (testGraph, func(), er.R)
-
-type testGraph interface {
- ChannelGraph
-
- addRandChannel(*btcec.PublicKey, *btcec.PublicKey,
- btcutil.Amount) (*ChannelEdge, *ChannelEdge, er.R)
-
- addRandNode() (*btcec.PublicKey, er.R)
-}
-
-func newDiskChanGraph() (testGraph, func(), er.R) {
- // First, create a temporary directory to be used for the duration of
- // this test.
- tempDirName, errr := ioutil.TempDir("", "channeldb")
- if errr != nil {
- return nil, nil, er.E(errr)
- }
-
- // Next, create channeldb for the first time.
- cdb, err := channeldb.Open(tempDirName)
- if err != nil {
- return nil, nil, err
- }
-
- cleanUp := func() {
- cdb.Close()
- os.RemoveAll(tempDirName)
- }
-
- return &databaseChannelGraph{
- db: cdb.ChannelGraph(),
- }, cleanUp, nil
-}
-
-var _ testGraph = (*databaseChannelGraph)(nil)
-
-func newMemChanGraph() (testGraph, func(), er.R) {
- return newMemChannelGraph(), nil, nil
-}
-
-var _ testGraph = (*memChannelGraph)(nil)
-
-var chanGraphs = []struct {
- name string
- genFunc genGraphFunc
-}{
- {
- name: "disk_graph",
- genFunc: newDiskChanGraph,
- },
- {
- name: "mem_graph",
- genFunc: newMemChanGraph,
- },
-}
-
-// TestPrefAttachmentSelectEmptyGraph ensures that when passed an
-// empty graph, the NodeSores function always returns a score of 0.
-func TestPrefAttachmentSelectEmptyGraph(t *testing.T) {
- prefAttach := NewPrefAttachment()
-
- // Create a random public key, which we will query to get a score for.
- pub, err := randKey()
- if err != nil {
- t.Fatalf("unable to generate key: %v", err)
- }
-
- nodes := map[NodeID]struct{}{
- NewNodeID(pub): {},
- }
-
- for _, graph := range chanGraphs {
- success := t.Run(graph.name, func(t1 *testing.T) {
- graph, cleanup, err := graph.genFunc()
- if err != nil {
- t1.Fatalf("unable to create graph: %v", err)
- }
- if cleanup != nil {
- defer cleanup()
- }
-
- // With the necessary state initialized, we'll now
- // attempt to get the score for this one node.
- walletFunds := btcutil.UnitsPerCoin()
- scores, err := prefAttach.NodeScores(graph, nil,
- walletFunds, nodes)
- if err != nil {
- t1.Fatalf("unable to select attachment "+
- "directives: %v", err)
- }
-
- // Since the graph is empty, we expect the score to be
- // 0, giving an empty return map.
- if len(scores) != 0 {
- t1.Fatalf("expected empty score map, "+
- "instead got %v ", len(scores))
- }
- })
- if !success {
- break
- }
- }
-}
-
-// TestPrefAttachmentSelectTwoVertexes ensures that when passed a
-// graph with only two eligible vertexes, then both are given the same score,
-// and the funds are appropriately allocated across each peer.
-func TestPrefAttachmentSelectTwoVertexes(t *testing.T) {
- t.Parallel()
-
- prand.Seed(time.Now().Unix())
-
- maxChanSize := btcutil.Amount(btcutil.UnitsPerCoin())
-
- for _, graph := range chanGraphs {
- success := t.Run(graph.name, func(t1 *testing.T) {
- graph, cleanup, err := graph.genFunc()
- if err != nil {
- t1.Fatalf("unable to create graph: %v", err)
- }
- if cleanup != nil {
- defer cleanup()
- }
-
- prefAttach := NewPrefAttachment()
-
- // For this set, we'll load the memory graph with two
- // nodes, and a random channel connecting them.
- chanCapacity := btcutil.UnitsPerCoin()
- edge1, edge2, err := graph.addRandChannel(nil, nil, chanCapacity)
- if err != nil {
- t1.Fatalf("unable to generate channel: %v", err)
- }
-
- // We also add a third, non-connected node to the graph.
- _, err = graph.addRandNode()
- if err != nil {
- t1.Fatalf("unable to add random node: %v", err)
- }
-
- // Get the score for all nodes found in the graph at
- // this point.
- nodes := make(map[NodeID]struct{})
- if err := graph.ForEachNode(func(n Node) er.R {
- nodes[n.PubKey()] = struct{}{}
- return nil
- }); err != nil {
- t1.Fatalf("unable to traverse graph: %v", err)
- }
-
- if len(nodes) != 3 {
- t1.Fatalf("expected 2 nodes, found %d", len(nodes))
- }
-
- // With the necessary state initialized, we'll now
- // attempt to get our candidates channel score given
- // the current state of the graph.
- candidates, err := prefAttach.NodeScores(graph, nil,
- maxChanSize, nodes)
- if err != nil {
- t1.Fatalf("unable to select attachment "+
- "directives: %v", err)
- }
-
- // We expect two candidates, since one of the nodes
- // doesn't have any channels.
- if len(candidates) != 2 {
- t1.Fatalf("2 nodes should be scored, "+
- "instead %v were", len(candidates))
- }
-
- // The candidates should be amongst the two edges
- // created above.
- for nodeID, candidate := range candidates {
- edge1Pub := edge1.Peer.PubKey()
- edge2Pub := edge2.Peer.PubKey()
-
- switch {
- case bytes.Equal(nodeID[:], edge1Pub[:]):
- case bytes.Equal(nodeID[:], edge2Pub[:]):
- default:
- t1.Fatalf("attached to unknown node: %x",
- nodeID[:])
- }
-
- // Since each of the nodes has 1 channel, out
- // of only one channel in the graph, we expect
- // their score to be 1.0.
- expScore := float64(1.0)
- if candidate.Score != expScore {
- t1.Fatalf("expected candidate score "+
- "to be %v, instead was %v",
- expScore, candidate.Score)
- }
- }
- })
- if !success {
- break
- }
- }
-}
-
-// TestPrefAttachmentSelectGreedyAllocation tests that if upon
-// returning node scores, the NodeScores method will attempt to greedily
-// allocate all funds to each vertex (up to the max channel size).
-func TestPrefAttachmentSelectGreedyAllocation(t *testing.T) {
- t.Parallel()
-
- prand.Seed(time.Now().Unix())
-
- maxChanSize := btcutil.Amount(btcutil.UnitsPerCoin())
-
- for _, graph := range chanGraphs {
- success := t.Run(graph.name, func(t1 *testing.T) {
- graph, cleanup, err := graph.genFunc()
- if err != nil {
- t1.Fatalf("unable to create graph: %v", err)
- }
- if cleanup != nil {
- defer cleanup()
- }
-
- prefAttach := NewPrefAttachment()
-
- chanCapacity := btcutil.UnitsPerCoin()
-
- // Next, we'll add 3 nodes to the graph, creating an
- // "open triangle topology".
- edge1, _, err := graph.addRandChannel(nil, nil,
- chanCapacity)
- if err != nil {
- t1.Fatalf("unable to create channel: %v", err)
- }
- peerPubBytes := edge1.Peer.PubKey()
- peerPub, err := btcec.ParsePubKey(
- peerPubBytes[:], btcec.S256(),
- )
- if err != nil {
- t.Fatalf("unable to parse pubkey: %v", err)
- }
- _, _, err = graph.addRandChannel(
- peerPub, nil, chanCapacity,
- )
- if err != nil {
- t1.Fatalf("unable to create channel: %v", err)
- }
-
- // At this point, there should be three nodes in the
- // graph, with node node having two edges.
- numNodes := 0
- twoChans := false
- nodes := make(map[NodeID]struct{})
- if err := graph.ForEachNode(func(n Node) er.R {
- numNodes++
- nodes[n.PubKey()] = struct{}{}
- numChans := 0
- err := n.ForEachChannel(func(c ChannelEdge) er.R {
- numChans++
- return nil
- })
- if err != nil {
- return err
- }
-
- twoChans = twoChans || (numChans == 2)
-
- return nil
- }); err != nil {
- t1.Fatalf("unable to traverse graph: %v", err)
- }
- if numNodes != 3 {
- t1.Fatalf("expected 3 nodes, instead have: %v",
- numNodes)
- }
- if !twoChans {
- t1.Fatalf("expected node to have two channels")
- }
-
- // We'll now begin our test, modeling the available
- // wallet balance to be 5.5 BTC. We're shooting for a
- // 50/50 allocation, and have 3 BTC in channels. As a
- // result, the heuristic should try to greedily
- // allocate funds to channels.
- scores, err := prefAttach.NodeScores(graph, nil,
- maxChanSize, nodes)
- if err != nil {
- t1.Fatalf("unable to select attachment "+
- "directives: %v", err)
- }
-
- if len(scores) != len(nodes) {
- t1.Fatalf("all nodes should be scored, "+
- "instead %v were", len(scores))
- }
-
- // The candidates should have a non-zero score, and
- // have the max chan size funds recommended channel
- // size.
- for _, candidate := range scores {
- if candidate.Score == 0 {
- t1.Fatalf("Expected non-zero score")
- }
- }
-
- // Imagine a few channels are being opened, and there's
- // only 0.5 BTC left. That should leave us with channel
- // candidates of that size.
- remBalance := btcutil.Amount(btcutil.UnitsPerCoinF() * 0.5)
- scores, err = prefAttach.NodeScores(graph, nil,
- remBalance, nodes)
- if err != nil {
- t1.Fatalf("unable to select attachment "+
- "directives: %v", err)
- }
-
- if len(scores) != len(nodes) {
- t1.Fatalf("all nodes should be scored, "+
- "instead %v were", len(scores))
- }
-
- // Check that the recommended channel sizes are now the
- // remaining channel balance.
- for _, candidate := range scores {
- if candidate.Score == 0 {
- t1.Fatalf("Expected non-zero score")
- }
- }
- })
- if !success {
- break
- }
- }
-}
-
-// TestPrefAttachmentSelectSkipNodes ensures that if a node was
-// already selected as a channel counterparty, then that node will get a score
-// of zero during scoring.
-func TestPrefAttachmentSelectSkipNodes(t *testing.T) {
- t.Parallel()
-
- prand.Seed(time.Now().Unix())
-
- maxChanSize := btcutil.Amount(btcutil.UnitsPerCoin())
-
- for _, graph := range chanGraphs {
- success := t.Run(graph.name, func(t1 *testing.T) {
- graph, cleanup, err := graph.genFunc()
- if err != nil {
- t1.Fatalf("unable to create graph: %v", err)
- }
- if cleanup != nil {
- defer cleanup()
- }
-
- prefAttach := NewPrefAttachment()
-
- // Next, we'll create a simple topology of two nodes,
- // with a single channel connecting them.
- chanCapacity := btcutil.UnitsPerCoin()
- _, _, err = graph.addRandChannel(nil, nil,
- chanCapacity)
- if err != nil {
- t1.Fatalf("unable to create channel: %v", err)
- }
-
- nodes := make(map[NodeID]struct{})
- if err := graph.ForEachNode(func(n Node) er.R {
- nodes[n.PubKey()] = struct{}{}
- return nil
- }); err != nil {
- t1.Fatalf("unable to traverse graph: %v", err)
- }
-
- if len(nodes) != 2 {
- t1.Fatalf("expected 2 nodes, found %d", len(nodes))
- }
-
- // With our graph created, we'll now get the scores for
- // all nodes in the graph.
- scores, err := prefAttach.NodeScores(graph, nil,
- maxChanSize, nodes)
- if err != nil {
- t1.Fatalf("unable to select attachment "+
- "directives: %v", err)
- }
-
- if len(scores) != len(nodes) {
- t1.Fatalf("all nodes should be scored, "+
- "instead %v were", len(scores))
- }
-
- // THey should all have a score, and a maxChanSize
- // channel size recommendation.
- for _, candidate := range scores {
- if candidate.Score == 0 {
- t1.Fatalf("Expected non-zero score")
- }
- }
-
- // We'll simulate a channel update by adding the nodes
- // to our set of channels.
- var chans []LocalChannel
- for _, candidate := range scores {
- chans = append(chans,
- LocalChannel{
- Node: candidate.NodeID,
- },
- )
- }
-
- // If we attempt to make a call to the NodeScores
- // function, without providing any new information,
- // then all nodes should have a score of zero, since we
- // already got channels to them.
- scores, err = prefAttach.NodeScores(graph, chans,
- maxChanSize, nodes)
- if err != nil {
- t1.Fatalf("unable to select attachment "+
- "directives: %v", err)
- }
-
- // Since all should be given a score of 0, the map
- // should be empty.
- if len(scores) != 0 {
- t1.Fatalf("expected empty score map, "+
- "instead got %v ", len(scores))
- }
- })
- if !success {
- break
- }
- }
-}
diff --git a/lnd/autopilot/simple_graph.go b/lnd/autopilot/simple_graph.go
deleted file mode 100644
index e19f6961..00000000
--- a/lnd/autopilot/simple_graph.go
+++ /dev/null
@@ -1,68 +0,0 @@
-package autopilot
-
-import "github.com/pkt-cash/pktd/btcutil/er"
-
-// SimpleGraph stores a simplifed adj graph of a channel graph to speed
-// up graph processing by eliminating all unnecessary hashing and map access.
-type SimpleGraph struct {
- // Nodes is a map from node index to NodeID.
- Nodes []NodeID
-
- // Adj stores nodes and neighbors in an adjacency list.
- Adj [][]int
-}
-
-// NewSimpleGraph creates a simplified graph from the current channel graph.
-// Returns an error if the channel graph iteration fails due to underlying
-// failure.
-func NewSimpleGraph(g ChannelGraph) (*SimpleGraph, er.R) {
- nodes := make(map[NodeID]int)
- adj := make(map[int][]int)
- nextIndex := 0
-
- // getNodeIndex returns the integer index of the passed node.
- // The returned index is then used to create a simplifed adjacency list
- // where each node is identified by its index instead of its pubkey, and
- // also to create a mapping from node index to node pubkey.
- getNodeIndex := func(node Node) int {
- key := NodeID(node.PubKey())
- nodeIndex, ok := nodes[key]
-
- if !ok {
- nodes[key] = nextIndex
- nodeIndex = nextIndex
- nextIndex++
- }
-
- return nodeIndex
- }
-
- // Iterate over each node and each channel and update the adj and the node
- // index.
- err := g.ForEachNode(func(node Node) er.R {
- u := getNodeIndex(node)
-
- return node.ForEachChannel(func(edge ChannelEdge) er.R {
- v := getNodeIndex(edge.Peer)
-
- adj[u] = append(adj[u], v)
- return nil
- })
- })
- if err != nil {
- return nil, err
- }
-
- graph := &SimpleGraph{
- Nodes: make([]NodeID, len(nodes)),
- Adj: make([][]int, len(nodes)),
- }
-
- // Fill the adj and the node index to node pubkey mapping.
- for nodeID, nodeIndex := range nodes {
- graph.Adj[nodeIndex] = adj[nodeIndex]
- graph.Nodes[nodeIndex] = nodeID
- }
-
- return graph, nil
-}
diff --git a/lnd/autopilot/top_centrality.go b/lnd/autopilot/top_centrality.go
deleted file mode 100644
index e8c0549d..00000000
--- a/lnd/autopilot/top_centrality.go
+++ /dev/null
@@ -1,94 +0,0 @@
-package autopilot
-
-import (
- "runtime"
-
- "github.com/pkt-cash/pktd/btcutil"
- "github.com/pkt-cash/pktd/btcutil/er"
-)
-
-// TopCentrality is a simple greedy technique to create connections to nodes
-// with the top betweenness centrality value. This algorithm is usually
-// referred to as TopK in the literature. The idea is that by opening channels
-// to nodes with top betweenness centrality we also increase our own betweenness
-// centrality (given we already have at least one channel, or create at least
-// two new channels).
-// A different and much better approach is instead of selecting nodes with top
-// centrality value, we extend the graph in a loop by inserting a new non
-// existing edge and recalculate the betweenness centrality of each node. This
-// technique is usually referred to as "greedy" algorithm and gives better
-// results than TopK but is considerably slower too.
-type TopCentrality struct {
- centralityMetric *BetweennessCentrality
-}
-
-// A compile time assertion to ensure TopCentrality meets the
-// AttachmentHeuristic interface.
-var _ AttachmentHeuristic = (*TopCentrality)(nil)
-
-// NewTopCentrality constructs and returns a new TopCentrality heuristic.
-func NewTopCentrality() *TopCentrality {
- metric, err := NewBetweennessCentralityMetric(
- runtime.NumCPU(),
- )
- if err != nil {
- panic(err)
- }
-
- return &TopCentrality{
- centralityMetric: metric,
- }
-}
-
-// Name returns the name of the heuristic.
-func (g *TopCentrality) Name() string {
- return "top_centrality"
-}
-
-// NodeScores will return a [0,1] normalized map of scores for the given nodes
-// except for the ones we already have channels with. The scores will simply
-// be the betweenness centrality values of the nodes.
-// As our current implementation of betweenness centrality is non-incremental,
-// NodeScores will recalculate the centrality values on every call, which is
-// slow for large graphs.
-func (g *TopCentrality) NodeScores(graph ChannelGraph, chans []LocalChannel,
- chanSize btcutil.Amount, nodes map[NodeID]struct{}) (
- map[NodeID]*NodeScore, er.R) {
-
- // Calculate betweenness centrality for the whole graph.
- if err := g.centralityMetric.Refresh(graph); err != nil {
- return nil, err
- }
-
- normalize := true
- centrality := g.centralityMetric.GetMetric(normalize)
-
- // Create a map of the existing peers for faster filtering.
- existingPeers := make(map[NodeID]struct{})
- for _, c := range chans {
- existingPeers[c.Node] = struct{}{}
- }
-
- result := make(map[NodeID]*NodeScore, len(nodes))
- for nodeID := range nodes {
- // Skip nodes we already have channel with.
- if _, ok := existingPeers[nodeID]; ok {
- continue
- }
-
- // Skip passed nodes not in the graph. This could happen if
- // the graph changed before computing the centrality values as
- // the nodes we iterate are prefiltered by the autopilot agent.
- score, ok := centrality[nodeID]
- if !ok {
- continue
- }
-
- result[nodeID] = &NodeScore{
- NodeID: nodeID,
- Score: score,
- }
- }
-
- return result, nil
-}
diff --git a/lnd/autopilot/top_centrality_test.go b/lnd/autopilot/top_centrality_test.go
deleted file mode 100644
index 0caedb12..00000000
--- a/lnd/autopilot/top_centrality_test.go
+++ /dev/null
@@ -1,110 +0,0 @@
-package autopilot
-
-import (
- "testing"
-
- "github.com/pkt-cash/pktd/btcec"
- "github.com/pkt-cash/pktd/btcutil"
- "github.com/pkt-cash/pktd/btcutil/util"
- "github.com/stretchr/testify/require"
-)
-
-// testTopCentrality is subtest helper to which given the passed graph and
-// channels creates the expected centrality score set and checks that the
-// calculated score set matches it.
-func testTopCentrality(t *testing.T, graph testGraph,
- graphNodes map[int]*btcec.PublicKey, channelsWith []int) {
-
- topCentrality := NewTopCentrality()
-
- var channels []LocalChannel
- for _, ch := range channelsWith {
- channels = append(channels, LocalChannel{
- Node: NewNodeID(graphNodes[ch]),
- })
- }
-
- // Start iteration from -1 to also test the case where the node set
- // is empty.
- for i := -1; i < len(graphNodes); i++ {
- nodes := make(map[NodeID]struct{})
- expected := make(map[NodeID]*NodeScore)
-
- for j := 0; j <= i; j++ {
- // Add node to the interest set.
- nodeID := NewNodeID(graphNodes[j])
- nodes[nodeID] = struct{}{}
-
- // Add to the expected set unless it's a node we have
- // a channel with.
- haveChannel := false
- for _, ch := range channels {
- if nodeID == ch.Node {
- haveChannel = true
- break
- }
- }
-
- if !haveChannel {
- score := normalizedTestGraphCentrality[j]
- expected[nodeID] = &NodeScore{
- NodeID: nodeID,
- Score: score,
- }
- }
- }
-
- chanSize := btcutil.UnitsPerCoin()
-
- // Attempt to get centrality scores and expect
- // that the result equals with the expected set.
- scores, err := topCentrality.NodeScores(
- graph, channels, chanSize, nodes,
- )
-
- util.RequireNoErr(t, err)
- require.Equal(t, expected, scores)
- }
-}
-
-// TestTopCentrality tests that we return the correct normalized centralitiy
-// values given a non empty graph, and given our node has an increasing amount
-// of channels from 0 to N-1 simulating the whole range from non-connected to
-// fully connected.
-func TestTopCentrality(t *testing.T) {
- // Generate channels: {}, {0}, {0, 1}, ... {0, 1, ..., N-1}
- channelsWith := [][]int{nil}
-
- for i := 0; i < centralityTestGraph.nodes; i++ {
- channels := make([]int, i+1)
- for j := 0; j <= i; j++ {
- channels[j] = j
- }
- channelsWith = append(channelsWith, channels)
- }
-
- for _, chanGraph := range chanGraphs {
- chanGraph := chanGraph
-
- success := t.Run(chanGraph.name, func(t *testing.T) {
- t.Parallel()
-
- graph, cleanup, err := chanGraph.genFunc()
- util.RequireNoErr(t, err, "unable to create graph")
- if cleanup != nil {
- defer cleanup()
- }
-
- // Build the test graph.
- graphNodes := buildTestGraph(
- t, graph, centralityTestGraph,
- )
-
- for _, chans := range channelsWith {
- testTopCentrality(t, graph, graphNodes, chans)
- }
- })
-
- require.True(t, success)
- }
-}
diff --git a/lnd/breacharbiter.go b/lnd/breacharbiter.go
deleted file mode 100644
index 4ed6d1f0..00000000
--- a/lnd/breacharbiter.go
+++ /dev/null
@@ -1,1589 +0,0 @@
-package lnd
-
-import (
- "bytes"
- "encoding/binary"
- "io"
- "sync"
-
- "github.com/davecgh/go-spew/spew"
- "github.com/pkt-cash/pktd/blockchain"
- "github.com/pkt-cash/pktd/btcutil"
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/btcutil/util"
- "github.com/pkt-cash/pktd/chaincfg/chainhash"
- "github.com/pkt-cash/pktd/pktlog/log"
- "github.com/pkt-cash/pktd/txscript"
- "github.com/pkt-cash/pktd/wire"
-
- "github.com/pkt-cash/pktd/lnd/chainntnfs"
- "github.com/pkt-cash/pktd/lnd/channeldb"
- "github.com/pkt-cash/pktd/lnd/channeldb/kvdb"
- "github.com/pkt-cash/pktd/lnd/htlcswitch"
- "github.com/pkt-cash/pktd/lnd/input"
- "github.com/pkt-cash/pktd/lnd/labels"
- "github.com/pkt-cash/pktd/lnd/lnwallet"
- "github.com/pkt-cash/pktd/lnd/lnwallet/chainfee"
-)
-
-var (
- // retributionBucket stores retribution state on disk between detecting
- // a contract breach, broadcasting a justice transaction that sweeps the
- // channel, and finally witnessing the justice transaction confirm on
- // the blockchain. It is critical that such state is persisted on disk,
- // so that if our node restarts at any point during the retribution
- // procedure, we can recover and continue from the persisted state.
- retributionBucket = []byte("retribution")
-
- // justiceTxnBucket holds the finalized justice transactions for all
- // breached contracts. Entries are added to the justice txn bucket just
- // before broadcasting the sweep txn.
- justiceTxnBucket = []byte("justice-txn")
-
- // errBrarShuttingDown is an error returned if the breacharbiter has
- // been signalled to exit.
- errBrarShuttingDown = Err.CodeWithDetail("errBrarShuttingDown",
- "breacharbiter shutting down")
-)
-
-// ContractBreachEvent is an event the breachArbiter will receive in case a
-// contract breach is observed on-chain. It contains the necessary information
-// to handle the breach, and a ProcessACK channel we will use to ACK the event
-// when we have safely stored all the necessary information.
-type ContractBreachEvent struct {
- // ChanPoint is the channel point of the breached channel.
- ChanPoint wire.OutPoint
-
- // ProcessACK is an error channel where a nil error should be sent
- // iff the breach retribution info is safely stored in the retribution
- // store. In case storing the information to the store fails, a non-nil
- // error should be sent.
- ProcessACK chan er.R
-
- // BreachRetribution is the information needed to act on this contract
- // breach.
- BreachRetribution *lnwallet.BreachRetribution
-}
-
-// BreachConfig bundles the required subsystems used by the breach arbiter. An
-// instance of BreachConfig is passed to newBreachArbiter during instantiation.
-type BreachConfig struct {
- // CloseLink allows the breach arbiter to shutdown any channel links for
- // which it detects a breach, ensuring now further activity will
- // continue across the link. The method accepts link's channel point and
- // a close type to be included in the channel close summary.
- CloseLink func(*wire.OutPoint, htlcswitch.ChannelCloseType)
-
- // DB provides access to the user's channels, allowing the breach
- // arbiter to determine the current state of a user's channels, and how
- // it should respond to channel closure.
- DB *channeldb.DB
-
- // Estimator is used by the breach arbiter to determine an appropriate
- // fee level when generating, signing, and broadcasting sweep
- // transactions.
- Estimator chainfee.Estimator
-
- // GenSweepScript generates the receiving scripts for swept outputs.
- GenSweepScript func() ([]byte, er.R)
-
- // Notifier provides a publish/subscribe interface for event driven
- // notifications regarding the confirmation of txids.
- Notifier chainntnfs.ChainNotifier
-
- // PublishTransaction facilitates the process of broadcasting a
- // transaction to the network.
- PublishTransaction func(*wire.MsgTx, string) er.R
-
- // ContractBreaches is a channel where the breachArbiter will receive
- // notifications in the event of a contract breach being observed. A
- // ContractBreachEvent must be ACKed by the breachArbiter, such that
- // the sending subsystem knows that the event is properly handed off.
- ContractBreaches <-chan *ContractBreachEvent
-
- // Signer is used by the breach arbiter to generate sweep transactions,
- // which move coins from previously open channels back to the user's
- // wallet.
- Signer input.Signer
-
- // Store is a persistent resource that maintains information regarding
- // breached channels. This is used in conjunction with DB to recover
- // from crashes, restarts, or other failures.
- Store RetributionStore
-}
-
-// breachArbiter is a special subsystem which is responsible for watching and
-// acting on the detection of any attempted uncooperative channel breaches by
-// channel counterparties. This file essentially acts as deterrence code for
-// those attempting to launch attacks against the daemon. In practice it's
-// expected that the logic in this file never gets executed, but it is
-// important to have it in place just in case we encounter cheating channel
-// counterparties.
-// TODO(roasbeef): closures in config for subsystem pointers to decouple?
-type breachArbiter struct {
- started sync.Once
- stopped sync.Once
-
- cfg *BreachConfig
-
- quit chan struct{}
- wg sync.WaitGroup
- sync.Mutex
-}
-
-// newBreachArbiter creates a new instance of a breachArbiter initialized with
-// its dependent objects.
-func newBreachArbiter(cfg *BreachConfig) *breachArbiter {
- return &breachArbiter{
- cfg: cfg,
- quit: make(chan struct{}),
- }
-}
-
-// Start is an idempotent method that officially starts the breachArbiter along
-// with all other goroutines it needs to perform its functions.
-func (b *breachArbiter) Start() er.R {
- var err er.R
- b.started.Do(func() {
- err = b.start()
- })
- return err
-}
-
-func (b *breachArbiter) start() er.R {
- log.Tracef("Starting breach arbiter")
-
- // Load all retributions currently persisted in the retribution store.
- var breachRetInfos map[wire.OutPoint]retributionInfo
- if err := b.cfg.Store.ForAll(func(ret *retributionInfo) er.R {
- breachRetInfos[ret.chanPoint] = *ret
- return nil
- }, func() {
- breachRetInfos = make(map[wire.OutPoint]retributionInfo)
- }); err != nil {
- return err
- }
-
- // Load all currently closed channels from disk, we will use the
- // channels that have been marked fully closed to filter the retribution
- // information loaded from disk. This is necessary in the event that the
- // channel was marked fully closed, but was not removed from the
- // retribution store.
- closedChans, err := b.cfg.DB.FetchClosedChannels(false)
- if err != nil {
- log.Errorf("Unable to fetch closing channels: %v", err)
- return err
- }
-
- // Using the set of non-pending, closed channels, reconcile any
- // discrepancies between the channeldb and the retribution store by
- // removing any retribution information for which we have already
- // finished our responsibilities. If the removal is successful, we also
- // remove the entry from our in-memory map, to avoid any further action
- // for this channel.
- // TODO(halseth): no need continue on IsPending once closed channels
- // actually means close transaction is confirmed.
- for _, chanSummary := range closedChans {
- if chanSummary.IsPending {
- continue
- }
-
- chanPoint := &chanSummary.ChanPoint
- if _, ok := breachRetInfos[*chanPoint]; ok {
- if err := b.cfg.Store.Remove(chanPoint); err != nil {
- log.Errorf("Unable to remove closed "+
- "chanid=%v from breach arbiter: %v",
- chanPoint, err)
- return err
- }
- delete(breachRetInfos, *chanPoint)
- }
- }
-
- // Spawn the exactRetribution tasks to monitor and resolve any breaches
- // that were loaded from the retribution store.
- for chanPoint := range breachRetInfos {
- retInfo := breachRetInfos[chanPoint]
-
- // Register for a notification when the breach transaction is
- // confirmed on chain.
- breachTXID := retInfo.commitHash
- breachScript := retInfo.breachedOutputs[0].signDesc.Output.PkScript
- confChan, err := b.cfg.Notifier.RegisterConfirmationsNtfn(
- &breachTXID, breachScript, 1, retInfo.breachHeight,
- )
- if err != nil {
- log.Errorf("Unable to register for conf updates "+
- "for txid: %v, err: %v", breachTXID, err)
- return err
- }
-
- // Launch a new goroutine which to finalize the channel
- // retribution after the breach transaction confirms.
- b.wg.Add(1)
- go b.exactRetribution(confChan, &retInfo)
- }
-
- // Start watching the remaining active channels!
- b.wg.Add(1)
- go b.contractObserver()
-
- return nil
-}
-
-// Stop is an idempotent method that signals the breachArbiter to execute a
-// graceful shutdown. This function will block until all goroutines spawned by
-// the breachArbiter have gracefully exited.
-func (b *breachArbiter) Stop() er.R {
- b.stopped.Do(func() {
- log.Infof("Breach arbiter shutting down")
-
- close(b.quit)
- b.wg.Wait()
- })
- return nil
-}
-
-// IsBreached queries the breach arbiter's retribution store to see if it is
-// aware of any channel breaches for a particular channel point.
-func (b *breachArbiter) IsBreached(chanPoint *wire.OutPoint) (bool, er.R) {
- return b.cfg.Store.IsBreached(chanPoint)
-}
-
-// contractObserver is the primary goroutine for the breachArbiter. This
-// goroutine is responsible for handling breach events coming from the
-// contractcourt on the ContractBreaches channel. If a channel breach is
-// detected, then the contractObserver will execute the retribution logic
-// required to sweep ALL outputs from a contested channel into the daemon's
-// wallet.
-//
-// NOTE: This MUST be run as a goroutine.
-func (b *breachArbiter) contractObserver() {
- defer b.wg.Done()
-
- log.Infof("Starting contract observer, watching for breaches.")
-
- for {
- select {
- case breachEvent := <-b.cfg.ContractBreaches:
- // We have been notified about a contract breach!
- // Handle the handoff, making sure we ACK the event
- // after we have safely added it to the retribution
- // store.
- b.wg.Add(1)
- go b.handleBreachHandoff(breachEvent)
-
- case <-b.quit:
- return
- }
- }
-}
-
-// convertToSecondLevelRevoke takes a breached output, and a transaction that
-// spends it to the second level, and mutates the breach output into one that
-// is able to properly sweep that second level output. We'll use this function
-// when we go to sweep a breached commitment transaction, but the cheating
-// party has already attempted to take it to the second level
-func convertToSecondLevelRevoke(bo *breachedOutput, breachInfo *retributionInfo,
- spendDetails *chainntnfs.SpendDetail) {
-
- // In this case, we'll modify the witness type of this output to
- // actually prepare for a second level revoke.
- bo.witnessType = input.HtlcSecondLevelRevoke
-
- // We'll also redirect the outpoint to this second level output, so the
- // spending transaction updates it inputs accordingly.
- spendingTx := spendDetails.SpendingTx
- oldOp := bo.outpoint
- bo.outpoint = wire.OutPoint{
- Hash: spendingTx.TxHash(),
- Index: 0,
- }
-
- // Next, we need to update the amount so we can do fee estimation
- // properly, and also so we can generate a valid signature as we need
- // to know the new input value (the second level transactions shaves
- // off some funds to fees).
- newAmt := spendingTx.TxOut[0].Value
- bo.amt = btcutil.Amount(newAmt)
- bo.signDesc.Output.Value = newAmt
- bo.signDesc.Output.PkScript = spendingTx.TxOut[0].PkScript
-
- // Finally, we'll need to adjust the witness program in the
- // SignDescriptor.
- bo.signDesc.WitnessScript = bo.secondLevelWitnessScript
-
- log.Warnf("HTLC(%v) for ChannelPoint(%v) has been spent to the "+
- "second-level, adjusting -> %v", oldOp, breachInfo.chanPoint,
- bo.outpoint)
-}
-
-// waitForSpendEvent waits for any of the breached outputs to get spent, and
-// mutates the breachInfo to be able to sweep it. This method should be used
-// when we fail to publish the justice tx because of a double spend, indicating
-// that the counter party has taken one of the breached outputs to the second
-// level. The spendNtfns map is a cache used to store registered spend
-// subscriptions, in case we must call this method multiple times.
-func (b *breachArbiter) waitForSpendEvent(breachInfo *retributionInfo,
- spendNtfns map[wire.OutPoint]*chainntnfs.SpendEvent) er.R {
-
- inputs := breachInfo.breachedOutputs
-
- // spend is used to wrap the index of the output that gets spent
- // together with the spend details.
- type spend struct {
- index int
- detail *chainntnfs.SpendDetail
- }
-
- // We create a channel the first goroutine that gets a spend event can
- // signal. We make it buffered in case multiple spend events come in at
- // the same time.
- anySpend := make(chan struct{}, len(inputs))
-
- // The allSpends channel will be used to pass spend events from all the
- // goroutines that detects a spend before they are signalled to exit.
- allSpends := make(chan spend, len(inputs))
-
- // exit will be used to signal the goroutines that they can exit.
- exit := make(chan struct{})
- var wg sync.WaitGroup
-
- // We'll now launch a goroutine for each of the HTLC outputs, that will
- // signal the moment they detect a spend event.
- for i := range inputs {
- breachedOutput := &inputs[i]
-
- log.Infof("Checking spend from %v(%v) for ChannelPoint(%v)",
- breachedOutput.witnessType, breachedOutput.outpoint,
- breachInfo.chanPoint)
-
- // If we have already registered for a notification for this
- // output, we'll reuse it.
- spendNtfn, ok := spendNtfns[breachedOutput.outpoint]
- if !ok {
- var err er.R
- spendNtfn, err = b.cfg.Notifier.RegisterSpendNtfn(
- &breachedOutput.outpoint,
- breachedOutput.signDesc.Output.PkScript,
- breachInfo.breachHeight,
- )
- if err != nil {
- log.Errorf("Unable to check for spentness "+
- "of outpoint=%v: %v",
- breachedOutput.outpoint, err)
-
- // Registration may have failed if we've been
- // instructed to shutdown. If so, return here
- // to avoid entering an infinite loop.
- select {
- case <-b.quit:
- return errBrarShuttingDown.Default()
- default:
- continue
- }
- }
- spendNtfns[breachedOutput.outpoint] = spendNtfn
- }
-
- // Launch a goroutine waiting for a spend event.
- b.wg.Add(1)
- wg.Add(1)
- go func(index int, spendEv *chainntnfs.SpendEvent) {
- defer b.wg.Done()
- defer wg.Done()
-
- select {
- // The output has been taken to the second level!
- case sp, ok := <-spendEv.Spend:
- if !ok {
- return
- }
-
- log.Infof("Detected spend on %s(%v) by "+
- "txid(%v) for ChannelPoint(%v)",
- inputs[index].witnessType,
- inputs[index].outpoint,
- sp.SpenderTxHash,
- breachInfo.chanPoint)
-
- // First we send the spend event on the
- // allSpends channel, such that it can be
- // handled after all go routines have exited.
- allSpends <- spend{index, sp}
-
- // Finally we'll signal the anySpend channel
- // that a spend was detected, such that the
- // other goroutines can be shut down.
- anySpend <- struct{}{}
- case <-exit:
- return
- case <-b.quit:
- return
- }
- }(i, spendNtfn)
- }
-
- // We'll wait for any of the outputs to be spent, or that we are
- // signalled to exit.
- select {
- // A goroutine have signalled that a spend occurred.
- case <-anySpend:
- // Signal for the remaining goroutines to exit.
- close(exit)
- wg.Wait()
-
- // At this point all goroutines that can send on the allSpends
- // channel have exited. We can therefore safely close the
- // channel before ranging over its content.
- close(allSpends)
-
- doneOutputs := make(map[int]struct{})
- for s := range allSpends {
- breachedOutput := &inputs[s.index]
- delete(spendNtfns, breachedOutput.outpoint)
-
- switch breachedOutput.witnessType {
- case input.HtlcAcceptedRevoke:
- fallthrough
- case input.HtlcOfferedRevoke:
- log.Infof("Spend on second-level"+
- "%s(%v) for ChannelPoint(%v) "+
- "transitions to second-level output",
- breachedOutput.witnessType,
- breachedOutput.outpoint,
- breachInfo.chanPoint)
-
- // In this case we'll morph our initial revoke
- // spend to instead point to the second level
- // output, and update the sign descriptor in the
- // process.
- convertToSecondLevelRevoke(
- breachedOutput, breachInfo, s.detail,
- )
-
- continue
- }
-
- log.Infof("Spend on %s(%v) for ChannelPoint(%v) "+
- "transitions output to terminal state, "+
- "removing input from justice transaction",
- breachedOutput.witnessType,
- breachedOutput.outpoint, breachInfo.chanPoint)
-
- doneOutputs[s.index] = struct{}{}
- }
-
- // Filter the inputs for which we can no longer proceed.
- var nextIndex int
- for i := range inputs {
- if _, ok := doneOutputs[i]; ok {
- continue
- }
-
- inputs[nextIndex] = inputs[i]
- nextIndex++
- }
-
- // Update our remaining set of outputs before continuing with
- // another attempt at publication.
- breachInfo.breachedOutputs = inputs[:nextIndex]
-
- case <-b.quit:
- return errBrarShuttingDown.Default()
- }
-
- return nil
-}
-
-// exactRetribution is a goroutine which is executed once a contract breach has
-// been detected by a breachObserver. This function is responsible for
-// punishing a counterparty for violating the channel contract by sweeping ALL
-// the lingering funds within the channel into the daemon's wallet.
-//
-// NOTE: This MUST be run as a goroutine.
-func (b *breachArbiter) exactRetribution(confChan *chainntnfs.ConfirmationEvent,
- breachInfo *retributionInfo) {
-
- defer b.wg.Done()
-
- // TODO(roasbeef): state needs to be checkpointed here
- var breachConfHeight uint32
- select {
- case breachConf, ok := <-confChan.Confirmed:
- // If the second value is !ok, then the channel has been closed
- // signifying a daemon shutdown, so we exit.
- if !ok {
- return
- }
-
- breachConfHeight = breachConf.BlockHeight
-
- // Otherwise, if this is a real confirmation notification, then
- // we fall through to complete our duty.
- case <-b.quit:
- return
- }
-
- log.Debugf("Breach transaction %v has been confirmed, sweeping "+
- "revoked funds", breachInfo.commitHash)
-
- // We may have to wait for some of the HTLC outputs to be spent to the
- // second level before broadcasting the justice tx. We'll store the
- // SpendEvents between each attempt to not re-register uneccessarily.
- spendNtfns := make(map[wire.OutPoint]*chainntnfs.SpendEvent)
-
- finalTx, err := b.cfg.Store.GetFinalizedTxn(&breachInfo.chanPoint)
- if err != nil {
- log.Errorf("Unable to get finalized txn for"+
- "chanid=%v: %v", &breachInfo.chanPoint, err)
- return
- }
-
- // If this retribution has not been finalized before, we will first
- // construct a sweep transaction and write it to disk. This will allow
- // the breach arbiter to re-register for notifications for the justice
- // txid.
-justiceTxBroadcast:
- if finalTx == nil {
- // With the breach transaction confirmed, we now create the
- // justice tx which will claim ALL the funds within the
- // channel.
- finalTx, err = b.createJusticeTx(breachInfo)
- if err != nil {
- log.Errorf("Unable to create justice tx: %v", err)
- return
- }
-
- // Persist our finalized justice transaction before making an
- // attempt to broadcast.
- err := b.cfg.Store.Finalize(&breachInfo.chanPoint, finalTx)
- if err != nil {
- log.Errorf("Unable to finalize justice tx for "+
- "chanid=%v: %v", &breachInfo.chanPoint, err)
- return
- }
- }
-
- log.Debugf("Broadcasting justice tx: %v", log.C(func() string {
- return spew.Sdump(finalTx)
- }))
-
- // We'll now attempt to broadcast the transaction which finalized the
- // channel's retribution against the cheating counter party.
- label := labels.MakeLabel(labels.LabelTypeJusticeTransaction, nil)
- err = b.cfg.PublishTransaction(finalTx, label)
- if err != nil {
- log.Errorf("Unable to broadcast justice tx: %v", err)
-
- if lnwallet.ErrDoubleSpend.Is(err) {
- // Broadcasting the transaction failed because of a
- // conflict either in the mempool or in chain. We'll
- // now create spend subscriptions for all HTLC outputs
- // on the commitment transaction that could possibly
- // have been spent, and wait for any of them to
- // trigger.
- log.Infof("Waiting for a spend event before " +
- "attempting to craft new justice tx.")
- finalTx = nil
-
- err := b.waitForSpendEvent(breachInfo, spendNtfns)
- if err != nil {
- if !errBrarShuttingDown.Is(err) {
- log.Errorf("error waiting for "+
- "spend event: %v", err)
- }
- return
- }
-
- if len(breachInfo.breachedOutputs) == 0 {
- log.Debugf("No more outputs to sweep for "+
- "breach, marking ChannelPoint(%v) "+
- "fully resolved", breachInfo.chanPoint)
-
- err = b.cleanupBreach(&breachInfo.chanPoint)
- if err != nil {
- log.Errorf("Failed to cleanup "+
- "breached ChannelPoint(%v): %v",
- breachInfo.chanPoint, err)
- }
- return
- }
-
- log.Infof("Attempting another justice tx "+
- "with %d inputs",
- len(breachInfo.breachedOutputs))
-
- goto justiceTxBroadcast
- }
- }
-
- // As a conclusionary step, we register for a notification to be
- // dispatched once the justice tx is confirmed. After confirmation we
- // notify the caller that initiated the retribution workflow that the
- // deed has been done.
- justiceTXID := finalTx.TxHash()
- justiceScript := finalTx.TxOut[0].PkScript
- confChan, err = b.cfg.Notifier.RegisterConfirmationsNtfn(
- &justiceTXID, justiceScript, 1, breachConfHeight,
- )
- if err != nil {
- log.Errorf("Unable to register for conf for txid(%v): %v",
- justiceTXID, err)
- return
- }
-
- select {
- case _, ok := <-confChan.Confirmed:
- if !ok {
- return
- }
-
- // Compute both the total value of funds being swept and the
- // amount of funds that were revoked from the counter party.
- var totalFunds, revokedFunds btcutil.Amount
- for _, inp := range breachInfo.breachedOutputs {
- totalFunds += inp.Amount()
-
- // If the output being revoked is the remote commitment
- // output or an offered HTLC output, it's amount
- // contributes to the value of funds being revoked from
- // the counter party.
- switch inp.WitnessType() {
- case input.CommitmentRevoke:
- revokedFunds += inp.Amount()
- case input.HtlcOfferedRevoke:
- revokedFunds += inp.Amount()
- default:
- }
- }
-
- log.Infof("Justice for ChannelPoint(%v) has "+
- "been served, %v revoked funds (%v total) "+
- "have been claimed", breachInfo.chanPoint,
- revokedFunds, totalFunds)
-
- err = b.cleanupBreach(&breachInfo.chanPoint)
- if err != nil {
- log.Errorf("Failed to cleanup breached "+
- "ChannelPoint(%v): %v", breachInfo.chanPoint,
- err)
- }
-
- // TODO(roasbeef): add peer to blacklist?
-
- // TODO(roasbeef): close other active channels with offending
- // peer
-
- return
- case <-b.quit:
- return
- }
-}
-
-// cleanupBreach marks the given channel point as fully resolved and removes the
-// retribution for that the channel from the retribution store.
-func (b *breachArbiter) cleanupBreach(chanPoint *wire.OutPoint) er.R {
- // With the channel closed, mark it in the database as such.
- err := b.cfg.DB.MarkChanFullyClosed(chanPoint)
- if err != nil {
- return er.Errorf("unable to mark chan as closed: %v", err)
- }
-
- // Justice has been carried out; we can safely delete the retribution
- // info from the database.
- err = b.cfg.Store.Remove(chanPoint)
- if err != nil {
- return er.Errorf("unable to remove retribution from db: %v",
- err)
- }
-
- return nil
-}
-
-// handleBreachHandoff handles a new breach event, by writing it to disk, then
-// notifies the breachArbiter contract observer goroutine that a channel's
-// contract has been breached by the prior counterparty. Once notified the
-// breachArbiter will attempt to sweep ALL funds within the channel using the
-// information provided within the BreachRetribution generated due to the
-// breach of channel contract. The funds will be swept only after the breaching
-// transaction receives a necessary number of confirmations.
-//
-// NOTE: This MUST be run as a goroutine.
-func (b *breachArbiter) handleBreachHandoff(breachEvent *ContractBreachEvent) {
- defer b.wg.Done()
-
- chanPoint := breachEvent.ChanPoint
- log.Debugf("Handling breach handoff for ChannelPoint(%v)",
- chanPoint)
-
- // A read from this channel indicates that a channel breach has been
- // detected! So we notify the main coordination goroutine with the
- // information needed to bring the counterparty to justice.
- breachInfo := breachEvent.BreachRetribution
- log.Warnf("REVOKED STATE #%v FOR ChannelPoint(%v) "+
- "broadcast, REMOTE PEER IS DOING SOMETHING "+
- "SKETCHY!!!", breachInfo.RevokedStateNum,
- chanPoint)
-
- // Immediately notify the HTLC switch that this link has been
- // breached in order to ensure any incoming or outgoing
- // multi-hop HTLCs aren't sent over this link, nor any other
- // links associated with this peer.
- b.cfg.CloseLink(&chanPoint, htlcswitch.CloseBreach)
-
- // TODO(roasbeef): need to handle case of remote broadcast
- // mid-local initiated state-transition, possible
- // false-positive?
-
- // Acquire the mutex to ensure consistency between the call to
- // IsBreached and Add below.
- b.Lock()
-
- // We first check if this breach info is already added to the
- // retribution store.
- breached, err := b.cfg.Store.IsBreached(&chanPoint)
- if err != nil {
- b.Unlock()
- log.Errorf("Unable to check breach info in DB: %v", err)
-
- select {
- case breachEvent.ProcessACK <- err:
- case <-b.quit:
- }
- return
- }
-
- // If this channel is already marked as breached in the retribution
- // store, we already have handled the handoff for this breach. In this
- // case we can safely ACK the handoff, and return.
- if breached {
- b.Unlock()
-
- select {
- case breachEvent.ProcessACK <- nil:
- case <-b.quit:
- }
- return
- }
-
- // Using the breach information provided by the wallet and the
- // channel snapshot, construct the retribution information that
- // will be persisted to disk.
- retInfo := newRetributionInfo(&chanPoint, breachInfo)
-
- // Persist the pending retribution state to disk.
- err = b.cfg.Store.Add(retInfo)
- b.Unlock()
- if err != nil {
- log.Errorf("Unable to persist retribution "+
- "info to db: %v", err)
- }
-
- // Now that the breach has been persisted, try to send an
- // acknowledgment back to the close observer with the error. If
- // the ack is successful, the close observer will mark the
- // channel as pending-closed in the channeldb.
- select {
- case breachEvent.ProcessACK <- err:
- // Bail if we failed to persist retribution info.
- if err != nil {
- return
- }
-
- case <-b.quit:
- return
- }
-
- // Now that a new channel contract has been added to the retribution
- // store, we first register for a notification to be dispatched once
- // the breach transaction (the revoked commitment transaction) has been
- // confirmed in the chain to ensure we're not dealing with a moving
- // target.
- breachTXID := &retInfo.commitHash
- breachScript := retInfo.breachedOutputs[0].signDesc.Output.PkScript
- cfChan, err := b.cfg.Notifier.RegisterConfirmationsNtfn(
- breachTXID, breachScript, 1, retInfo.breachHeight,
- )
- if err != nil {
- log.Errorf("Unable to register for conf updates for "+
- "txid: %v, err: %v", breachTXID, err)
- return
- }
-
- log.Warnf("A channel has been breached with txid: %v. Waiting "+
- "for confirmation, then justice will be served!", breachTXID)
-
- // With the retribution state persisted, channel close persisted, and
- // notification registered, we launch a new goroutine which will
- // finalize the channel retribution after the breach transaction has
- // been confirmed.
- b.wg.Add(1)
- go b.exactRetribution(cfChan, retInfo)
-}
-
-// breachedOutput contains all the information needed to sweep a breached
-// output. A breached output is an output that we are now entitled to due to a
-// revoked commitment transaction being broadcast.
-type breachedOutput struct {
- amt btcutil.Amount
- outpoint wire.OutPoint
- witnessType input.StandardWitnessType
- signDesc input.SignDescriptor
- confHeight uint32
-
- secondLevelWitnessScript []byte
-
- witnessFunc input.WitnessGenerator
-}
-
-// makeBreachedOutput assembles a new breachedOutput that can be used by the
-// breach arbiter to construct a justice or sweep transaction.
-func makeBreachedOutput(outpoint *wire.OutPoint,
- witnessType input.StandardWitnessType,
- secondLevelScript []byte,
- signDescriptor *input.SignDescriptor,
- confHeight uint32) breachedOutput {
-
- amount := signDescriptor.Output.Value
-
- return breachedOutput{
- amt: btcutil.Amount(amount),
- outpoint: *outpoint,
- secondLevelWitnessScript: secondLevelScript,
- witnessType: witnessType,
- signDesc: *signDescriptor,
- confHeight: confHeight,
- }
-}
-
-// Amount returns the number of satoshis contained in the breached output.
-func (bo *breachedOutput) Amount() btcutil.Amount {
- return bo.amt
-}
-
-// OutPoint returns the breached output's identifier that is to be included as a
-// transaction input.
-func (bo *breachedOutput) OutPoint() *wire.OutPoint {
- return &bo.outpoint
-}
-
-// RequiredTxOut returns a non-nil TxOut if input commits to a certain
-// transaction output. This is used in the SINGLE|ANYONECANPAY case to make
-// sure any presigned input is still valid by including the output.
-func (bo *breachedOutput) RequiredTxOut() *wire.TxOut {
- return nil
-}
-
-// RequiredLockTime returns whether this input commits to a tx locktime that
-// must be used in the transaction including it.
-func (bo *breachedOutput) RequiredLockTime() (uint32, bool) {
- return 0, false
-}
-
-// WitnessType returns the type of witness that must be generated to spend the
-// breached output.
-func (bo *breachedOutput) WitnessType() input.WitnessType {
- return bo.witnessType
-}
-
-// SignDesc returns the breached output's SignDescriptor, which is used during
-// signing to compute the witness.
-func (bo *breachedOutput) SignDesc() *input.SignDescriptor {
- return &bo.signDesc
-}
-
-// CraftInputScript computes a valid witness that allows us to spend from the
-// breached output. It does so by first generating and memoizing the witness
-// generation function, which parameterized primarily by the witness type and
-// sign descriptor. The method then returns the witness computed by invoking
-// this function on the first and subsequent calls.
-func (bo *breachedOutput) CraftInputScript(signer input.Signer, txn *wire.MsgTx,
- hashCache *txscript.TxSigHashes, txinIdx int) (*input.Script, er.R) {
-
- // First, we ensure that the witness generation function has been
- // initialized for this breached output.
- bo.witnessFunc = bo.witnessType.WitnessGenerator(signer, bo.SignDesc())
-
- // Now that we have ensured that the witness generation function has
- // been initialized, we can proceed to execute it and generate the
- // witness for this particular breached output.
- return bo.witnessFunc(txn, hashCache, txinIdx)
-}
-
-// BlocksToMaturity returns the relative timelock, as a number of blocks, that
-// must be built on top of the confirmation height before the output can be
-// spent.
-func (bo *breachedOutput) BlocksToMaturity() uint32 {
- // If the output is a to_remote output we can claim, and it's of the
- // confirmed type, we must wait one block before claiming it.
- if bo.witnessType == input.CommitmentToRemoteConfirmed {
- return 1
- }
-
- // All other breached outputs have no CSV delay.
- return 0
-}
-
-// HeightHint returns the minimum height at which a confirmed spending tx can
-// occur.
-func (bo *breachedOutput) HeightHint() uint32 {
- return bo.confHeight
-}
-
-// UnconfParent returns information about a possibly unconfirmed parent tx.
-func (bo *breachedOutput) UnconfParent() *input.TxInfo {
- return nil
-}
-
-// Add compile-time constraint ensuring breachedOutput implements the Input
-// interface.
-var _ input.Input = (*breachedOutput)(nil)
-
-// retributionInfo encapsulates all the data needed to sweep all the contested
-// funds within a channel whose contract has been breached by the prior
-// counterparty. This struct is used to create the justice transaction which
-// spends all outputs of the commitment transaction into an output controlled
-// by the wallet.
-type retributionInfo struct {
- commitHash chainhash.Hash
- chanPoint wire.OutPoint
- chainHash chainhash.Hash
- breachHeight uint32
-
- breachedOutputs []breachedOutput
-}
-
-// newRetributionInfo constructs a retributionInfo containing all the
-// information required by the breach arbiter to recover funds from breached
-// channels. The information is primarily populated using the BreachRetribution
-// delivered by the wallet when it detects a channel breach.
-func newRetributionInfo(chanPoint *wire.OutPoint,
- breachInfo *lnwallet.BreachRetribution) *retributionInfo {
-
- // Determine the number of second layer HTLCs we will attempt to sweep.
- nHtlcs := len(breachInfo.HtlcRetributions)
-
- // Initialize a slice to hold the outputs we will attempt to sweep. The
- // maximum capacity of the slice is set to 2+nHtlcs to handle the case
- // where the local, remote, and all HTLCs are not dust outputs. All
- // HTLC outputs provided by the wallet are guaranteed to be non-dust,
- // though the commitment outputs are conditionally added depending on
- // the nil-ness of their sign descriptors.
- breachedOutputs := make([]breachedOutput, 0, nHtlcs+2)
-
- // First, record the breach information for the local channel point if
- // it is not considered dust, which is signaled by a non-nil sign
- // descriptor. Here we use CommitmentNoDelay (or
- // CommitmentNoDelayTweakless for newer commitments) since this output
- // belongs to us and has no time-based constraints on spending.
- if breachInfo.LocalOutputSignDesc != nil {
- witnessType := input.CommitmentNoDelay
- if breachInfo.LocalOutputSignDesc.SingleTweak == nil {
- witnessType = input.CommitSpendNoDelayTweakless
- }
-
- // If the local delay is non-zero, it means this output is of
- // the confirmed to_remote type.
- if breachInfo.LocalDelay != 0 {
- witnessType = input.CommitmentToRemoteConfirmed
- }
-
- localOutput := makeBreachedOutput(
- &breachInfo.LocalOutpoint,
- witnessType,
- // No second level script as this is a commitment
- // output.
- nil,
- breachInfo.LocalOutputSignDesc,
- breachInfo.BreachHeight,
- )
-
- breachedOutputs = append(breachedOutputs, localOutput)
- }
-
- // Second, record the same information regarding the remote outpoint,
- // again if it is not dust, which belongs to the party who tried to
- // steal our money! Here we set witnessType of the breachedOutput to
- // CommitmentRevoke, since we will be using a revoke key, withdrawing
- // the funds from the commitment transaction immediately.
- if breachInfo.RemoteOutputSignDesc != nil {
- remoteOutput := makeBreachedOutput(
- &breachInfo.RemoteOutpoint,
- input.CommitmentRevoke,
- // No second level script as this is a commitment
- // output.
- nil,
- breachInfo.RemoteOutputSignDesc,
- breachInfo.BreachHeight,
- )
-
- breachedOutputs = append(breachedOutputs, remoteOutput)
- }
-
- // Lastly, for each of the breached HTLC outputs, record each as a
- // breached output with the appropriate witness type based on its
- // directionality. All HTLC outputs provided by the wallet are assumed
- // to be non-dust.
- for i, breachedHtlc := range breachInfo.HtlcRetributions {
- // Using the breachedHtlc's incoming flag, determine the
- // appropriate witness type that needs to be generated in order
- // to sweep the HTLC output.
- var htlcWitnessType input.StandardWitnessType
- if breachedHtlc.IsIncoming {
- htlcWitnessType = input.HtlcAcceptedRevoke
- } else {
- htlcWitnessType = input.HtlcOfferedRevoke
- }
-
- htlcOutput := makeBreachedOutput(
- &breachInfo.HtlcRetributions[i].OutPoint,
- htlcWitnessType,
- breachInfo.HtlcRetributions[i].SecondLevelWitnessScript,
- &breachInfo.HtlcRetributions[i].SignDesc,
- breachInfo.BreachHeight)
-
- breachedOutputs = append(breachedOutputs, htlcOutput)
- }
-
- return &retributionInfo{
- commitHash: breachInfo.BreachTransaction.TxHash(),
- chainHash: breachInfo.ChainHash,
- chanPoint: *chanPoint,
- breachedOutputs: breachedOutputs,
- breachHeight: breachInfo.BreachHeight,
- }
-}
-
-// createJusticeTx creates a transaction which exacts "justice" by sweeping ALL
-// the funds within the channel which we are now entitled to due to a breach of
-// the channel's contract by the counterparty. This function returns a *fully*
-// signed transaction with the witness for each input fully in place.
-func (b *breachArbiter) createJusticeTx(
- r *retributionInfo) (*wire.MsgTx, er.R) {
-
- // We will assemble the breached outputs into a slice of spendable
- // outputs, while simultaneously computing the estimated weight of the
- // transaction.
- var (
- spendableOutputs []input.Input
- weightEstimate input.TxWeightEstimator
- )
-
- // Allocate enough space to potentially hold each of the breached
- // outputs in the retribution info.
- spendableOutputs = make([]input.Input, 0, len(r.breachedOutputs))
-
- // The justice transaction we construct will be a segwit transaction
- // that pays to a p2wkh output. Components such as the version,
- // nLockTime, and output are already included in the TxWeightEstimator.
- weightEstimate.AddP2WKHOutput()
-
- // Next, we iterate over the breached outputs contained in the
- // retribution info. For each, we switch over the witness type such
- // that we contribute the appropriate weight for each input and witness,
- // finally adding to our list of spendable outputs.
- for i := range r.breachedOutputs {
- // Grab locally scoped reference to breached output.
- inp := &r.breachedOutputs[i]
-
- // First, determine the appropriate estimated witness weight for
- // the give witness type of this breached output. If the witness
- // weight cannot be estimated, we will omit it from the
- // transaction.
- witnessWeight, _, err := inp.WitnessType().SizeUpperBound()
- if err != nil {
- log.Warnf("could not determine witness weight "+
- "for breached output in retribution info: %v",
- err)
- continue
- }
- weightEstimate.AddWitnessInput(witnessWeight)
-
- // Finally, append this input to our list of spendable outputs.
- spendableOutputs = append(spendableOutputs, inp)
- }
-
- txWeight := int64(weightEstimate.Weight())
- return b.sweepSpendableOutputsTxn(txWeight, spendableOutputs...)
-}
-
-// sweepSpendableOutputsTxn creates a signed transaction from a sequence of
-// spendable outputs by sweeping the funds into a single p2wkh output.
-func (b *breachArbiter) sweepSpendableOutputsTxn(txWeight int64,
- inputs ...input.Input) (*wire.MsgTx, er.R) {
-
- // First, we obtain a new public key script from the wallet which we'll
- // sweep the funds to.
- // TODO(roasbeef): possibly create many outputs to minimize change in
- // the future?
- pkScript, err := b.cfg.GenSweepScript()
- if err != nil {
- return nil, err
- }
-
- // Compute the total amount contained in the inputs.
- var totalAmt btcutil.Amount
- for _, input := range inputs {
- totalAmt += btcutil.Amount(input.SignDesc().Output.Value)
- }
-
- // We'll actually attempt to target inclusion within the next two
- // blocks as we'd like to sweep these funds back into our wallet ASAP.
- feePerKw, err := b.cfg.Estimator.EstimateFeePerKW(2)
- if err != nil {
- return nil, err
- }
- txFee := feePerKw.FeeForWeight(txWeight)
-
- // TODO(roasbeef): already start to siphon their funds into fees
- sweepAmt := int64(totalAmt - txFee)
-
- // With the fee calculated, we can now create the transaction using the
- // information gathered above and the provided retribution information.
- txn := wire.NewMsgTx(2)
-
- // We begin by adding the output to which our funds will be deposited.
- txn.AddTxOut(&wire.TxOut{
- PkScript: pkScript,
- Value: sweepAmt,
- })
-
- // Next, we add all of the spendable outputs as inputs to the
- // transaction.
- for _, input := range inputs {
- txn.AddTxIn(&wire.TxIn{
- PreviousOutPoint: *input.OutPoint(),
- Sequence: input.BlocksToMaturity(),
- })
- }
-
- // Before signing the transaction, check to ensure that it meets some
- // basic validity requirements.
- btx := btcutil.NewTx(txn)
- if err := blockchain.CheckTransactionSanity(btx); err != nil {
- return nil, err
- }
-
- // Create a sighash cache to improve the performance of hashing and
- // signing SigHashAll inputs.
- hashCache := txscript.NewTxSigHashes(txn)
-
- // Create a closure that encapsulates the process of initializing a
- // particular output's witness generation function, computing the
- // witness, and attaching it to the transaction. This function accepts
- // an integer index representing the intended txin index, and the
- // breached output from which it will spend.
- addWitness := func(idx int, so input.Input) er.R {
- // First, we construct a valid witness for this outpoint and
- // transaction using the SpendableOutput's witness generation
- // function.
- inputScript, err := so.CraftInputScript(
- b.cfg.Signer, txn, hashCache, idx,
- )
- if err != nil {
- return err
- }
-
- // Then, we add the witness to the transaction at the
- // appropriate txin index.
- txn.TxIn[idx].Witness = inputScript.Witness
-
- return nil
- }
-
- // Finally, generate a witness for each output and attach it to the
- // transaction.
- for i, input := range inputs {
- if err := addWitness(i, input); err != nil {
- return nil, err
- }
- }
-
- return txn, nil
-}
-
-// RetributionStore provides an interface for managing a persistent map from
-// wire.OutPoint -> retributionInfo. Upon learning of a breach, a BreachArbiter
-// should record the retributionInfo for the breached channel, which serves a
-// checkpoint in the event that retribution needs to be resumed after failure.
-// A RetributionStore provides an interface for managing the persisted set, as
-// well as mapping user defined functions over the entire on-disk contents.
-//
-// Calls to RetributionStore may occur concurrently. A concrete instance of
-// RetributionStore should use appropriate synchronization primitives, or
-// be otherwise safe for concurrent access.
-type RetributionStore interface {
- // Add persists the retributionInfo to disk, using the information's
- // chanPoint as the key. This method should overwrite any existing
- // entries found under the same key, and an error should be raised if
- // the addition fails.
- Add(retInfo *retributionInfo) er.R
-
- // IsBreached queries the retribution store to see if the breach arbiter
- // is aware of any breaches for the provided channel point.
- IsBreached(chanPoint *wire.OutPoint) (bool, er.R)
-
- // Finalize persists the finalized justice transaction for a particular
- // channel.
- Finalize(chanPoint *wire.OutPoint, finalTx *wire.MsgTx) er.R
-
- // GetFinalizedTxn loads the finalized justice transaction, if any, from
- // the retribution store. The finalized transaction will be nil if
- // Finalize has not yet been called for this channel point.
- GetFinalizedTxn(chanPoint *wire.OutPoint) (*wire.MsgTx, er.R)
-
- // Remove deletes the retributionInfo from disk, if any exists, under
- // the given key. An error should be re raised if the removal fails.
- Remove(key *wire.OutPoint) er.R
-
- // ForAll iterates over the existing on-disk contents and applies a
- // chosen, read-only callback to each. This method should ensure that it
- // immediately propagate any errors generated by the callback.
- ForAll(cb func(*retributionInfo) er.R, reset func()) er.R
-}
-
-// retributionStore handles persistence of retribution states to disk and is
-// backed by a boltdb bucket. The primary responsibility of the retribution
-// store is to ensure that we can recover from a restart in the middle of a
-// breached contract retribution.
-type retributionStore struct {
- db *channeldb.DB
-}
-
-// newRetributionStore creates a new instance of a retributionStore.
-func newRetributionStore(db *channeldb.DB) *retributionStore {
- return &retributionStore{
- db: db,
- }
-}
-
-// Add adds a retribution state to the retributionStore, which is then persisted
-// to disk.
-func (rs *retributionStore) Add(ret *retributionInfo) er.R {
- return kvdb.Update(rs.db, func(tx kvdb.RwTx) er.R {
- // If this is our first contract breach, the retributionBucket
- // won't exist, in which case, we just create a new bucket.
- retBucket, err := tx.CreateTopLevelBucket(retributionBucket)
- if err != nil {
- return err
- }
-
- var outBuf bytes.Buffer
- if err := writeOutpoint(&outBuf, &ret.chanPoint); err != nil {
- return err
- }
-
- var retBuf bytes.Buffer
- if err := ret.Encode(&retBuf); err != nil {
- return err
- }
-
- return retBucket.Put(outBuf.Bytes(), retBuf.Bytes())
- }, func() {})
-}
-
-// Finalize writes a signed justice transaction to the retribution store. This
-// is done before publishing the transaction, so that we can recover the txid on
-// startup and re-register for confirmation notifications.
-func (rs *retributionStore) Finalize(chanPoint *wire.OutPoint,
- finalTx *wire.MsgTx) er.R {
- return kvdb.Update(rs.db, func(tx kvdb.RwTx) er.R {
- justiceBkt, err := tx.CreateTopLevelBucket(justiceTxnBucket)
- if err != nil {
- return err
- }
-
- var chanBuf bytes.Buffer
- if err := writeOutpoint(&chanBuf, chanPoint); err != nil {
- return err
- }
-
- var txBuf bytes.Buffer
- if err := finalTx.Serialize(&txBuf); err != nil {
- return err
- }
-
- return justiceBkt.Put(chanBuf.Bytes(), txBuf.Bytes())
- }, func() {})
-}
-
-// GetFinalizedTxn loads the finalized justice transaction for the provided
-// channel point. The finalized transaction will be nil if Finalize has yet to
-// be called for this channel point.
-func (rs *retributionStore) GetFinalizedTxn(
- chanPoint *wire.OutPoint) (*wire.MsgTx, er.R) {
-
- var finalTxBytes []byte
- if err := kvdb.View(rs.db, func(tx kvdb.RTx) er.R {
- justiceBkt := tx.ReadBucket(justiceTxnBucket)
- if justiceBkt == nil {
- return nil
- }
-
- var chanBuf bytes.Buffer
- if err := writeOutpoint(&chanBuf, chanPoint); err != nil {
- return err
- }
-
- finalTxBytes = justiceBkt.Get(chanBuf.Bytes())
-
- return nil
- }, func() {
- finalTxBytes = nil
- }); err != nil {
- return nil, err
- }
-
- if finalTxBytes == nil {
- return nil, nil
- }
-
- finalTx := &wire.MsgTx{}
- err := finalTx.Deserialize(bytes.NewReader(finalTxBytes))
-
- return finalTx, err
-}
-
-// IsBreached queries the retribution store to discern if this channel was
-// previously breached. This is used when connecting to a peer to determine if
-// it is safe to add a link to the htlcswitch, as we should never add a channel
-// that has already been breached.
-func (rs *retributionStore) IsBreached(chanPoint *wire.OutPoint) (bool, er.R) {
- var found bool
- err := kvdb.View(rs.db, func(tx kvdb.RTx) er.R {
- retBucket := tx.ReadBucket(retributionBucket)
- if retBucket == nil {
- return nil
- }
-
- var chanBuf bytes.Buffer
- if err := writeOutpoint(&chanBuf, chanPoint); err != nil {
- return err
- }
-
- retInfo := retBucket.Get(chanBuf.Bytes())
- if retInfo != nil {
- found = true
- }
-
- return nil
- }, func() {
- found = false
- })
-
- return found, err
-}
-
-// Remove removes a retribution state and finalized justice transaction by
-// channel point from the retribution store.
-func (rs *retributionStore) Remove(chanPoint *wire.OutPoint) er.R {
- return kvdb.Update(rs.db, func(tx kvdb.RwTx) er.R {
- retBucket := tx.ReadWriteBucket(retributionBucket)
-
- // We return an error if the bucket is not already created,
- // since normal operation of the breach arbiter should never try
- // to remove a finalized retribution state that is not already
- // stored in the db.
- if retBucket == nil {
- return er.New("unable to remove retribution " +
- "because the retribution bucket doesn't exist")
- }
-
- // Serialize the channel point we are intending to remove.
- var chanBuf bytes.Buffer
- if err := writeOutpoint(&chanBuf, chanPoint); err != nil {
- return err
- }
- chanBytes := chanBuf.Bytes()
-
- // Remove the persisted retribution info and finalized justice
- // transaction.
- if err := retBucket.Delete(chanBytes); err != nil {
- return err
- }
-
- // If we have not finalized this channel breach, we can exit
- // early.
- justiceBkt := tx.ReadWriteBucket(justiceTxnBucket)
- if justiceBkt == nil {
- return nil
- }
-
- return justiceBkt.Delete(chanBytes)
- }, func() {})
-}
-
-// ForAll iterates through all stored retributions and executes the passed
-// callback function on each retribution.
-func (rs *retributionStore) ForAll(cb func(*retributionInfo) er.R,
- reset func()) er.R {
-
- return kvdb.View(rs.db, func(tx kvdb.RTx) er.R {
- // If the bucket does not exist, then there are no pending
- // retributions.
- retBucket := tx.ReadBucket(retributionBucket)
- if retBucket == nil {
- return nil
- }
-
- // Otherwise, we fetch each serialized retribution info,
- // deserialize it, and execute the passed in callback function
- // on it.
- return retBucket.ForEach(func(_, retBytes []byte) er.R {
- ret := &retributionInfo{}
- err := ret.Decode(bytes.NewBuffer(retBytes))
- if err != nil {
- return err
- }
-
- return cb(ret)
- })
- }, reset)
-}
-
-// Encode serializes the retribution into the passed byte stream.
-func (ret *retributionInfo) Encode(w io.Writer) er.R {
- var scratch [4]byte
-
- if _, err := util.Write(w, ret.commitHash[:]); err != nil {
- return err
- }
-
- if err := writeOutpoint(w, &ret.chanPoint); err != nil {
- return err
- }
-
- if _, err := util.Write(w, ret.chainHash[:]); err != nil {
- return err
- }
-
- binary.BigEndian.PutUint32(scratch[:], ret.breachHeight)
- if _, err := util.Write(w, scratch[:]); err != nil {
- return err
- }
-
- nOutputs := len(ret.breachedOutputs)
- if err := wire.WriteVarInt(w, 0, uint64(nOutputs)); err != nil {
- return err
- }
-
- for _, output := range ret.breachedOutputs {
- if err := output.Encode(w); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-// Dencode deserializes a retribution from the passed byte stream.
-func (ret *retributionInfo) Decode(r io.Reader) er.R {
- var scratch [32]byte
-
- if _, err := util.ReadFull(r, scratch[:]); err != nil {
- return err
- }
- hash, err := chainhash.NewHash(scratch[:])
- if err != nil {
- return err
- }
- ret.commitHash = *hash
-
- if err := readOutpoint(r, &ret.chanPoint); err != nil {
- return err
- }
-
- if _, err := util.ReadFull(r, scratch[:]); err != nil {
- return err
- }
- chainHash, err := chainhash.NewHash(scratch[:])
- if err != nil {
- return err
- }
- ret.chainHash = *chainHash
-
- if _, err := util.ReadFull(r, scratch[:4]); err != nil {
- return err
- }
- ret.breachHeight = binary.BigEndian.Uint32(scratch[:4])
-
- nOutputsU64, err := wire.ReadVarInt(r, 0)
- if err != nil {
- return err
- }
- nOutputs := int(nOutputsU64)
-
- ret.breachedOutputs = make([]breachedOutput, nOutputs)
- for i := range ret.breachedOutputs {
- if err := ret.breachedOutputs[i].Decode(r); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-// Encode serializes a breachedOutput into the passed byte stream.
-func (bo *breachedOutput) Encode(w io.Writer) er.R {
- var scratch [8]byte
-
- binary.BigEndian.PutUint64(scratch[:8], uint64(bo.amt))
- if _, err := util.Write(w, scratch[:8]); err != nil {
- return err
- }
-
- if err := writeOutpoint(w, &bo.outpoint); err != nil {
- return err
- }
-
- err := input.WriteSignDescriptor(w, &bo.signDesc)
- if err != nil {
- return err
- }
-
- err = wire.WriteVarBytes(w, 0, bo.secondLevelWitnessScript)
- if err != nil {
- return err
- }
-
- binary.BigEndian.PutUint16(scratch[:2], uint16(bo.witnessType))
- if _, err := util.Write(w, scratch[:2]); err != nil {
- return err
- }
-
- return nil
-}
-
-// Decode deserializes a breachedOutput from the passed byte stream.
-func (bo *breachedOutput) Decode(r io.Reader) er.R {
- var scratch [8]byte
-
- if _, err := util.ReadFull(r, scratch[:8]); err != nil {
- return err
- }
- bo.amt = btcutil.Amount(binary.BigEndian.Uint64(scratch[:8]))
-
- if err := readOutpoint(r, &bo.outpoint); err != nil {
- return err
- }
-
- if err := input.ReadSignDescriptor(r, &bo.signDesc); err != nil {
- return err
- }
-
- wScript, err := wire.ReadVarBytes(r, 0, 1000, "witness script")
- if err != nil {
- return err
- }
- bo.secondLevelWitnessScript = wScript
-
- if _, err := util.ReadFull(r, scratch[:2]); err != nil {
- return err
- }
- bo.witnessType = input.StandardWitnessType(
- binary.BigEndian.Uint16(scratch[:2]),
- )
-
- return nil
-}
diff --git a/lnd/breacharbiter_test.go b/lnd/breacharbiter_test.go
deleted file mode 100644
index 2e3d7500..00000000
--- a/lnd/breacharbiter_test.go
+++ /dev/null
@@ -1,2055 +0,0 @@
-// +build !rpctest
-
-package lnd
-
-import (
- "bytes"
- crand "crypto/rand"
- "crypto/sha256"
- "encoding/binary"
- "io/ioutil"
- "math/rand"
- "net"
- "os"
- "reflect"
- "sync"
- "testing"
- "time"
-
- "github.com/pkt-cash/pktd/btcec"
- "github.com/pkt-cash/pktd/btcutil"
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/btcutil/util"
- "github.com/pkt-cash/pktd/chaincfg/chainhash"
- "github.com/pkt-cash/pktd/chaincfg/globalcfg"
- "github.com/pkt-cash/pktd/lnd/chainntnfs"
- "github.com/pkt-cash/pktd/lnd/channeldb"
- "github.com/pkt-cash/pktd/lnd/htlcswitch"
- "github.com/pkt-cash/pktd/lnd/input"
- "github.com/pkt-cash/pktd/lnd/keychain"
- "github.com/pkt-cash/pktd/lnd/lntest/mock"
- "github.com/pkt-cash/pktd/lnd/lntest/wait"
- "github.com/pkt-cash/pktd/lnd/lnwallet"
- "github.com/pkt-cash/pktd/lnd/lnwallet/chainfee"
- "github.com/pkt-cash/pktd/lnd/lnwire"
- "github.com/pkt-cash/pktd/lnd/shachain"
- "github.com/pkt-cash/pktd/txscript/params"
- "github.com/pkt-cash/pktd/wire"
-)
-
-var (
- breachOutPoints = []wire.OutPoint{
- {
- Hash: [chainhash.HashSize]byte{
- 0x51, 0xb6, 0x37, 0xd8, 0xfc, 0xd2, 0xc6, 0xda,
- 0x48, 0x59, 0xe6, 0x96, 0x31, 0x13, 0xa1, 0x17,
- 0x2d, 0xe7, 0x93, 0xe4, 0xb7, 0x25, 0xb8, 0x4d,
- 0x1f, 0xb, 0x4c, 0xf9, 0x9e, 0xc5, 0x8c, 0xe9,
- },
- Index: 9,
- },
- {
- Hash: [chainhash.HashSize]byte{
- 0xb7, 0x94, 0x38, 0x5f, 0x2d, 0x1e, 0xf7, 0xab,
- 0x4d, 0x92, 0x73, 0xd1, 0x90, 0x63, 0x81, 0xb4,
- 0x4f, 0x2f, 0x6f, 0x25, 0x88, 0xa3, 0xef, 0xb9,
- 0x6a, 0x49, 0x18, 0x83, 0x31, 0x98, 0x47, 0x53,
- },
- Index: 49,
- },
- {
- Hash: [chainhash.HashSize]byte{
- 0x81, 0xb6, 0x37, 0xd8, 0xfc, 0xd2, 0xc6, 0xda,
- 0x63, 0x59, 0xe6, 0x96, 0x31, 0x13, 0xa1, 0x17,
- 0xd, 0xe7, 0x95, 0xe4, 0xb7, 0x25, 0xb8, 0x4d,
- 0x1e, 0xb, 0x4c, 0xfd, 0x9e, 0xc5, 0x8c, 0xe9,
- },
- Index: 23,
- },
- }
-
- breachKeys = [][]byte{
- {0x04, 0x11, 0xdb, 0x93, 0xe1, 0xdc, 0xdb, 0x8a,
- 0x01, 0x6b, 0x49, 0x84, 0x0f, 0x8c, 0x53, 0xbc, 0x1e,
- 0xb6, 0x8a, 0x38, 0x2e, 0x97, 0xb1, 0x48, 0x2e, 0xca,
- 0xd7, 0xb1, 0x48, 0xa6, 0x90, 0x9a, 0x5c, 0xb2, 0xe0,
- 0xea, 0xdd, 0xfb, 0x84, 0xcc, 0xf9, 0x74, 0x44, 0x64,
- 0xf8, 0x2e, 0x16, 0x0b, 0xfa, 0x9b, 0x8b, 0x64, 0xf9,
- 0xd4, 0xc0, 0x3f, 0x99, 0x9b, 0x86, 0x43, 0xf6, 0x56,
- 0xb4, 0x12, 0xa3,
- },
- {0x07, 0x11, 0xdb, 0x93, 0xe1, 0xdc, 0xdb, 0x8a,
- 0x01, 0x6b, 0x49, 0x84, 0x0f, 0x8c, 0x53, 0xbc, 0x1e,
- 0xb6, 0x8a, 0x38, 0x2e, 0x97, 0xb1, 0x48, 0x2e, 0xca,
- 0xd7, 0xb1, 0x48, 0xa6, 0x90, 0x9a, 0x5c, 0xb2, 0xe0,
- 0xea, 0xdd, 0xfb, 0x84, 0xcc, 0xf9, 0x74, 0x44, 0x64,
- 0xf8, 0x2e, 0x16, 0x0b, 0xfa, 0x9b, 0x8b, 0x64, 0xf9,
- 0xd4, 0xc0, 0x3f, 0x99, 0x9b, 0x86, 0x43, 0xf6, 0x56,
- 0xb4, 0x12, 0xa3,
- },
- {0x02, 0xce, 0x0b, 0x14, 0xfb, 0x84, 0x2b, 0x1b,
- 0xa5, 0x49, 0xfd, 0xd6, 0x75, 0xc9, 0x80, 0x75, 0xf1,
- 0x2e, 0x9c, 0x51, 0x0f, 0x8e, 0xf5, 0x2b, 0xd0, 0x21,
- 0xa9, 0xa1, 0xf4, 0x80, 0x9d, 0x3b, 0x4d,
- },
- {0x02, 0xce, 0x0b, 0x14, 0xfb, 0x84, 0x2b, 0x1b,
- 0x2e, 0x9c, 0x51, 0x0f, 0x8e, 0xf5, 0x2b, 0xd0, 0x21,
- 0xa5, 0x49, 0xfd, 0xd6, 0x75, 0xc9, 0x80, 0x75, 0xf1,
- 0xa3, 0xa1, 0xf4, 0x80, 0x9d, 0x3b, 0x4d,
- },
- }
-
- breachedOutputs = []breachedOutput{
- {
- amt: btcutil.Amount(1e7),
- outpoint: breachOutPoints[0],
- witnessType: input.CommitmentNoDelay,
- signDesc: input.SignDescriptor{
- SingleTweak: []byte{
- 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
- 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
- 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
- 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
- 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
- 0x02, 0x02,
- },
- WitnessScript: []byte{
- 0x00, 0x14, 0xee, 0x91, 0x41, 0x7e,
- 0x85, 0x6c, 0xde, 0x10, 0xa2, 0x91,
- 0x1e, 0xdc, 0xbd, 0xbd, 0x69, 0xe2,
- 0xef, 0xb5, 0x71, 0x48,
- },
- Output: &wire.TxOut{
- Value: 5000000000,
- PkScript: []byte{
- 0x41, // OP_DATA_65
- 0x04, 0xd6, 0x4b, 0xdf, 0xd0,
- 0x9e, 0xb1, 0xc5, 0xfe, 0x29,
- 0x5a, 0xbd, 0xeb, 0x1d, 0xca,
- 0x42, 0x81, 0xbe, 0x98, 0x8e,
- 0x2d, 0xa0, 0xb6, 0xc1, 0xc6,
- 0xa5, 0x9d, 0xc2, 0x26, 0xc2,
- 0x86, 0x24, 0xe1, 0x81, 0x75,
- 0xe8, 0x51, 0xc9, 0x6b, 0x97,
- 0x3d, 0x81, 0xb0, 0x1c, 0xc3,
- 0x1f, 0x04, 0x78, 0x34, 0xbc,
- 0x06, 0xd6, 0xd6, 0xed, 0xf6,
- 0x20, 0xd1, 0x84, 0x24, 0x1a,
- 0x6a, 0xed, 0x8b, 0x63,
- 0xa6, // 65-byte signature
- 0xac, // OP_CHECKSIG
- },
- },
- HashType: params.SigHashAll,
- },
- secondLevelWitnessScript: breachKeys[0],
- },
- {
- amt: btcutil.Amount(1e7),
- outpoint: breachOutPoints[0],
- witnessType: input.CommitSpendNoDelayTweakless,
- signDesc: input.SignDescriptor{
- WitnessScript: []byte{
- 0x00, 0x14, 0xee, 0x91, 0x41, 0x7e,
- 0x85, 0x6c, 0xde, 0x10, 0xa2, 0x91,
- 0x1e, 0xdc, 0xbd, 0xbd, 0x69, 0xe2,
- 0xef, 0xb5, 0x71, 0x48,
- },
- Output: &wire.TxOut{
- Value: 5000000000,
- PkScript: []byte{
- 0x41, // OP_DATA_65
- 0x04, 0xd6, 0x4b, 0xdf, 0xd0,
- 0x9e, 0xb1, 0xc5, 0xfe, 0x29,
- 0x5a, 0xbd, 0xeb, 0x1d, 0xca,
- 0x42, 0x81, 0xbe, 0x98, 0x8e,
- 0x2d, 0xa0, 0xb6, 0xc1, 0xc6,
- 0xa5, 0x9d, 0xc2, 0x26, 0xc2,
- 0x86, 0x24, 0xe1, 0x81, 0x75,
- 0xe8, 0x51, 0xc9, 0x6b, 0x97,
- 0x3d, 0x81, 0xb0, 0x1c, 0xc3,
- 0x1f, 0x04, 0x78, 0x34, 0xbc,
- 0x06, 0xd6, 0xd6, 0xed, 0xf6,
- 0x20, 0xd1, 0x84, 0x24, 0x1a,
- 0x6a, 0xed, 0x8b, 0x63,
- 0xa6, // 65-byte signature
- 0xac, // OP_CHECKSIG
- },
- },
- HashType: params.SigHashAll,
- },
- secondLevelWitnessScript: breachKeys[0],
- },
- {
- amt: btcutil.Amount(2e9),
- outpoint: breachOutPoints[1],
- witnessType: input.CommitmentRevoke,
- signDesc: input.SignDescriptor{
- SingleTweak: []byte{
- 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
- 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
- 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
- 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
- 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
- 0x02, 0x02,
- },
- WitnessScript: []byte{
- 0x00, 0x14, 0xee, 0x91, 0x41, 0x7e,
- 0x85, 0x6c, 0xde, 0x10, 0xa2, 0x91,
- 0x1e, 0xdc, 0xbd, 0xbd, 0x69, 0xe2,
- 0xef, 0xb5, 0x71, 0x48,
- },
- Output: &wire.TxOut{
- Value: 5000000000,
- PkScript: []byte{
- 0x41, // OP_DATA_65
- 0x04, 0xd6, 0x4b, 0xdf, 0xd0,
- 0x9e, 0xb1, 0xc5, 0xfe, 0x29,
- 0x5a, 0xbd, 0xeb, 0x1d, 0xca,
- 0x42, 0x81, 0xbe, 0x98, 0x8e,
- 0x2d, 0xa0, 0xb6, 0xc1, 0xc6,
- 0xa5, 0x9d, 0xc2, 0x26, 0xc2,
- 0x86, 0x24, 0xe1, 0x81, 0x75,
- 0xe8, 0x51, 0xc9, 0x6b, 0x97,
- 0x3d, 0x81, 0xb0, 0x1c, 0xc3,
- 0x1f, 0x04, 0x78, 0x34, 0xbc,
- 0x06, 0xd6, 0xd6, 0xed, 0xf6,
- 0x20, 0xd1, 0x84, 0x24, 0x1a,
- 0x6a, 0xed, 0x8b, 0x63,
- 0xa6, // 65-byte signature
- 0xac, // OP_CHECKSIG
- },
- },
- HashType: params.SigHashAll,
- },
- secondLevelWitnessScript: breachKeys[0],
- },
- {
- amt: btcutil.Amount(3e4),
- outpoint: breachOutPoints[2],
- witnessType: input.CommitmentDelayOutput,
- signDesc: input.SignDescriptor{
- SingleTweak: []byte{
- 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
- 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
- 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
- 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
- 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
- 0x02, 0x02,
- },
- WitnessScript: []byte{
- 0x00, 0x14, 0xee, 0x91, 0x41, 0x7e,
- 0x85, 0x6c, 0xde, 0x10, 0xa2, 0x91,
- 0x1e, 0xdc, 0xbd, 0xbd, 0x69, 0xe2,
- 0xef, 0xb5, 0x71, 0x48,
- },
- Output: &wire.TxOut{
- Value: 5000000000,
- PkScript: []byte{
- 0x41, // OP_DATA_65
- 0x04, 0xd6, 0x4b, 0xdf, 0xd0,
- 0x9e, 0xb1, 0xc5, 0xfe, 0x29,
- 0x5a, 0xbd, 0xeb, 0x1d, 0xca,
- 0x42, 0x81, 0xbe, 0x98, 0x8e,
- 0x2d, 0xa0, 0xb6, 0xc1, 0xc6,
- 0xa5, 0x9d, 0xc2, 0x26, 0xc2,
- 0x86, 0x24, 0xe1, 0x81, 0x75,
- 0xe8, 0x51, 0xc9, 0x6b, 0x97,
- 0x3d, 0x81, 0xb0, 0x1c, 0xc3,
- 0x1f, 0x04, 0x78, 0x34, 0xbc,
- 0x06, 0xd6, 0xd6, 0xed, 0xf6,
- 0x20, 0xd1, 0x84, 0x24, 0x1a,
- 0x6a, 0xed, 0x8b, 0x63,
- 0xa6, // 65-byte signature
- 0xac, // OP_CHECKSIG
- },
- },
- HashType: params.SigHashAll,
- },
- secondLevelWitnessScript: breachKeys[0],
- },
- }
-
- retributionMap = make(map[wire.OutPoint]retributionInfo)
- retributions = []retributionInfo{
- {
- commitHash: [chainhash.HashSize]byte{
- 0xb7, 0x94, 0x38, 0x5f, 0x2d, 0x1e, 0xf7, 0xab,
- 0x4d, 0x92, 0x73, 0xd1, 0x90, 0x63, 0x81, 0xb4,
- 0x4f, 0x2f, 0x6f, 0x25, 0x88, 0xa3, 0xef, 0xb9,
- 0x6a, 0x49, 0x18, 0x83, 0x31, 0x98, 0x47, 0x53,
- },
- chainHash: [chainhash.HashSize]byte{
- 0x4d, 0x92, 0x73, 0xd1, 0x90, 0x63, 0x81, 0xb4,
- 0x4f, 0x2f, 0x6f, 0x25, 0x88, 0xa3, 0xef, 0xb9,
- 0xb7, 0x94, 0x38, 0x5f, 0x2d, 0x1e, 0xf7, 0xab,
- 0x6b, 0x49, 0x18, 0x83, 0x31, 0x98, 0x47, 0x53,
- },
- chanPoint: breachOutPoints[0],
- breachHeight: 337,
- // Set to breachedOutputs 0 and 1 in init()
- breachedOutputs: []breachedOutput{{}, {}},
- },
- {
- commitHash: [chainhash.HashSize]byte{
- 0x51, 0xb6, 0x37, 0xd8, 0xfc, 0xd2, 0xc6, 0xda,
- 0x48, 0x59, 0xe6, 0x96, 0x31, 0x13, 0xa1, 0x17,
- 0x2d, 0xe7, 0x93, 0xe4, 0xb7, 0x25, 0xb8, 0x4d,
- 0x1f, 0xb, 0x4c, 0xf9, 0x9e, 0xc5, 0x8c, 0xe9,
- },
- chainHash: [chainhash.HashSize]byte{
- 0x4f, 0x2f, 0x6f, 0x25, 0x88, 0xa3, 0xef, 0xb9,
- 0xb7, 0x94, 0x39, 0x5f, 0x2d, 0x1e, 0xf7, 0xab,
- 0x6b, 0x49, 0x18, 0x83, 0x31, 0x98, 0x47, 0x53,
- 0x4d, 0x92, 0x73, 0xd1, 0x90, 0x63, 0x81, 0xb4,
- },
- chanPoint: breachOutPoints[1],
- breachHeight: 420420,
- // Set to breachedOutputs 1 and 2 in init()
- breachedOutputs: []breachedOutput{{}, {}},
- },
- }
-)
-
-func init() {
- // Ensure that breached outputs are initialized before starting tests.
- if err := initBreachedOutputs(); err != nil {
- panic(err)
- }
-
- // Populate a retribution map to for convenience, to allow lookups by
- // channel point.
- for i := range retributions {
- retInfo := &retributions[i]
- retInfo.breachedOutputs[0] = breachedOutputs[i]
- retInfo.breachedOutputs[1] = breachedOutputs[i+1]
-
- retributionMap[retInfo.chanPoint] = *retInfo
-
- }
-}
-
-// FailingRetributionStore wraps a RetributionStore and supports controlled
-// restarts of the persistent instance. This allows us to test (1) that no
-// modifications to the entries are made between calls or through side effects,
-// and (2) that the database is actually being persisted between actions.
-type FailingRetributionStore interface {
- RetributionStore
-
- Restart()
-}
-
-// failingRetributionStore is a concrete implementation of a
-// FailingRetributionStore. It wraps an underlying RetributionStore and is
-// parameterized entirely by a restart function, which is intended to simulate a
-// full stop/start of the store.
-type failingRetributionStore struct {
- mu sync.Mutex
-
- rs RetributionStore
-
- nextAddErr er.R
-
- restart func() RetributionStore
-}
-
-// newFailingRetributionStore creates a new failing retribution store. The given
-// restart closure should ensure that it is reloading its contents from the
-// persistent source.
-func newFailingRetributionStore(
- restart func() RetributionStore) *failingRetributionStore {
-
- return &failingRetributionStore{
- mu: sync.Mutex{},
- rs: restart(),
- restart: restart,
- }
-}
-
-// FailNextAdd instructs the retribution store to return the provided error. If
-// the error is nil, a generic default will be used.
-func (frs *failingRetributionStore) FailNextAdd(err er.R) {
- if err == nil {
- err = er.New("retribution store failed")
- }
-
- frs.mu.Lock()
- frs.nextAddErr = err
- frs.mu.Unlock()
-}
-
-func (frs *failingRetributionStore) Restart() {
- frs.mu.Lock()
- frs.rs = frs.restart()
- frs.mu.Unlock()
-}
-
-// Add forwards the call to the underlying retribution store, unless this Add
-// has been previously instructed to fail.
-func (frs *failingRetributionStore) Add(retInfo *retributionInfo) er.R {
- frs.mu.Lock()
- defer frs.mu.Unlock()
-
- if frs.nextAddErr != nil {
- err := frs.nextAddErr
- frs.nextAddErr = nil
- return err
- }
-
- return frs.rs.Add(retInfo)
-}
-
-func (frs *failingRetributionStore) IsBreached(chanPoint *wire.OutPoint) (bool, er.R) {
- frs.mu.Lock()
- defer frs.mu.Unlock()
-
- return frs.rs.IsBreached(chanPoint)
-}
-
-func (frs *failingRetributionStore) Finalize(chanPoint *wire.OutPoint,
- finalTx *wire.MsgTx) er.R {
-
- frs.mu.Lock()
- defer frs.mu.Unlock()
-
- return frs.rs.Finalize(chanPoint, finalTx)
-}
-
-func (frs *failingRetributionStore) GetFinalizedTxn(
- chanPoint *wire.OutPoint) (*wire.MsgTx, er.R) {
-
- frs.mu.Lock()
- defer frs.mu.Unlock()
-
- return frs.rs.GetFinalizedTxn(chanPoint)
-}
-
-func (frs *failingRetributionStore) Remove(key *wire.OutPoint) er.R {
- frs.mu.Lock()
- defer frs.mu.Unlock()
-
- return frs.rs.Remove(key)
-}
-
-func (frs *failingRetributionStore) ForAll(cb func(*retributionInfo) er.R,
- reset func()) er.R {
-
- frs.mu.Lock()
- defer frs.mu.Unlock()
-
- return frs.rs.ForAll(cb, reset)
-}
-
-// Parse the pubkeys in the breached outputs.
-func initBreachedOutputs() er.R {
- for i := range breachedOutputs {
- bo := &breachedOutputs[i]
-
- // Parse the sign descriptor's pubkey.
- pubkey, err := btcec.ParsePubKey(breachKeys[i], btcec.S256())
- if err != nil {
- return er.Errorf("unable to parse pubkey: %v",
- breachKeys[i])
- }
- bo.signDesc.KeyDesc.PubKey = pubkey
- }
-
- return nil
-}
-
-// Test that breachedOutput Encode/Decode works.
-func TestBreachedOutputSerialization(t *testing.T) {
- for i := range breachedOutputs {
- bo := &breachedOutputs[i]
-
- var buf bytes.Buffer
-
- if err := bo.Encode(&buf); err != nil {
- t.Fatalf("unable to serialize breached output [%v]: %v",
- i, err)
- }
-
- desBo := &breachedOutput{}
- if err := desBo.Decode(&buf); err != nil {
- t.Fatalf("unable to deserialize "+
- "breached output [%v]: %v", i, err)
- }
-
- if !reflect.DeepEqual(bo, desBo) {
- t.Fatalf("original and deserialized "+
- "breached outputs not equal:\n"+
- "original : %+v\n"+
- "deserialized : %+v\n",
- bo, desBo)
- }
- }
-}
-
-// Test that retribution Encode/Decode works.
-func TestRetributionSerialization(t *testing.T) {
- for i := range retributions {
- ret := &retributions[i]
-
- var buf bytes.Buffer
-
- if err := ret.Encode(&buf); err != nil {
- t.Fatalf("unable to serialize retribution [%v]: %v",
- i, err)
- }
-
- desRet := &retributionInfo{}
- if err := desRet.Decode(&buf); err != nil {
- t.Fatalf("unable to deserialize retribution [%v]: %v",
- i, err)
- }
-
- if !reflect.DeepEqual(ret, desRet) {
- t.Fatalf("original and deserialized "+
- "retribution infos not equal:\n"+
- "original : %+v\n"+
- "deserialized : %+v\n",
- ret, desRet)
- }
- }
-}
-
-// copyRetInfo creates a complete copy of the given retributionInfo.
-func copyRetInfo(retInfo *retributionInfo) *retributionInfo {
- nOutputs := len(retInfo.breachedOutputs)
-
- ret := &retributionInfo{
- commitHash: retInfo.commitHash,
- chainHash: retInfo.chainHash,
- chanPoint: retInfo.chanPoint,
- breachHeight: retInfo.breachHeight,
- breachedOutputs: make([]breachedOutput, nOutputs),
- }
-
- for i := range retInfo.breachedOutputs {
- ret.breachedOutputs[i] = retInfo.breachedOutputs[i]
- }
-
- return ret
-}
-
-// mockRetributionStore implements the RetributionStore interface and is backed
-// by an in-memory map. Access to the internal state is provided by a mutex.
-// TODO(cfromknecht) extend to support and test controlled failures.
-type mockRetributionStore struct {
- mu sync.Mutex
- state map[wire.OutPoint]*retributionInfo
- finalTxs map[wire.OutPoint]*wire.MsgTx
-}
-
-func newMockRetributionStore() *mockRetributionStore {
- return &mockRetributionStore{
- mu: sync.Mutex{},
- state: make(map[wire.OutPoint]*retributionInfo),
- finalTxs: make(map[wire.OutPoint]*wire.MsgTx),
- }
-}
-
-func (rs *mockRetributionStore) Add(retInfo *retributionInfo) er.R {
- rs.mu.Lock()
- rs.state[retInfo.chanPoint] = copyRetInfo(retInfo)
- rs.mu.Unlock()
-
- return nil
-}
-
-func (rs *mockRetributionStore) IsBreached(chanPoint *wire.OutPoint) (bool, er.R) {
- rs.mu.Lock()
- _, ok := rs.state[*chanPoint]
- rs.mu.Unlock()
-
- return ok, nil
-}
-
-func (rs *mockRetributionStore) Finalize(chanPoint *wire.OutPoint,
- finalTx *wire.MsgTx) er.R {
-
- rs.mu.Lock()
- rs.finalTxs[*chanPoint] = finalTx
- rs.mu.Unlock()
-
- return nil
-}
-
-func (rs *mockRetributionStore) GetFinalizedTxn(
- chanPoint *wire.OutPoint) (*wire.MsgTx, er.R) {
-
- rs.mu.Lock()
- finalTx := rs.finalTxs[*chanPoint]
- rs.mu.Unlock()
-
- return finalTx, nil
-}
-
-func (rs *mockRetributionStore) Remove(key *wire.OutPoint) er.R {
- rs.mu.Lock()
- delete(rs.state, *key)
- delete(rs.finalTxs, *key)
- rs.mu.Unlock()
-
- return nil
-}
-
-func (rs *mockRetributionStore) ForAll(cb func(*retributionInfo) er.R,
- reset func()) er.R {
-
- rs.mu.Lock()
- defer rs.mu.Unlock()
-
- reset()
- for _, retInfo := range rs.state {
- if err := cb(copyRetInfo(retInfo)); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-var retributionStoreTestSuite = []struct {
- name string
- test func(FailingRetributionStore, *testing.T)
-}{
- {
- "Initialization",
- testRetributionStoreInit,
- },
- {
- "Add/Remove",
- testRetributionStoreAddRemove,
- },
- {
- "Persistence",
- testRetributionStorePersistence,
- },
- {
- "Overwrite",
- testRetributionStoreOverwrite,
- },
- {
- "RemoveEmpty",
- testRetributionStoreRemoveEmpty,
- },
-}
-
-// TestMockRetributionStore instantiates a mockRetributionStore and tests its
-// behavior using the general RetributionStore test suite.
-func TestMockRetributionStore(t *testing.T) {
- for _, test := range retributionStoreTestSuite {
- t.Run(
- "mockRetributionStore."+test.name,
- func(tt *testing.T) {
- mrs := newMockRetributionStore()
- frs := newFailingRetributionStore(
- func() RetributionStore { return mrs },
- )
- test.test(frs, tt)
- },
- )
- }
-}
-
-func makeTestChannelDB() (*channeldb.DB, func(), er.R) {
- // First, create a temporary directory to be used for the duration of
- // this test.
- tempDirName, errr := ioutil.TempDir("", "channeldb")
- if errr != nil {
- return nil, nil, er.E(errr)
- }
-
- cleanUp := func() {
- os.RemoveAll(tempDirName)
- }
-
- db, err := channeldb.Open(tempDirName)
- if err != nil {
- cleanUp()
- return nil, nil, err
- }
-
- return db, cleanUp, nil
-}
-
-// TestChannelDBRetributionStore instantiates a retributionStore backed by a
-// channeldb.DB, and tests its behavior using the general RetributionStore test
-// suite.
-func TestChannelDBRetributionStore(t *testing.T) {
- // Finally, instantiate retribution store and execute RetributionStore
- // test suite.
- for _, test := range retributionStoreTestSuite {
- t.Run(
- "channeldbDBRetributionStore."+test.name,
- func(tt *testing.T) {
- db, cleanUp, err := makeTestChannelDB()
- if err != nil {
- t.Fatalf("unable to open channeldb: %v", err)
- }
- defer db.Close()
- defer cleanUp()
-
- restartDb := func() RetributionStore {
- // Close and reopen channeldb
- if err = db.Close(); err != nil {
- t.Fatalf("unable to close "+
- "channeldb during "+
- "restart: %v",
- err)
- }
- db, err = channeldb.Open(db.Path())
- if err != nil {
- t.Fatalf("unable to open "+
- "channeldb: %v", err)
- }
-
- return newRetributionStore(db)
- }
-
- frs := newFailingRetributionStore(restartDb)
- test.test(frs, tt)
- },
- )
- }
-}
-
-// countRetributions uses a retribution store's ForAll to count the number of
-// elements emitted from the store.
-func countRetributions(t *testing.T, rs RetributionStore) int {
- count := 0
- err := rs.ForAll(func(_ *retributionInfo) er.R {
- count++
- return nil
- }, func() {
- count = 0
- })
- if err != nil {
- t.Fatalf("unable to list retributions in db: %v", err)
- }
- return count
-}
-
-// testRetributionStoreAddRemove executes a generic test suite for any concrete
-// implementation of the RetributionStore interface. This test adds all
-// retributions to the store, confirms that they are all present, and then
-// removes each one individually. Between each addition or removal, the number
-// of elements in the store is checked to ensure that it only changes by one.
-func testRetributionStoreAddRemove(frs FailingRetributionStore, t *testing.T) {
- // Make sure that a new retribution store is actually empty.
- if count := countRetributions(t, frs); count != 0 {
- t.Fatalf("expected 0 retributions, found %v", count)
- }
-
- // Add all retributions, check that ForAll returns the correct
- // information, and then remove all retributions.
- testRetributionStoreAdds(frs, t, false)
- testRetributionStoreForAll(frs, t, false)
- testRetributionStoreRemoves(frs, t, false)
-}
-
-// testRetributionStorePersistence executes the same general test as
-// testRetributionStoreAddRemove, except that it also restarts the store between
-// each operation to ensure that the results are properly persisted.
-func testRetributionStorePersistence(frs FailingRetributionStore, t *testing.T) {
- // Make sure that a new retribution store is still empty after failing
- // right off the bat.
- frs.Restart()
- if count := countRetributions(t, frs); count != 0 {
- t.Fatalf("expected 1 retributions, found %v", count)
- }
-
- // Insert all retributions into the database, restarting and checking
- // between subsequent calls to test that each intermediate additions are
- // persisted.
- testRetributionStoreAdds(frs, t, true)
-
- // After all retributions have been inserted, verify that the store
- // emits a distinct set of retributions that are equivalent to the test
- // vector.
- testRetributionStoreForAll(frs, t, true)
-
- // Remove all retributions from the database, restarting and checking
- // between subsequent calls to test that each intermediate removals are
- // persisted.
- testRetributionStoreRemoves(frs, t, true)
-}
-
-// testRetributionStoreInit ensures that a retribution store is always
-// initialized with no retributions.
-func testRetributionStoreInit(frs FailingRetributionStore, t *testing.T) {
- // Make sure that a new retribution store starts empty.
- if count := countRetributions(t, frs); count != 0 {
- t.Fatalf("expected 0 retributions, found %v", count)
- }
-}
-
-// testRetributionStoreRemoveEmpty ensures that a retribution store will not
-// fail or panic if it is instructed to remove an entry while empty.
-func testRetributionStoreRemoveEmpty(frs FailingRetributionStore, t *testing.T) {
- testRetributionStoreRemoves(frs, t, false)
-}
-
-// testRetributionStoreOverwrite ensures that attempts to write retribution
-// information regarding a channel point that already exists does not change the
-// total number of entries held by the retribution store.
-func testRetributionStoreOverwrite(frs FailingRetributionStore, t *testing.T) {
- // Initially, add all retributions to store.
- testRetributionStoreAdds(frs, t, false)
-
- // Overwrite the initial entries again.
- for i, retInfo := range retributions {
- if err := frs.Add(&retInfo); err != nil {
- t.Fatalf("unable to add to retribution %v to store: %v",
- i, err)
- }
- }
-
- // Check that retribution store still has 2 entries.
- if count := countRetributions(t, frs); count != 2 {
- t.Fatalf("expected 2 retributions, found %v", count)
- }
-}
-
-// testRetributionStoreAdds adds all of the test retributions to the database,
-// ensuring that the total number of elements increases by exactly 1 after each
-// operation. If the `failing` flag is provide, the test will restart the
-// database and confirm that the delta is still 1.
-func testRetributionStoreAdds(
- frs FailingRetributionStore,
- t *testing.T,
- failing bool) {
-
- // Iterate over retributions, adding each from the store. If we are
- // testing the store under failures, we restart the store and verify
- // that the contents are the same.
- for i, retInfo := range retributions {
- // Snapshot number of entries before and after the addition.
- nbefore := countRetributions(t, frs)
- if err := frs.Add(&retInfo); err != nil {
- t.Fatalf("unable to add to retribution %v to store: %v",
- i, err)
- }
- nafter := countRetributions(t, frs)
-
- // Check that only one retribution was added.
- if nafter-nbefore != 1 {
- t.Fatalf("expected %v retributions, found %v",
- nbefore+1, nafter)
- }
-
- if failing {
- frs.Restart()
-
- // Check that retribution store has persisted addition
- // after restarting.
- nrestart := countRetributions(t, frs)
- if nrestart-nbefore != 1 {
- t.Fatalf("expected %v retributions, found %v",
- nbefore+1, nrestart)
- }
- }
- }
-}
-
-// testRetributionStoreRemoves removes all of the test retributions to the
-// database, ensuring that the total number of elements decreases by exactly 1
-// after each operation. If the `failing` flag is provide, the test will
-// restart the database and confirm that the delta is the same.
-func testRetributionStoreRemoves(
- frs FailingRetributionStore,
- t *testing.T,
- failing bool) {
-
- // Iterate over retributions, removing each from the store. If we are
- // testing the store under failures, we restart the store and verify
- // that the contents are the same.
- for i, retInfo := range retributions {
- // Snapshot number of entries before and after the removal.
- nbefore := countRetributions(t, frs)
- err := frs.Remove(&retInfo.chanPoint)
- switch {
- case nbefore == 0 && err == nil:
-
- case nbefore > 0 && err != nil:
- t.Fatalf("unable to remove to retribution %v "+
- "from store: %v", i, err)
- }
- nafter := countRetributions(t, frs)
-
- // If the store is empty, increment nbefore to simulate the
- // removal of one element.
- if nbefore == 0 {
- nbefore++
- }
-
- // Check that only one retribution was removed.
- if nbefore-nafter != 1 {
- t.Fatalf("expected %v retributions, found %v",
- nbefore-1, nafter)
- }
-
- if failing {
- frs.Restart()
-
- // Check that retribution store has persisted removal
- // after restarting.
- nrestart := countRetributions(t, frs)
- if nbefore-nrestart != 1 {
- t.Fatalf("expected %v retributions, found %v",
- nbefore-1, nrestart)
- }
- }
- }
-}
-
-// testRetributionStoreForAll iterates over the current entries in the
-// retribution store, ensuring that each entry in the database is unique, and
-// corresponds to exactly one of the entries in the test vector. If the
-// `failing` flag is provide, the test will restart the database and confirm
-// that the entries again validate against the test vectors.
-func testRetributionStoreForAll(
- frs FailingRetributionStore,
- t *testing.T,
- failing bool) {
-
- // nrets is the number of retributions in the test vector
- nrets := len(retributions)
-
- // isRestart indicates whether or not the database has been restarted.
- // When testing for failures, this allows the test case to make a second
- // attempt without causing a subsequent restart on the second pass.
- var isRestart bool
-
-restartCheck:
- // Construct a set of all channel points presented by the store. Entries
- // are only be added to the set if their corresponding retribution
- // information matches the test vector.
- var foundSet map[wire.OutPoint]struct{}
-
- // Iterate through the stored retributions, checking to see if we have
- // an equivalent retribution in the test vector. This will return an
- // error unless all persisted retributions exist in the test vector.
- if err := frs.ForAll(func(ret *retributionInfo) er.R {
- // Fetch the retribution information from the test vector. If
- // the entry does not exist, the test returns an error.
- if exRetInfo, ok := retributionMap[ret.chanPoint]; ok {
- // Compare the presented retribution information with
- // the expected value, fail if they are inconsistent.
- if !reflect.DeepEqual(ret, &exRetInfo) {
- return er.Errorf("unexpected retribution "+
- "retrieved from db --\n"+
- "want: %#v\ngot: %#v", exRetInfo, ret,
- )
- }
-
- // Retribution information from database matches the
- // test vector, record the channel point in the found
- // map.
- foundSet[ret.chanPoint] = struct{}{}
-
- } else {
- return er.Errorf("unknown retribution retrieved "+
- "from db: %v", ret)
- }
-
- return nil
- }, func() {
- foundSet = make(map[wire.OutPoint]struct{})
- }); err != nil {
- t.Fatalf("failed to iterate over persistent retributions: %v",
- err)
- }
-
- // Check that retribution store emits nrets entries
- if count := countRetributions(t, frs); count != nrets {
- t.Fatalf("expected %v retributions, found %v", nrets, count)
- }
-
- // Confirm that all of the retributions emitted from the iteration
- // correspond to unique channel points.
- nunique := len(foundSet)
- if nunique != nrets {
- t.Fatalf("expected %v unique retributions, only found %v",
- nrets, nunique)
- }
-
- // If in failure mode on only on first pass, restart the database and
- // rexecute the test.
- if failing && !isRestart {
- frs.Restart()
- isRestart = true
-
- goto restartCheck
- }
-}
-
-func initBreachedState(t *testing.T) (*breachArbiter,
- *lnwallet.LightningChannel, *lnwallet.LightningChannel,
- *lnwallet.LocalForceCloseSummary, chan *ContractBreachEvent,
- func(), func()) {
- // Create a pair of channels using a notifier that allows us to signal
- // a spend of the funding transaction. Alice's channel will be the on
- // observing a breach.
- alice, bob, cleanUpChans, err := createInitChannels(1)
- if err != nil {
- t.Fatalf("unable to create test channels: %v", err)
- }
-
- // Instantiate a breach arbiter to handle the breach of alice's channel.
- contractBreaches := make(chan *ContractBreachEvent)
-
- brar, cleanUpArb, err := createTestArbiter(
- t, contractBreaches, alice.State().Db,
- )
- if err != nil {
- t.Fatalf("unable to initialize test breach arbiter: %v", err)
- }
-
- // Send one HTLC to Bob and perform a state transition to lock it in.
- htlcAmount := lnwire.NewMSatFromSatoshis(20000)
- htlc, _ := createHTLC(0, htlcAmount)
- if _, err := alice.AddHTLC(htlc, nil); err != nil {
- t.Fatalf("alice unable to add htlc: %v", err)
- }
- if _, err := bob.ReceiveHTLC(htlc); err != nil {
- t.Fatalf("bob unable to recv add htlc: %v", err)
- }
- if err := forceStateTransition(alice, bob); err != nil {
- t.Fatalf("Can't update the channel state: %v", err)
- }
-
- // Generate the force close summary at this point in time, this will
- // serve as the old state bob will broadcast.
- bobClose, err := bob.ForceClose()
- if err != nil {
- t.Fatalf("unable to force close bob's channel: %v", err)
- }
-
- // Now send another HTLC and perform a state transition, this ensures
- // Alice is ahead of the state Bob will broadcast.
- htlc2, _ := createHTLC(1, htlcAmount)
- if _, err := alice.AddHTLC(htlc2, nil); err != nil {
- t.Fatalf("alice unable to add htlc: %v", err)
- }
- if _, err := bob.ReceiveHTLC(htlc2); err != nil {
- t.Fatalf("bob unable to recv add htlc: %v", err)
- }
- if err := forceStateTransition(alice, bob); err != nil {
- t.Fatalf("Can't update the channel state: %v", err)
- }
-
- return brar, alice, bob, bobClose, contractBreaches, cleanUpChans,
- cleanUpArb
-}
-
-// TestBreachHandoffSuccess tests that a channel's close observer properly
-// delivers retribution information to the breach arbiter in response to a
-// breach close. This test verifies correctness in the event that the handoff
-// experiences no interruptions.
-func TestBreachHandoffSuccess(t *testing.T) {
- brar, alice, _, bobClose, contractBreaches,
- cleanUpChans, cleanUpArb := initBreachedState(t)
- defer cleanUpChans()
- defer cleanUpArb()
-
- chanPoint := alice.ChanPoint
-
- // Signal a spend of the funding transaction and wait for the close
- // observer to exit.
- breach := &ContractBreachEvent{
- ChanPoint: *chanPoint,
- ProcessACK: make(chan er.R, 1),
- BreachRetribution: &lnwallet.BreachRetribution{
- BreachTransaction: bobClose.CloseTx,
- LocalOutputSignDesc: &input.SignDescriptor{
- Output: &wire.TxOut{
- PkScript: breachKeys[0],
- },
- },
- },
- }
- contractBreaches <- breach
-
- // We'll also wait to consume the ACK back from the breach arbiter.
- select {
- case err := <-breach.ProcessACK:
- if err != nil {
- t.Fatalf("handoff failed: %v", err)
- }
- case <-time.After(time.Second * 15):
- t.Fatalf("breach arbiter didn't send ack back")
- }
-
- // After exiting, the breach arbiter should have persisted the
- // retribution information and the channel should be shown as pending
- // force closed.
- assertArbiterBreach(t, brar, chanPoint)
-
- // Send another breach event. Since the handoff for this channel was
- // already ACKed, the breach arbiter should immediately ACK and ignore
- // this event.
- breach = &ContractBreachEvent{
- ChanPoint: *chanPoint,
- ProcessACK: make(chan er.R, 1),
- BreachRetribution: &lnwallet.BreachRetribution{
- BreachTransaction: bobClose.CloseTx,
- LocalOutputSignDesc: &input.SignDescriptor{
- Output: &wire.TxOut{
- PkScript: breachKeys[0],
- },
- },
- },
- }
-
- contractBreaches <- breach
-
- // We'll also wait to consume the ACK back from the breach arbiter.
- select {
- case err := <-breach.ProcessACK:
- if err != nil {
- t.Fatalf("handoff failed: %v", err)
- }
- case <-time.After(time.Second * 15):
- t.Fatalf("breach arbiter didn't send ack back")
- }
-
- // State should not have changed.
- assertArbiterBreach(t, brar, chanPoint)
-}
-
-// TestBreachHandoffFail tests that a channel's close observer properly
-// delivers retribution information to the breach arbiter in response to a
-// breach close. This test verifies correctness in the event that the breach
-// arbiter fails to write the information to disk, and that a subsequent attempt
-// at the handoff succeeds.
-func TestBreachHandoffFail(t *testing.T) {
- brar, alice, _, bobClose, contractBreaches,
- cleanUpChans, cleanUpArb := initBreachedState(t)
- defer cleanUpChans()
- defer cleanUpArb()
-
- // Before alerting Alice of the breach, instruct our failing retribution
- // store to fail the next database operation, which we expect to write
- // the information handed off by the channel's close observer.
- fstore := brar.cfg.Store.(*failingRetributionStore)
- fstore.FailNextAdd(nil)
-
- // Signal the notifier to dispatch spend notifications of the funding
- // transaction using the transaction from bob's closing summary.
- chanPoint := alice.ChanPoint
- breach := &ContractBreachEvent{
- ChanPoint: *chanPoint,
- ProcessACK: make(chan er.R, 1),
- BreachRetribution: &lnwallet.BreachRetribution{
- BreachTransaction: bobClose.CloseTx,
- LocalOutputSignDesc: &input.SignDescriptor{
- Output: &wire.TxOut{
- PkScript: breachKeys[0],
- },
- },
- },
- }
- contractBreaches <- breach
-
- // We'll also wait to consume the ACK back from the breach arbiter.
- select {
- case err := <-breach.ProcessACK:
- if err == nil {
- t.Fatalf("breach write should have failed")
- }
- case <-time.After(time.Second * 15):
- t.Fatalf("breach arbiter didn't send ack back")
- }
-
- // Since the handoff failed, the breach arbiter should not show the
- // channel as breached, and the channel should also not have been marked
- // pending closed.
- assertNoArbiterBreach(t, brar, chanPoint)
- assertNotPendingClosed(t, alice)
-
- brar, cleanUpArb, err := createTestArbiter(
- t, contractBreaches, alice.State().Db,
- )
- if err != nil {
- t.Fatalf("unable to initialize test breach arbiter: %v", err)
- }
- defer cleanUpArb()
-
- // Signal a spend of the funding transaction and wait for the close
- // observer to exit. This time we are allowing the handoff to succeed.
- breach = &ContractBreachEvent{
- ChanPoint: *chanPoint,
- ProcessACK: make(chan er.R, 1),
- BreachRetribution: &lnwallet.BreachRetribution{
- BreachTransaction: bobClose.CloseTx,
- LocalOutputSignDesc: &input.SignDescriptor{
- Output: &wire.TxOut{
- PkScript: breachKeys[0],
- },
- },
- },
- }
-
- contractBreaches <- breach
-
- select {
- case err := <-breach.ProcessACK:
- if err != nil {
- t.Fatalf("handoff failed: %v", err)
- }
- case <-time.After(time.Second * 15):
- t.Fatalf("breach arbiter didn't send ack back")
- }
-
- // Check that the breach was properly recorded in the breach arbiter,
- // and that the close observer marked the channel as pending closed
- // before exiting.
- assertArbiterBreach(t, brar, chanPoint)
-}
-
-type publAssertion func(*testing.T, map[wire.OutPoint]*wire.MsgTx,
- chan *wire.MsgTx)
-
-type breachTest struct {
- name string
-
- // spend2ndLevel requests that second level htlcs be spent *again*, as
- // if by a remote party or watchtower. The outpoint of the second level
- // htlc is in effect "readded" to the set of inputs.
- spend2ndLevel bool
-
- // sendFinalConf informs the test to send a confirmation for the justice
- // transaction before asserting the arbiter is cleaned up.
- sendFinalConf bool
-
- // whenNonZeroInputs is called after spending an input but there are
- // further inputs to spend in the test.
- whenNonZeroInputs publAssertion
-
- // whenZeroInputs is called after spending an input but there are no
- // further inputs to spend in the test.
- whenZeroInputs publAssertion
-}
-
-var (
- // commitSpendTx is used to spend commitment outputs.
- commitSpendTx = &wire.MsgTx{
- TxOut: []*wire.TxOut{
- {Value: 500000000},
- },
- }
- // htlc2ndLevlTx is used to transition an htlc output on the commitment
- // transaction to a second level htlc.
- htlc2ndLevlTx = &wire.MsgTx{
- TxOut: []*wire.TxOut{
- {Value: 20000},
- },
- }
- // htlcSpendTx is used to spend from a second level htlc.
- htlcSpendTx = &wire.MsgTx{
- TxOut: []*wire.TxOut{
- {Value: 10000},
- },
- }
-)
-
-var breachTests = []breachTest{
- {
- name: "all spends",
- spend2ndLevel: true,
- whenNonZeroInputs: func(t *testing.T,
- inputs map[wire.OutPoint]*wire.MsgTx,
- publTx chan *wire.MsgTx) {
-
- var tx *wire.MsgTx
- select {
- case tx = <-publTx:
- case <-time.After(5 * time.Second):
- t.Fatalf("tx was not published")
- }
-
- // The justice transaction should have thee same number
- // of inputs as we are tracking in the test.
- if len(tx.TxIn) != len(inputs) {
- t.Fatalf("expected justice txn to have %d "+
- "inputs, found %d", len(inputs),
- len(tx.TxIn))
- }
-
- // Ensure that each input exists on the justice
- // transaction.
- for in := range inputs {
- findInputIndex(t, in, tx)
- }
-
- },
- whenZeroInputs: func(t *testing.T,
- inputs map[wire.OutPoint]*wire.MsgTx,
- publTx chan *wire.MsgTx) {
-
- // Sanity check to ensure the brar doesn't try to
- // broadcast another sweep, since all outputs have been
- // spent externally.
- select {
- case <-publTx:
- t.Fatalf("tx published unexpectedly")
- case <-time.After(50 * time.Millisecond):
- }
- },
- },
- {
- name: "commit spends, second level sweep",
- spend2ndLevel: false,
- sendFinalConf: true,
- whenNonZeroInputs: func(t *testing.T,
- inputs map[wire.OutPoint]*wire.MsgTx,
- publTx chan *wire.MsgTx) {
-
- select {
- case <-publTx:
- case <-time.After(5 * time.Second):
- t.Fatalf("tx was not published")
- }
- },
- whenZeroInputs: func(t *testing.T,
- inputs map[wire.OutPoint]*wire.MsgTx,
- publTx chan *wire.MsgTx) {
-
- // Now a transaction attempting to spend from the second
- // level tx should be published instead. Let this
- // publish succeed by setting the publishing error to
- // nil.
- var tx *wire.MsgTx
- select {
- case tx = <-publTx:
- case <-time.After(5 * time.Second):
- t.Fatalf("tx was not published")
- }
-
- // The commitment outputs should be gone, and there
- // should only be a single htlc spend.
- if len(tx.TxIn) != 1 {
- t.Fatalf("expect 1 htlc output, found %d "+
- "outputs", len(tx.TxIn))
- }
-
- // The remaining TxIn previously attempting to spend
- // the HTLC outpoint should now be spending from the
- // second level tx.
- //
- // NOTE: Commitment outputs and htlc sweeps are spent
- // with a different transactions (and thus txids),
- // ensuring we aren't mistaking this for a different
- // output type.
- onlyInput := tx.TxIn[0].PreviousOutPoint.Hash
- if onlyInput != htlc2ndLevlTx.TxHash() {
- t.Fatalf("tx not attempting to spend second "+
- "level tx, %v", tx.TxIn[0])
- }
- },
- },
-}
-
-// TestBreachSpends checks the behavior of the breach arbiter in response to
-// spend events on a channels outputs by asserting that it properly removes or
-// modifies the inputs from the justice txn.
-func TestBreachSpends(t *testing.T) {
- for _, test := range breachTests {
- tc := test
- t.Run(tc.name, func(t *testing.T) {
- testBreachSpends(t, tc)
- })
- }
-}
-
-func testBreachSpends(t *testing.T, test breachTest) {
- brar, alice, _, bobClose, contractBreaches,
- cleanUpChans, cleanUpArb := initBreachedState(t)
- defer cleanUpChans()
- defer cleanUpArb()
-
- var (
- height = bobClose.ChanSnapshot.CommitHeight
- forceCloseTx = bobClose.CloseTx
- chanPoint = alice.ChanPoint
- publTx = make(chan *wire.MsgTx)
- publErr *er.ErrorCode
- publMtx sync.Mutex
- )
-
- // Make PublishTransaction always return ErrDoubleSpend to begin with.
- publErr = lnwallet.ErrDoubleSpend
- brar.cfg.PublishTransaction = func(tx *wire.MsgTx, _ string) er.R {
- publMtx.Lock()
- var err er.R
- if publErr != nil {
- err = publErr.Default()
- }
- publMtx.Unlock()
- publTx <- tx
-
- return err
- }
-
- // Notify the breach arbiter about the breach.
- retribution, err := lnwallet.NewBreachRetribution(
- alice.State(), height, 1,
- )
- if err != nil {
- t.Fatalf("unable to create breach retribution: %v", err)
- }
-
- breach := &ContractBreachEvent{
- ChanPoint: *chanPoint,
- ProcessACK: make(chan er.R, 1),
- BreachRetribution: retribution,
- }
- contractBreaches <- breach
-
- // We'll also wait to consume the ACK back from the breach arbiter.
- select {
- case err := <-breach.ProcessACK:
- if err != nil {
- t.Fatalf("handoff failed: %v", err)
- }
- case <-time.After(time.Second * 15):
- t.Fatalf("breach arbiter didn't send ack back")
- }
-
- state := alice.State()
- err = state.CloseChannel(&channeldb.ChannelCloseSummary{
- ChanPoint: state.FundingOutpoint,
- ChainHash: state.ChainHash,
- RemotePub: state.IdentityPub,
- CloseType: channeldb.BreachClose,
- Capacity: state.Capacity,
- IsPending: true,
- ShortChanID: state.ShortChanID(),
- RemoteCurrentRevocation: state.RemoteCurrentRevocation,
- RemoteNextRevocation: state.RemoteNextRevocation,
- LocalChanConfig: state.LocalChanCfg,
- })
- if err != nil {
- t.Fatalf("unable to close channel: %v", err)
- }
-
- // After exiting, the breach arbiter should have persisted the
- // retribution information and the channel should be shown as pending
- // force closed.
- assertArbiterBreach(t, brar, chanPoint)
-
- // Assert that the database sees the channel as pending close, otherwise
- // the breach arbiter won't be able to fully close it.
- assertPendingClosed(t, alice)
-
- // Notify that the breaching transaction is confirmed, to trigger the
- // retribution logic.
- notifier := brar.cfg.Notifier.(*mock.SpendNotifier)
- notifier.ConfChan <- &chainntnfs.TxConfirmation{}
-
- // The breach arbiter should attempt to sweep all outputs on the
- // breached commitment. We'll pretend that the HTLC output has been
- // spent by the channel counter party's second level tx already.
- var tx *wire.MsgTx
- select {
- case tx = <-publTx:
- case <-time.After(5 * time.Second):
- t.Fatalf("tx was not published")
- }
-
- // All outputs should initially spend from the force closed txn.
- forceTxID := forceCloseTx.TxHash()
- for _, txIn := range tx.TxIn {
- if txIn.PreviousOutPoint.Hash != forceTxID {
- t.Fatalf("og justice tx not spending commitment")
- }
- }
-
- localOutpoint := retribution.LocalOutpoint
- remoteOutpoint := retribution.RemoteOutpoint
- htlcOutpoint := retribution.HtlcRetributions[0].OutPoint
-
- // Construct a map from outpoint on the force close to the transaction
- // we want it to be spent by. As the test progresses, this map will be
- // updated to contain only the set of commitment or second level
- // outpoints that remain to be spent.
- inputs := map[wire.OutPoint]*wire.MsgTx{
- htlcOutpoint: htlc2ndLevlTx,
- localOutpoint: commitSpendTx,
- remoteOutpoint: commitSpendTx,
- }
-
- // Until no more inputs to spend remain, deliver the spend events and
- // process the assertions prescribed by the test case.
- for len(inputs) > 0 {
- var (
- op wire.OutPoint
- spendTx *wire.MsgTx
- )
-
- // Pick an outpoint at random from the set of inputs.
- for op, spendTx = range inputs {
- delete(inputs, op)
- break
- }
-
- // Deliver the spend notification for the chosen transaction.
- notifier.Spend(&op, 2, spendTx)
-
- // When the second layer transfer is detected, add back the
- // outpoint of the second layer tx so that we can spend it
- // again. Only do so if the test requests this behavior.
- spendTxID := spendTx.TxHash()
- if test.spend2ndLevel && spendTxID == htlc2ndLevlTx.TxHash() {
- // Create the second level outpoint that will be spent,
- // the index is always zero for these 1-in-1-out txns.
- spendOp := wire.OutPoint{Hash: spendTxID}
- inputs[spendOp] = htlcSpendTx
- }
-
- if len(inputs) > 0 {
- test.whenNonZeroInputs(t, inputs, publTx)
- } else {
- // Reset the publishing error so that any publication,
- // made by the breach arbiter, if any, will succeed.
- publMtx.Lock()
- publErr = nil
- publMtx.Unlock()
- test.whenZeroInputs(t, inputs, publTx)
- }
- }
-
- // Deliver confirmation of sweep if the test expects it.
- if test.sendFinalConf {
- notifier.ConfChan <- &chainntnfs.TxConfirmation{}
- }
-
- // Assert that the channel is fully resolved.
- assertBrarCleanup(t, brar, alice.ChanPoint, alice.State().Db)
-}
-
-// findInputIndex returns the index of the input that spends from the given
-// outpoint. This method fails if the outpoint is not found.
-func findInputIndex(t *testing.T, op wire.OutPoint, tx *wire.MsgTx) int {
- t.Helper()
-
- inputIdx := -1
- for i, txIn := range tx.TxIn {
- if txIn.PreviousOutPoint == op {
- inputIdx = i
- }
- }
- if inputIdx == -1 {
- t.Fatalf("input %v in not found", op)
- }
-
- return inputIdx
-}
-
-// assertArbiterBreach checks that the breach arbiter has persisted the breach
-// information for a particular channel.
-func assertArbiterBreach(t *testing.T, brar *breachArbiter,
- chanPoint *wire.OutPoint) {
-
- t.Helper()
-
- isBreached, err := brar.IsBreached(chanPoint)
- if err != nil {
- t.Fatalf("unable to determine if channel is "+
- "breached: %v", err)
- }
-
- if !isBreached {
- t.Fatalf("channel %v was never marked breached",
- chanPoint)
- }
-
-}
-
-// assertNoArbiterBreach checks that the breach arbiter has not persisted the
-// breach information for a particular channel.
-func assertNoArbiterBreach(t *testing.T, brar *breachArbiter,
- chanPoint *wire.OutPoint) {
-
- t.Helper()
-
- isBreached, err := brar.IsBreached(chanPoint)
- if err != nil {
- t.Fatalf("unable to determine if channel is "+
- "breached: %v", err)
- }
-
- if isBreached {
- t.Fatalf("channel %v was marked breached",
- chanPoint)
- }
-}
-
-// assertBrarCleanup blocks until the given channel point has been removed the
-// retribution store and the channel is fully closed in the database.
-func assertBrarCleanup(t *testing.T, brar *breachArbiter,
- chanPoint *wire.OutPoint, db *channeldb.DB) {
-
- t.Helper()
-
- err := wait.NoError(func() er.R {
- isBreached, err := brar.IsBreached(chanPoint)
- if err != nil {
- return err
- }
-
- if isBreached {
- return er.Errorf("channel %v still breached",
- chanPoint)
- }
-
- closedChans, err := db.FetchClosedChannels(false)
- if err != nil {
- return err
- }
-
- for _, channel := range closedChans {
- switch {
- // Wrong channel.
- case channel.ChanPoint != *chanPoint:
- continue
-
- // Right channel, fully closed!
- case !channel.IsPending:
- return nil
- }
-
- // Still pending.
- return er.Errorf("channel %v still pending "+
- "close", chanPoint)
- }
-
- return er.Errorf("channel %v not closed", chanPoint)
-
- }, time.Second)
- if err != nil {
- t.Fatalf(err.String())
- }
-}
-
-// assertPendingClosed checks that the channel has been marked pending closed in
-// the channel database.
-func assertPendingClosed(t *testing.T, c *lnwallet.LightningChannel) {
- t.Helper()
-
- closedChans, err := c.State().Db.FetchClosedChannels(true)
- if err != nil {
- t.Fatalf("unable to load pending closed channels: %v", err)
- }
-
- for _, chanSummary := range closedChans {
- if chanSummary.ChanPoint == *c.ChanPoint {
- return
- }
- }
-
- t.Fatalf("channel %v was not marked pending closed", c.ChanPoint)
-}
-
-// assertNotPendingClosed checks that the channel has not been marked pending
-// closed in the channel database.
-func assertNotPendingClosed(t *testing.T, c *lnwallet.LightningChannel) {
- t.Helper()
-
- closedChans, err := c.State().Db.FetchClosedChannels(true)
- if err != nil {
- t.Fatalf("unable to load pending closed channels: %v", err)
- }
-
- for _, chanSummary := range closedChans {
- if chanSummary.ChanPoint == *c.ChanPoint {
- t.Fatalf("channel %v was marked pending closed",
- c.ChanPoint)
- }
- }
-}
-
-// createTestArbiter instantiates a breach arbiter with a failing retribution
-// store, so that controlled failures can be tested.
-func createTestArbiter(t *testing.T, contractBreaches chan *ContractBreachEvent,
- db *channeldb.DB) (*breachArbiter, func(), er.R) {
-
- // Create a failing retribution store, that wraps a normal one.
- store := newFailingRetributionStore(func() RetributionStore {
- return newRetributionStore(db)
- })
-
- aliceKeyPriv, _ := btcec.PrivKeyFromBytes(btcec.S256(),
- alicesPrivKey)
- signer := &mock.SingleSigner{Privkey: aliceKeyPriv}
-
- // Assemble our test arbiter.
- notifier := mock.MakeMockSpendNotifier()
- ba := newBreachArbiter(&BreachConfig{
- CloseLink: func(_ *wire.OutPoint, _ htlcswitch.ChannelCloseType) {},
- DB: db,
- Estimator: chainfee.NewStaticEstimator(12500, 0),
- GenSweepScript: func() ([]byte, er.R) { return nil, nil },
- ContractBreaches: contractBreaches,
- Signer: signer,
- Notifier: notifier,
- PublishTransaction: func(_ *wire.MsgTx, _ string) er.R { return nil },
- Store: store,
- })
-
- if err := ba.Start(); err != nil {
- return nil, nil, err
- }
-
- // The caller is responsible for closing the database.
- cleanUp := func() {
- ba.Stop()
- }
-
- return ba, cleanUp, nil
-}
-
-// createInitChannels creates two initialized test channels funded with 10 BTC,
-// with 5 BTC allocated to each side. Within the channel, Alice is the
-// initiator.
-func createInitChannels(revocationWindow int) (*lnwallet.LightningChannel, *lnwallet.LightningChannel, func(), er.R) {
-
- aliceKeyPriv, aliceKeyPub := btcec.PrivKeyFromBytes(btcec.S256(),
- alicesPrivKey)
- bobKeyPriv, bobKeyPub := btcec.PrivKeyFromBytes(btcec.S256(),
- bobsPrivKey)
-
- channelCapacity, err := btcutil.NewAmount(10)
- if err != nil {
- return nil, nil, nil, err
- }
-
- channelBal := channelCapacity / 2
- aliceDustLimit := btcutil.Amount(200)
- bobDustLimit := btcutil.Amount(1300)
- csvTimeoutAlice := uint32(5)
- csvTimeoutBob := uint32(4)
-
- prevOut := &wire.OutPoint{
- Hash: chainhash.Hash(testHdSeed),
- Index: 0,
- }
- fundingTxIn := wire.NewTxIn(prevOut, nil, nil)
-
- aliceCfg := channeldb.ChannelConfig{
- ChannelConstraints: channeldb.ChannelConstraints{
- DustLimit: aliceDustLimit,
- MaxPendingAmount: lnwire.MilliSatoshi(rand.Int63()),
- ChanReserve: 0,
- MinHTLC: 0,
- MaxAcceptedHtlcs: uint16(rand.Int31()),
- CsvDelay: uint16(csvTimeoutAlice),
- },
- MultiSigKey: keychain.KeyDescriptor{
- PubKey: aliceKeyPub,
- },
- RevocationBasePoint: keychain.KeyDescriptor{
- PubKey: aliceKeyPub,
- },
- PaymentBasePoint: keychain.KeyDescriptor{
- PubKey: aliceKeyPub,
- },
- DelayBasePoint: keychain.KeyDescriptor{
- PubKey: aliceKeyPub,
- },
- HtlcBasePoint: keychain.KeyDescriptor{
- PubKey: aliceKeyPub,
- },
- }
- bobCfg := channeldb.ChannelConfig{
- ChannelConstraints: channeldb.ChannelConstraints{
- DustLimit: bobDustLimit,
- MaxPendingAmount: lnwire.MilliSatoshi(rand.Int63()),
- ChanReserve: 0,
- MinHTLC: 0,
- MaxAcceptedHtlcs: uint16(rand.Int31()),
- CsvDelay: uint16(csvTimeoutBob),
- },
- MultiSigKey: keychain.KeyDescriptor{
- PubKey: bobKeyPub,
- },
- RevocationBasePoint: keychain.KeyDescriptor{
- PubKey: bobKeyPub,
- },
- PaymentBasePoint: keychain.KeyDescriptor{
- PubKey: bobKeyPub,
- },
- DelayBasePoint: keychain.KeyDescriptor{
- PubKey: bobKeyPub,
- },
- HtlcBasePoint: keychain.KeyDescriptor{
- PubKey: bobKeyPub,
- },
- }
-
- bobRoot, err := chainhash.NewHash(bobKeyPriv.Serialize())
- if err != nil {
- return nil, nil, nil, err
- }
- bobPreimageProducer := shachain.NewRevocationProducer(*bobRoot)
- bobFirstRevoke, err := bobPreimageProducer.AtIndex(0)
- if err != nil {
- return nil, nil, nil, err
- }
- bobCommitPoint := input.ComputeCommitmentPoint(bobFirstRevoke[:])
-
- aliceRoot, err := chainhash.NewHash(aliceKeyPriv.Serialize())
- if err != nil {
- return nil, nil, nil, err
- }
- alicePreimageProducer := shachain.NewRevocationProducer(*aliceRoot)
- aliceFirstRevoke, err := alicePreimageProducer.AtIndex(0)
- if err != nil {
- return nil, nil, nil, err
- }
- aliceCommitPoint := input.ComputeCommitmentPoint(aliceFirstRevoke[:])
-
- aliceCommitTx, bobCommitTx, err := lnwallet.CreateCommitmentTxns(
- channelBal, channelBal, &aliceCfg, &bobCfg, aliceCommitPoint,
- bobCommitPoint, *fundingTxIn, channeldb.SingleFunderTweaklessBit,
- )
- if err != nil {
- return nil, nil, nil, err
- }
-
- alicePath, errr := ioutil.TempDir("", "alicedb")
- if errr != nil {
- return nil, nil, nil, er.E(errr)
- }
-
- dbAlice, err := channeldb.Open(alicePath)
- if err != nil {
- return nil, nil, nil, err
- }
-
- bobPath, errr := ioutil.TempDir("", "bobdb")
- if errr != nil {
- return nil, nil, nil, er.E(errr)
- }
-
- dbBob, err := channeldb.Open(bobPath)
- if err != nil {
- return nil, nil, nil, err
- }
-
- estimator := chainfee.NewStaticEstimator(12500, 0)
- feePerKw, err := estimator.EstimateFeePerKW(1)
- if err != nil {
- return nil, nil, nil, err
- }
-
- // TODO(roasbeef): need to factor in commit fee?
- aliceCommit := channeldb.ChannelCommitment{
- CommitHeight: 0,
- LocalBalance: lnwire.NewMSatFromSatoshis(channelBal),
- RemoteBalance: lnwire.NewMSatFromSatoshis(channelBal),
- FeePerKw: btcutil.Amount(feePerKw),
- CommitFee: 8688,
- CommitTx: aliceCommitTx,
- CommitSig: bytes.Repeat([]byte{1}, 71),
- }
- bobCommit := channeldb.ChannelCommitment{
- CommitHeight: 0,
- LocalBalance: lnwire.NewMSatFromSatoshis(channelBal),
- RemoteBalance: lnwire.NewMSatFromSatoshis(channelBal),
- FeePerKw: btcutil.Amount(feePerKw),
- CommitFee: 8688,
- CommitTx: bobCommitTx,
- CommitSig: bytes.Repeat([]byte{1}, 71),
- }
-
- var chanIDBytes [8]byte
- if _, err := util.ReadFull(crand.Reader, chanIDBytes[:]); err != nil {
- return nil, nil, nil, err
- }
-
- shortChanID := lnwire.NewShortChanIDFromInt(
- binary.BigEndian.Uint64(chanIDBytes[:]),
- )
-
- aliceChannelState := &channeldb.OpenChannel{
- LocalChanCfg: aliceCfg,
- RemoteChanCfg: bobCfg,
- IdentityPub: aliceKeyPub,
- FundingOutpoint: *prevOut,
- ShortChannelID: shortChanID,
- ChanType: channeldb.SingleFunderTweaklessBit,
- IsInitiator: true,
- Capacity: channelCapacity,
- RemoteCurrentRevocation: bobCommitPoint,
- RevocationProducer: alicePreimageProducer,
- RevocationStore: shachain.NewRevocationStore(),
- LocalCommitment: aliceCommit,
- RemoteCommitment: aliceCommit,
- Db: dbAlice,
- Packager: channeldb.NewChannelPackager(shortChanID),
- FundingTxn: testTx,
- }
- bobChannelState := &channeldb.OpenChannel{
- LocalChanCfg: bobCfg,
- RemoteChanCfg: aliceCfg,
- IdentityPub: bobKeyPub,
- FundingOutpoint: *prevOut,
- ShortChannelID: shortChanID,
- ChanType: channeldb.SingleFunderTweaklessBit,
- IsInitiator: false,
- Capacity: channelCapacity,
- RemoteCurrentRevocation: aliceCommitPoint,
- RevocationProducer: bobPreimageProducer,
- RevocationStore: shachain.NewRevocationStore(),
- LocalCommitment: bobCommit,
- RemoteCommitment: bobCommit,
- Db: dbBob,
- Packager: channeldb.NewChannelPackager(shortChanID),
- }
-
- aliceSigner := &mock.SingleSigner{Privkey: aliceKeyPriv}
- bobSigner := &mock.SingleSigner{Privkey: bobKeyPriv}
-
- alicePool := lnwallet.NewSigPool(1, aliceSigner)
- channelAlice, err := lnwallet.NewLightningChannel(
- aliceSigner, aliceChannelState, alicePool,
- )
- if err != nil {
- return nil, nil, nil, err
- }
- alicePool.Start()
-
- bobPool := lnwallet.NewSigPool(1, bobSigner)
- channelBob, err := lnwallet.NewLightningChannel(
- bobSigner, bobChannelState, bobPool,
- )
- if err != nil {
- return nil, nil, nil, err
- }
- bobPool.Start()
-
- addr := &net.TCPAddr{
- IP: net.ParseIP("127.0.0.1"),
- Port: 18556,
- }
- if err := channelAlice.State().SyncPending(addr, 101); err != nil {
- return nil, nil, nil, err
- }
-
- addr = &net.TCPAddr{
- IP: net.ParseIP("127.0.0.1"),
- Port: 18555,
- }
- if err := channelBob.State().SyncPending(addr, 101); err != nil {
- return nil, nil, nil, err
- }
-
- cleanUpFunc := func() {
- dbBob.Close()
- dbAlice.Close()
- os.RemoveAll(bobPath)
- os.RemoveAll(alicePath)
- }
-
- // Now that the channel are open, simulate the start of a session by
- // having Alice and Bob extend their revocation windows to each other.
- err = initRevocationWindows(channelAlice, channelBob, revocationWindow)
- if err != nil {
- return nil, nil, nil, err
- }
-
- return channelAlice, channelBob, cleanUpFunc, nil
-}
-
-// initRevocationWindows simulates a new channel being opened within the p2p
-// network by populating the initial revocation windows of the passed
-// commitment state machines.
-//
-// TODO(conner) remove code duplication
-func initRevocationWindows(chanA, chanB *lnwallet.LightningChannel, windowSize int) er.R {
- aliceNextRevoke, err := chanA.NextRevocationKey()
- if err != nil {
- return err
- }
- if err := chanB.InitNextRevocation(aliceNextRevoke); err != nil {
- return err
- }
-
- bobNextRevoke, err := chanB.NextRevocationKey()
- if err != nil {
- return err
- }
- if err := chanA.InitNextRevocation(bobNextRevoke); err != nil {
- return err
- }
-
- return nil
-}
-
-// createHTLC is a utility function for generating an HTLC with a given
-// preimage and a given amount.
-// TODO(conner) remove code duplication
-func createHTLC(data int, amount lnwire.MilliSatoshi) (*lnwire.UpdateAddHTLC, [32]byte) {
- preimage := bytes.Repeat([]byte{byte(data)}, 32)
- paymentHash := sha256.Sum256(preimage)
-
- var returnPreimage [32]byte
- copy(returnPreimage[:], preimage)
-
- return &lnwire.UpdateAddHTLC{
- ID: uint64(data),
- PaymentHash: paymentHash,
- Amount: amount,
- Expiry: uint32(5),
- }, returnPreimage
-}
-
-// forceStateTransition executes the necessary interaction between the two
-// commitment state machines to transition to a new state locking in any
-// pending updates.
-// TODO(conner) remove code duplication
-func forceStateTransition(chanA, chanB *lnwallet.LightningChannel) er.R {
- aliceSig, aliceHtlcSigs, _, err := chanA.SignNextCommitment()
- if err != nil {
- return err
- }
- if err = chanB.ReceiveNewCommitment(aliceSig, aliceHtlcSigs); err != nil {
- return err
- }
-
- bobRevocation, _, err := chanB.RevokeCurrentCommitment()
- if err != nil {
- return err
- }
- bobSig, bobHtlcSigs, _, err := chanB.SignNextCommitment()
- if err != nil {
- return err
- }
-
- _, _, _, _, err = chanA.ReceiveRevocation(bobRevocation)
- if err != nil {
- return err
- }
- if err := chanA.ReceiveNewCommitment(bobSig, bobHtlcSigs); err != nil {
- return err
- }
-
- aliceRevocation, _, err := chanA.RevokeCurrentCommitment()
- if err != nil {
- return err
- }
- _, _, _, _, err = chanB.ReceiveRevocation(aliceRevocation)
- if err != nil {
- return err
- }
-
- return nil
-}
-
-func TestMain(m *testing.M) {
- globalcfg.SelectConfig(globalcfg.BitcoinDefaults())
- os.Exit(m.Run())
-}
diff --git a/lnd/brontide/README.md b/lnd/brontide/README.md
deleted file mode 100644
index 0f0c6fbd..00000000
--- a/lnd/brontide/README.md
+++ /dev/null
@@ -1,28 +0,0 @@
-brontide
-==========
-
-[![Build Status](http://img.shields.io/travis/lightningnetwork/lnd.svg)](https://travis-ci.org/lightningnetwork/lnd)
-[![MIT licensed](https://img.shields.io/badge/license-MIT-blue.svg)](https://github.com/lightningnetwork/lnd/blob/master/LICENSE)
-[![GoDoc](https://img.shields.io/badge/godoc-reference-blue.svg)](http://godoc.org/github.com/lightningnetwork/lnd/brontide)
-
-The brontide package implements a secure crypto messaging protocol based off of
-the [Noise Protocol Framework](http://noiseprotocol.org/noise.html). The
-package exposes the raw state machine that handles the handshake and subsequent
-message encryption/decryption scheme. Additionally, the package exposes a
-[net.Conn](https://golang.org/pkg/net/#Conn) and a
-[net.Listener](https://golang.org/pkg/net/#Listener) interface implementation
-which allows the encrypted transport to be seamlessly integrated into a
-codebase.
-
-The secure messaging scheme implemented within this package is described in
-detail in [BOLT #8 of the Lightning Network specifications](https://github.com/lightningnetwork/lightning-rfc/blob/master/08-transport.md).
-
-This package has intentionally been designed so it can be used as a standalone
-package for any projects needing secure encrypted+authenticated communications
-between network enabled programs.
-
-## Installation and Updating
-
-```bash
-$ go get -u github.com/lightningnetwork/lnd/brontide
-```
diff --git a/lnd/brontide/conn.go b/lnd/brontide/conn.go
deleted file mode 100644
index 621e51ce..00000000
--- a/lnd/brontide/conn.go
+++ /dev/null
@@ -1,291 +0,0 @@
-package brontide
-
-import (
- "bytes"
- "math"
- "net"
- "time"
-
- "github.com/pkt-cash/pktd/btcec"
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/btcutil/util"
- "github.com/pkt-cash/pktd/lnd/keychain"
- "github.com/pkt-cash/pktd/lnd/lnwire"
- "github.com/pkt-cash/pktd/lnd/tor"
-)
-
-// Conn is an implementation of net.Conn which enforces an authenticated key
-// exchange and message encryption protocol dubbed "Brontide" after initial TCP
-// connection establishment. In the case of a successful handshake, all
-// messages sent via the .Write() method are encrypted with an AEAD cipher
-// along with an encrypted length-prefix. See the Machine struct for
-// additional details w.r.t to the handshake and encryption scheme.
-type Conn struct {
- conn net.Conn
-
- noise *Machine
-
- readBuf bytes.Buffer
-}
-
-// A compile-time assertion to ensure that Conn meets the net.Conn interface.
-var _ net.Conn = (*Conn)(nil)
-
-// Dial attempts to establish an encrypted+authenticated connection with the
-// remote peer located at address which has remotePub as its long-term static
-// public key. In the case of a handshake failure, the connection is closed and
-// a non-nil error is returned.
-func Dial(local keychain.SingleKeyECDH, netAddr *lnwire.NetAddress,
- timeout time.Duration, dialer tor.DialFunc) (*Conn, er.R) {
-
- ipAddr := netAddr.Address.String()
- var conn net.Conn
- var err er.R
- conn, err = dialer("tcp", ipAddr, timeout)
- if err != nil {
- return nil, err
- }
-
- b := &Conn{
- conn: conn,
- noise: NewBrontideMachine(true, local, netAddr.IdentityKey),
- }
-
- // Initiate the handshake by sending the first act to the receiver.
- actOne, err := b.noise.GenActOne()
- if err != nil {
- b.conn.Close()
- return nil, err
- }
- if _, err := conn.Write(actOne[:]); err != nil {
- b.conn.Close()
- return nil, er.E(err)
- }
-
- // We'll ensure that we get ActTwo from the remote peer in a timely
- // manner. If they don't respond within 1s, then we'll kill the
- // connection.
- err = er.E(conn.SetReadDeadline(time.Now().Add(handshakeReadTimeout)))
- if err != nil {
- b.conn.Close()
- return nil, err
- }
-
- // If the first act was successful (we know that address is actually
- // remotePub), then read the second act after which we'll be able to
- // send our static public key to the remote peer with strong forward
- // secrecy.
- var actTwo [ActTwoSize]byte
- if _, err := util.ReadFull(conn, actTwo[:]); err != nil {
- b.conn.Close()
- return nil, err
- }
- if err := b.noise.RecvActTwo(actTwo); err != nil {
- b.conn.Close()
- return nil, err
- }
-
- // Finally, complete the handshake by sending over our encrypted static
- // key and execute the final ECDH operation.
- actThree, err := b.noise.GenActThree()
- if err != nil {
- b.conn.Close()
- return nil, err
- }
- if _, err := conn.Write(actThree[:]); err != nil {
- b.conn.Close()
- return nil, er.E(err)
- }
-
- // We'll reset the deadline as it's no longer critical beyond the
- // initial handshake.
- err = er.E(conn.SetReadDeadline(time.Time{}))
- if err != nil {
- b.conn.Close()
- return nil, err
- }
-
- return b, nil
-}
-
-// ReadNextMessage uses the connection in a message-oriented manner, instructing
-// it to read the next _full_ message with the brontide stream. This function
-// will block until the read of the header and body succeeds.
-//
-// NOTE: This method SHOULD NOT be used in the case that the connection may be
-// adversarial and induce long delays. If the caller needs to set read deadlines
-// appropriately, it is preferred that they use the split ReadNextHeader and
-// ReadNextBody methods so that the deadlines can be set appropriately on each.
-func (c *Conn) ReadNextMessage() ([]byte, er.R) {
- return c.noise.ReadMessage(c.conn)
-}
-
-// ReadNextHeader uses the connection to read the next header from the brontide
-// stream. This function will block until the read of the header succeeds and
-// return the packet length (including MAC overhead) that is expected from the
-// subsequent call to ReadNextBody.
-func (c *Conn) ReadNextHeader() (uint32, er.R) {
- return c.noise.ReadHeader(c.conn)
-}
-
-// ReadNextBody uses the connection to read the next message body from the
-// brontide stream. This function will block until the read of the body succeeds
-// and return the decrypted payload. The provided buffer MUST be the packet
-// length returned by the preceding call to ReadNextHeader.
-func (c *Conn) ReadNextBody(buf []byte) ([]byte, er.R) {
- return c.noise.ReadBody(c.conn, buf)
-}
-
-// Read reads data from the connection. Read can be made to time out and
-// return an Error with Timeout() == true after a fixed time limit; see
-// SetDeadline and SetReadDeadline.
-//
-// Part of the net.Conn interface.
-func (c *Conn) Read(b []byte) (n int, err error) {
- // In order to reconcile the differences between the record abstraction
- // of our AEAD connection, and the stream abstraction of TCP, we
- // maintain an intermediate read buffer. If this buffer becomes
- // depleted, then we read the next record, and feed it into the
- // buffer. Otherwise, we read directly from the buffer.
- if c.readBuf.Len() == 0 {
- plaintext, err := c.noise.ReadMessage(c.conn)
- if err != nil {
- return 0, er.Native(err)
- }
-
- if _, err := c.readBuf.Write(plaintext); err != nil {
- return 0, err
- }
- }
-
- return c.readBuf.Read(b)
-}
-
-// Write writes data to the connection. Write can be made to time out and
-// return an Error with Timeout() == true after a fixed time limit; see
-// SetDeadline and SetWriteDeadline.
-//
-// Part of the net.Conn interface.
-func (c *Conn) Write(b []byte) (int, error) {
- // If the message doesn't require any chunking, then we can go ahead
- // with a single write.
- if len(b) <= math.MaxUint16 {
- err := c.noise.WriteMessage(b)
- if err != nil {
- return 0, er.Native(err)
- }
- i, e := c.noise.Flush(c.conn)
- return i, er.Native(e)
- }
-
- // If we need to split the message into fragments, then we'll write
- // chunks which maximize usage of the available payload.
- chunkSize := math.MaxUint16
-
- bytesToWrite := len(b)
- bytesWritten := 0
- for bytesWritten < bytesToWrite {
- // If we're on the last chunk, then truncate the chunk size as
- // necessary to avoid an out-of-bounds array memory access.
- if bytesWritten+chunkSize > len(b) {
- chunkSize = len(b) - bytesWritten
- }
-
- // Slice off the next chunk to be written based on our running
- // counter and next chunk size.
- chunk := b[bytesWritten : bytesWritten+chunkSize]
- if err := c.noise.WriteMessage(chunk); err != nil {
- return bytesWritten, er.Native(err)
- }
-
- n, err := c.noise.Flush(c.conn)
- bytesWritten += n
- if err != nil {
- return bytesWritten, er.Native(err)
- }
- }
-
- return bytesWritten, nil
-}
-
-// WriteMessage encrypts and buffers the next message p for the connection. The
-// ciphertext of the message is prepended with an encrypt+auth'd length which
-// must be used as the AD to the AEAD construction when being decrypted by the
-// other side.
-//
-// NOTE: This DOES NOT write the message to the wire, it should be followed by a
-// call to Flush to ensure the message is written.
-func (c *Conn) WriteMessage(b []byte) er.R {
- return c.noise.WriteMessage(b)
-}
-
-// Flush attempts to write a message buffered using WriteMessage to the
-// underlying connection. If no buffered message exists, this will result in a
-// NOP. Otherwise, it will continue to write the remaining bytes, picking up
-// where the byte stream left off in the event of a partial write. The number of
-// bytes returned reflects the number of plaintext bytes in the payload, and
-// does not account for the overhead of the header or MACs.
-//
-// NOTE: It is safe to call this method again iff a timeout error is returned.
-func (c *Conn) Flush() (int, er.R) {
- return c.noise.Flush(c.conn)
-}
-
-// Close closes the connection. Any blocked Read or Write operations will be
-// unblocked and return errors.
-//
-// Part of the net.Conn interface.
-func (c *Conn) Close() error {
- // TODO(roasbeef): reset brontide state?
- return c.conn.Close()
-}
-
-// LocalAddr returns the local network address.
-//
-// Part of the net.Conn interface.
-func (c *Conn) LocalAddr() net.Addr {
- return c.conn.LocalAddr()
-}
-
-// RemoteAddr returns the remote network address.
-//
-// Part of the net.Conn interface.
-func (c *Conn) RemoteAddr() net.Addr {
- return c.conn.RemoteAddr()
-}
-
-// SetDeadline sets the read and write deadlines associated with the
-// connection. It is equivalent to calling both SetReadDeadline and
-// SetWriteDeadline.
-//
-// Part of the net.Conn interface.
-func (c *Conn) SetDeadline(t time.Time) error {
- return c.conn.SetDeadline(t)
-}
-
-// SetReadDeadline sets the deadline for future Read calls. A zero value for t
-// means Read will not time out.
-//
-// Part of the net.Conn interface.
-func (c *Conn) SetReadDeadline(t time.Time) error {
- return c.conn.SetReadDeadline(t)
-}
-
-// SetWriteDeadline sets the deadline for future Write calls. Even if write
-// times out, it may return n > 0, indicating that some of the data was
-// successfully written. A zero value for t means Write will not time out.
-//
-// Part of the net.Conn interface.
-func (c *Conn) SetWriteDeadline(t time.Time) error {
- return c.conn.SetWriteDeadline(t)
-}
-
-// RemotePub returns the remote peer's static public key.
-func (c *Conn) RemotePub() *btcec.PublicKey {
- return c.noise.remoteStatic
-}
-
-// LocalPub returns the local peer's static public key.
-func (c *Conn) LocalPub() *btcec.PublicKey {
- return c.noise.localStatic.PubKey()
-}
diff --git a/lnd/brontide/listener.go b/lnd/brontide/listener.go
deleted file mode 100644
index db75de60..00000000
--- a/lnd/brontide/listener.go
+++ /dev/null
@@ -1,256 +0,0 @@
-package brontide
-
-import (
- "net"
- "time"
-
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/btcutil/util"
- "github.com/pkt-cash/pktd/lnd/keychain"
-)
-
-// defaultHandshakes is the maximum number of handshakes that can be done in
-// parallel.
-const defaultHandshakes = 1000
-
-// Listener is an implementation of a net.Conn which executes an authenticated
-// key exchange and message encryption protocol dubbed "Machine" after
-// initial connection acceptance. See the Machine struct for additional
-// details w.r.t the handshake and encryption scheme used within the
-// connection.
-type Listener struct {
- localStatic keychain.SingleKeyECDH
-
- tcp *net.TCPListener
-
- handshakeSema chan struct{}
- conns chan maybeConn
- quit chan struct{}
-}
-
-// A compile-time assertion to ensure that Conn meets the net.Listener interface.
-var _ net.Listener = (*Listener)(nil)
-
-// NewListener returns a new net.Listener which enforces the Brontide scheme
-// during both initial connection establishment and data transfer.
-func NewListener(localStatic keychain.SingleKeyECDH,
- listenAddr string) (*Listener, er.R) {
-
- addr, err := net.ResolveTCPAddr("tcp", listenAddr)
- if err != nil {
- return nil, er.E(err)
- }
-
- l, err := net.ListenTCP("tcp", addr)
- if err != nil {
- return nil, er.E(err)
- }
-
- brontideListener := &Listener{
- localStatic: localStatic,
- tcp: l,
- handshakeSema: make(chan struct{}, defaultHandshakes),
- conns: make(chan maybeConn),
- quit: make(chan struct{}),
- }
-
- for i := 0; i < defaultHandshakes; i++ {
- brontideListener.handshakeSema <- struct{}{}
- }
-
- go brontideListener.listen()
-
- return brontideListener, nil
-}
-
-// listen accepts connection from the underlying tcp conn, then performs
-// the brontinde handshake procedure asynchronously. A maximum of
-// defaultHandshakes will be active at any given time.
-//
-// NOTE: This method must be run as a goroutine.
-func (l *Listener) listen() {
- for {
- select {
- case <-l.handshakeSema:
- case <-l.quit:
- return
- }
-
- conn, err := l.tcp.Accept()
- if err != nil {
- l.rejectConn(er.E(err))
- l.handshakeSema <- struct{}{}
- continue
- }
-
- go l.doHandshake(conn)
- }
-}
-
-// rejectedConnErr is a helper function that prepends the remote address of the
-// failed connection attempt to the original error message.
-func rejectedConnErr(err er.R, remoteAddr string) er.R {
- return er.Errorf("unable to accept connection from %v: %v", remoteAddr,
- err)
-}
-
-// doHandshake asynchronously performs the brontide handshake, so that it does
-// not block the main accept loop. This prevents peers that delay writing to the
-// connection from block other connection attempts.
-func (l *Listener) doHandshake(conn net.Conn) {
- defer func() { l.handshakeSema <- struct{}{} }()
-
- select {
- case <-l.quit:
- return
- default:
- }
-
- remoteAddr := conn.RemoteAddr().String()
-
- brontideConn := &Conn{
- conn: conn,
- noise: NewBrontideMachine(false, l.localStatic, nil),
- }
-
- // We'll ensure that we get ActOne from the remote peer in a timely
- // manner. If they don't respond within 1s, then we'll kill the
- // connection.
- errr := conn.SetReadDeadline(time.Now().Add(handshakeReadTimeout))
- if errr != nil {
- brontideConn.conn.Close()
- l.rejectConn(rejectedConnErr(er.E(errr), remoteAddr))
- return
- }
-
- // Attempt to carry out the first act of the handshake protocol. If the
- // connecting node doesn't know our long-term static public key, then
- // this portion will fail with a non-nil error.
- var actOne [ActOneSize]byte
- if _, err := util.ReadFull(conn, actOne[:]); err != nil {
- brontideConn.conn.Close()
- l.rejectConn(rejectedConnErr(err, remoteAddr))
- return
- }
- if err := brontideConn.noise.RecvActOne(actOne); err != nil {
- brontideConn.conn.Close()
- l.rejectConn(rejectedConnErr(err, remoteAddr))
- return
- }
-
- // Next, progress the handshake processes by sending over our ephemeral
- // key for the session along with an authenticating tag.
- actTwo, err := brontideConn.noise.GenActTwo()
- if err != nil {
- brontideConn.conn.Close()
- l.rejectConn(rejectedConnErr(err, remoteAddr))
- return
- }
- if _, err := conn.Write(actTwo[:]); err != nil {
- brontideConn.conn.Close()
- l.rejectConn(rejectedConnErr(er.E(err), remoteAddr))
- return
- }
-
- select {
- case <-l.quit:
- return
- default:
- }
-
- // We'll ensure that we get ActTwo from the remote peer in a timely
- // manner. If they don't respond within 1 second, then we'll kill the
- // connection.
- errr = conn.SetReadDeadline(time.Now().Add(handshakeReadTimeout))
- if errr != nil {
- brontideConn.conn.Close()
- l.rejectConn(rejectedConnErr(er.E(errr), remoteAddr))
- return
- }
-
- // Finally, finish the handshake processes by reading and decrypting
- // the connection peer's static public key. If this succeeds then both
- // sides have mutually authenticated each other.
- var actThree [ActThreeSize]byte
- if _, err := util.ReadFull(conn, actThree[:]); err != nil {
- brontideConn.conn.Close()
- l.rejectConn(rejectedConnErr(err, remoteAddr))
- return
- }
- if err := brontideConn.noise.RecvActThree(actThree); err != nil {
- brontideConn.conn.Close()
- l.rejectConn(rejectedConnErr(err, remoteAddr))
- return
- }
-
- // We'll reset the deadline as it's no longer critical beyond the
- // initial handshake.
- errr = conn.SetReadDeadline(time.Time{})
- if errr != nil {
- brontideConn.conn.Close()
- l.rejectConn(rejectedConnErr(er.E(errr), remoteAddr))
- return
- }
-
- l.acceptConn(brontideConn)
-}
-
-// maybeConn holds either a brontide connection or an error returned from the
-// handshake.
-type maybeConn struct {
- conn *Conn
- err er.R
-}
-
-// acceptConn returns a connection that successfully performed a handshake.
-func (l *Listener) acceptConn(conn *Conn) {
- select {
- case l.conns <- maybeConn{conn: conn}:
- case <-l.quit:
- }
-}
-
-// rejectConn returns any errors encountered during connection or handshake.
-func (l *Listener) rejectConn(err er.R) {
- select {
- case l.conns <- maybeConn{err: err}:
- case <-l.quit:
- }
-}
-
-// Accept waits for and returns the next connection to the listener. All
-// incoming connections are authenticated via the three act Brontide
-// key-exchange scheme. This function will fail with a non-nil error in the
-// case that either the handshake breaks down, or the remote peer doesn't know
-// our static public key.
-//
-// Part of the net.Listener interface.
-func (l *Listener) Accept() (net.Conn, error) {
- select {
- case result := <-l.conns:
- return result.conn, er.Native(result.err)
- case <-l.quit:
- return nil, er.Native(er.New("brontide connection closed"))
- }
-}
-
-// Close closes the listener. Any blocked Accept operations will be unblocked
-// and return errors.
-//
-// Part of the net.Listener interface.
-func (l *Listener) Close() error {
- select {
- case <-l.quit:
- default:
- close(l.quit)
- }
-
- return l.tcp.Close()
-}
-
-// Addr returns the listener's network address.
-//
-// Part of the net.Listener interface.
-func (l *Listener) Addr() net.Addr {
- return l.tcp.Addr()
-}
diff --git a/lnd/brontide/noise.go b/lnd/brontide/noise.go
deleted file mode 100644
index 68c1870c..00000000
--- a/lnd/brontide/noise.go
+++ /dev/null
@@ -1,909 +0,0 @@
-package brontide
-
-import (
- "crypto/cipher"
- "crypto/sha256"
- "encoding/binary"
- "io"
- "math"
- "time"
-
- "golang.org/x/crypto/chacha20poly1305"
- "golang.org/x/crypto/hkdf"
-
- "github.com/pkt-cash/pktd/btcec"
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/btcutil/util"
- "github.com/pkt-cash/pktd/lnd/keychain"
-)
-
-const (
- // protocolName is the precise instantiation of the Noise protocol
- // handshake at the center of Brontide. This value will be used as part
- // of the prologue. If the initiator and responder aren't using the
- // exact same string for this value, along with prologue of the Bitcoin
- // network, then the initial handshake will fail.
- protocolName = "Noise_XK_secp256k1_ChaChaPoly_SHA256"
-
- // macSize is the length in bytes of the tags generated by poly1305.
- macSize = 16
-
- // lengthHeaderSize is the number of bytes used to prefix encode the
- // length of a message payload.
- lengthHeaderSize = 2
-
- // encHeaderSize is the number of bytes required to hold an encrypted
- // header and it's MAC.
- encHeaderSize = lengthHeaderSize + macSize
-
- // keyRotationInterval is the number of messages sent on a single
- // cipher stream before the keys are rotated forwards.
- keyRotationInterval = 1000
-
- // handshakeReadTimeout is a read timeout that will be enforced when
- // waiting for data payloads during the various acts of Brontide. If
- // the remote party fails to deliver the proper payload within this
- // time frame, then we'll fail the connection.
- handshakeReadTimeout = time.Second * 5
-)
-
-var (
- Err = er.NewErrorType("lnd.brontide")
- // ErrMaxMessageLengthExceeded is returned when a message to be written to
- // the cipher session exceeds the maximum allowed message payload.
- ErrMaxMessageLengthExceeded = Err.CodeWithDetail("ErrMaxMessageLengthExceeded",
- "the generated payload exceeds the max allowed message length of (2^16)-1")
-
- // ErrMessageNotFlushed signals that the connection cannot accept a new
- // message because the prior message has not been fully flushed.
- ErrMessageNotFlushed = Err.CodeWithDetail("ErrMessageNotFlushed", "prior message not flushed")
-
- // lightningPrologue is the noise prologue that is used to initialize
- // the brontide noise handshake.
- lightningPrologue = []byte("lightning")
-
- // ephemeralGen is the default ephemeral key generator, used to derive a
- // unique ephemeral key for each brontide handshake.
- ephemeralGen = func() (*btcec.PrivateKey, er.R) {
- return btcec.NewPrivateKey(btcec.S256())
- }
-)
-
-// TODO(roasbeef): free buffer pool?
-
-// ecdh performs an ECDH operation between pub and priv. The returned value is
-// the sha256 of the compressed shared point.
-func ecdh(pub *btcec.PublicKey, priv keychain.SingleKeyECDH) ([]byte, er.R) {
- hash, err := priv.ECDH(pub)
- return hash[:], err
-}
-
-// cipherState encapsulates the state for the AEAD which will be used to
-// encrypt+authenticate any payloads sent during the handshake, and messages
-// sent once the handshake has completed.
-type cipherState struct {
- // nonce is the nonce passed into the chacha20-poly1305 instance for
- // encryption+decryption. The nonce is incremented after each successful
- // encryption/decryption.
- //
- // TODO(roasbeef): this should actually be 96 bit
- nonce uint64
-
- // secretKey is the shared symmetric key which will be used to
- // instantiate the cipher.
- //
- // TODO(roasbeef): m-lock??
- secretKey [32]byte
-
- // salt is an additional secret which is used during key rotation to
- // generate new keys.
- salt [32]byte
-
- // cipher is an instance of the ChaCha20-Poly1305 AEAD construction
- // created using the secretKey above.
- cipher cipher.AEAD
-}
-
-// Encrypt returns a ciphertext which is the encryption of the plainText
-// observing the passed associatedData within the AEAD construction.
-func (c *cipherState) Encrypt(associatedData, cipherText, plainText []byte) []byte {
- defer func() {
- c.nonce++
-
- if c.nonce == keyRotationInterval {
- c.rotateKey()
- }
- }()
-
- var nonce [12]byte
- binary.LittleEndian.PutUint64(nonce[4:], c.nonce)
-
- return c.cipher.Seal(cipherText, nonce[:], plainText, associatedData)
-}
-
-// Decrypt attempts to decrypt the passed ciphertext observing the specified
-// associatedData within the AEAD construction. In the case that the final MAC
-// check fails, then a non-nil error will be returned.
-func (c *cipherState) Decrypt(associatedData, plainText, cipherText []byte) ([]byte, er.R) {
- defer func() {
- c.nonce++
-
- if c.nonce == keyRotationInterval {
- c.rotateKey()
- }
- }()
-
- var nonce [12]byte
- binary.LittleEndian.PutUint64(nonce[4:], c.nonce)
-
- o, e := c.cipher.Open(plainText, nonce[:], cipherText, associatedData)
- return o, er.E(e)
-}
-
-// InitializeKey initializes the secret key and AEAD cipher scheme based off of
-// the passed key.
-func (c *cipherState) InitializeKey(key [32]byte) {
- c.secretKey = key
- c.nonce = 0
-
- // Safe to ignore the error here as our key is properly sized
- // (32-bytes).
- c.cipher, _ = chacha20poly1305.New(c.secretKey[:])
-}
-
-// InitializeKeyWithSalt is identical to InitializeKey however it also sets the
-// cipherState's salt field which is used for key rotation.
-func (c *cipherState) InitializeKeyWithSalt(salt, key [32]byte) {
- c.salt = salt
- c.InitializeKey(key)
-}
-
-// rotateKey rotates the current encryption/decryption key for this cipherState
-// instance. Key rotation is performed by ratcheting the current key forward
-// using an HKDF invocation with the cipherState's salt as the salt, and the
-// current key as the input.
-func (c *cipherState) rotateKey() {
- var (
- info []byte
- nextKey [32]byte
- )
-
- oldKey := c.secretKey
- h := hkdf.New(sha256.New, oldKey[:], c.salt[:], info)
-
- // hkdf(ck, k, zero)
- // |
- // | \
- // | \
- // ck k'
- h.Read(c.salt[:])
- h.Read(nextKey[:])
-
- c.InitializeKey(nextKey)
-}
-
-// symmetricState encapsulates a cipherState object and houses the ephemeral
-// handshake digest state. This struct is used during the handshake to derive
-// new shared secrets based off of the result of ECDH operations. Ultimately,
-// the final key yielded by this struct is the result of an incremental
-// Triple-DH operation.
-type symmetricState struct {
- cipherState
-
- // chainingKey is used as the salt to the HKDF function to derive a new
- // chaining key as well as a new tempKey which is used for
- // encryption/decryption.
- chainingKey [32]byte
-
- // tempKey is the latter 32 bytes resulted from the latest HKDF
- // iteration. This key is used to encrypt/decrypt any handshake
- // messages or payloads sent until the next DH operation is executed.
- tempKey [32]byte
-
- // handshakeDigest is the cumulative hash digest of all handshake
- // messages sent from start to finish. This value is never transmitted
- // to the other side, but will be used as the AD when
- // encrypting/decrypting messages using our AEAD construction.
- handshakeDigest [32]byte
-}
-
-// mixKey implements a basic HKDF-based key ratchet. This method is called
-// with the result of each DH output generated during the handshake process.
-// The first 32 bytes extract from the HKDF reader is the next chaining key,
-// then latter 32 bytes become the temp secret key using within any future AEAD
-// operations until another DH operation is performed.
-func (s *symmetricState) mixKey(input []byte) {
- var info []byte
-
- secret := input
- salt := s.chainingKey
- h := hkdf.New(sha256.New, secret, salt[:], info)
-
- // hkdf(ck, input, zero)
- // |
- // | \
- // | \
- // ck k
- h.Read(s.chainingKey[:])
- h.Read(s.tempKey[:])
-
- // cipher.k = temp_key
- s.InitializeKey(s.tempKey)
-}
-
-// mixHash hashes the passed input data into the cumulative handshake digest.
-// The running result of this value (h) is used as the associated data in all
-// decryption/encryption operations.
-func (s *symmetricState) mixHash(data []byte) {
- h := sha256.New()
- h.Write(s.handshakeDigest[:])
- h.Write(data)
-
- copy(s.handshakeDigest[:], h.Sum(nil))
-}
-
-// EncryptAndHash returns the authenticated encryption of the passed plaintext.
-// When encrypting the handshake digest (h) is used as the associated data to
-// the AEAD cipher.
-func (s *symmetricState) EncryptAndHash(plaintext []byte) []byte {
- ciphertext := s.Encrypt(s.handshakeDigest[:], nil, plaintext)
-
- s.mixHash(ciphertext)
-
- return ciphertext
-}
-
-// DecryptAndHash returns the authenticated decryption of the passed
-// ciphertext. When encrypting the handshake digest (h) is used as the
-// associated data to the AEAD cipher.
-func (s *symmetricState) DecryptAndHash(ciphertext []byte) ([]byte, er.R) {
- plaintext, err := s.Decrypt(s.handshakeDigest[:], nil, ciphertext)
- if err != nil {
- return nil, err
- }
-
- s.mixHash(ciphertext)
-
- return plaintext, nil
-}
-
-// InitializeSymmetric initializes the symmetric state by setting the handshake
-// digest (h) and the chaining key (ck) to protocol name.
-func (s *symmetricState) InitializeSymmetric(protocolName []byte) {
- var empty [32]byte
-
- s.handshakeDigest = sha256.Sum256(protocolName)
- s.chainingKey = s.handshakeDigest
- s.InitializeKey(empty)
-}
-
-// handshakeState encapsulates the symmetricState and keeps track of all the
-// public keys (static and ephemeral) for both sides during the handshake
-// transcript. If the handshake completes successfully, then two instances of a
-// cipherState are emitted: one to encrypt messages from initiator to
-// responder, and the other for the opposite direction.
-type handshakeState struct {
- symmetricState
-
- initiator bool
-
- localStatic keychain.SingleKeyECDH
- localEphemeral keychain.SingleKeyECDH // nolint (false positive)
-
- remoteStatic *btcec.PublicKey
- remoteEphemeral *btcec.PublicKey
-}
-
-// newHandshakeState returns a new instance of the handshake state initialized
-// with the prologue and protocol name. If this is the responder's handshake
-// state, then the remotePub can be nil.
-func newHandshakeState(initiator bool, prologue []byte,
- localKey keychain.SingleKeyECDH,
- remotePub *btcec.PublicKey) handshakeState {
-
- h := handshakeState{
- initiator: initiator,
- localStatic: localKey,
- remoteStatic: remotePub,
- }
-
- // Set the current chaining key and handshake digest to the hash of the
- // protocol name, and additionally mix in the prologue. If either sides
- // disagree about the prologue or protocol name, then the handshake
- // will fail.
- h.InitializeSymmetric([]byte(protocolName))
- h.mixHash(prologue)
-
- // In Noise_XK, the initiator should know the responder's static
- // public key, therefore we include the responder's static key in the
- // handshake digest. If the initiator gets this value wrong, then the
- // handshake will fail.
- if initiator {
- h.mixHash(remotePub.SerializeCompressed())
- } else {
- h.mixHash(localKey.PubKey().SerializeCompressed())
- }
-
- return h
-}
-
-// EphemeralGenerator is a functional option that allows callers to substitute
-// a custom function for use when generating ephemeral keys for ActOne or
-// ActTwo. The function closure returned by this function can be passed into
-// NewBrontideMachine as a function option parameter.
-func EphemeralGenerator(gen func() (*btcec.PrivateKey, er.R)) func(*Machine) {
- return func(m *Machine) {
- m.ephemeralGen = gen
- }
-}
-
-// Machine is a state-machine which implements Brontide: an
-// Authenticated-key Exchange in Three Acts. Brontide is derived from the Noise
-// framework, specifically implementing the Noise_XK handshake. Once the
-// initial 3-act handshake has completed all messages are encrypted with a
-// chacha20 AEAD cipher. On the wire, all messages are prefixed with an
-// authenticated+encrypted length field. Additionally, the encrypted+auth'd
-// length prefix is used as the AD when encrypting+decryption messages. This
-// construction provides confidentiality of packet length, avoids introducing
-// a padding-oracle, and binds the encrypted packet length to the packet
-// itself.
-//
-// The acts proceeds the following order (initiator on the left):
-// GenActOne() ->
-// RecvActOne()
-// <- GenActTwo()
-// RecvActTwo()
-// GenActThree() ->
-// RecvActThree()
-//
-// This exchange corresponds to the following Noise handshake:
-// <- s
-// ...
-// -> e, es
-// <- e, ee
-// -> s, se
-type Machine struct {
- sendCipher cipherState
- recvCipher cipherState
-
- ephemeralGen func() (*btcec.PrivateKey, er.R)
-
- handshakeState
-
- // nextCipherHeader is a static buffer that we'll use to read in the
- // next ciphertext header from the wire. The header is a 2 byte length
- // (of the next ciphertext), followed by a 16 byte MAC.
- nextCipherHeader [encHeaderSize]byte
-
- // nextHeaderSend holds a reference to the remaining header bytes to
- // write out for a pending message. This allows us to tolerate timeout
- // errors that cause partial writes.
- nextHeaderSend []byte
-
- // nextHeaderBody holds a reference to the remaining body bytes to write
- // out for a pending message. This allows us to tolerate timeout errors
- // that cause partial writes.
- nextBodySend []byte
-}
-
-// NewBrontideMachine creates a new instance of the brontide state-machine. If
-// the responder (listener) is creating the object, then the remotePub should
-// be nil. The handshake state within brontide is initialized using the ascii
-// string "lightning" as the prologue. The last parameter is a set of variadic
-// arguments for adding additional options to the brontide Machine
-// initialization.
-func NewBrontideMachine(initiator bool, localKey keychain.SingleKeyECDH,
- remotePub *btcec.PublicKey, options ...func(*Machine)) *Machine {
-
- handshake := newHandshakeState(
- initiator, lightningPrologue, localKey, remotePub,
- )
-
- m := &Machine{
- handshakeState: handshake,
- ephemeralGen: ephemeralGen,
- }
-
- // With the default options established, we'll now process all the
- // options passed in as parameters.
- for _, option := range options {
- option(m)
- }
-
- return m
-}
-
-const (
- // HandshakeVersion is the expected version of the brontide handshake.
- // Any messages that carry a different version will cause the handshake
- // to abort immediately.
- HandshakeVersion = byte(0)
-
- // ActOneSize is the size of the packet sent from initiator to
- // responder in ActOne. The packet consists of a handshake version, an
- // ephemeral key in compressed format, and a 16-byte poly1305 tag.
- //
- // 1 + 33 + 16
- ActOneSize = 50
-
- // ActTwoSize is the size the packet sent from responder to initiator
- // in ActTwo. The packet consists of a handshake version, an ephemeral
- // key in compressed format and a 16-byte poly1305 tag.
- //
- // 1 + 33 + 16
- ActTwoSize = 50
-
- // ActThreeSize is the size of the packet sent from initiator to
- // responder in ActThree. The packet consists of a handshake version,
- // the initiators static key encrypted with strong forward secrecy and
- // a 16-byte poly1035 tag.
- //
- // 1 + 33 + 16 + 16
- ActThreeSize = 66
-)
-
-// GenActOne generates the initial packet (act one) to be sent from initiator
-// to responder. During act one the initiator generates a fresh ephemeral key,
-// hashes it into the handshake digest, and performs an ECDH between this key
-// and the responder's static key. Future payloads are encrypted with a key
-// derived from this result.
-//
-// -> e, es
-func (b *Machine) GenActOne() ([ActOneSize]byte, er.R) {
- var actOne [ActOneSize]byte
-
- // e
- localEphemeral, err := b.ephemeralGen()
- if err != nil {
- return actOne, err
- }
- b.localEphemeral = &keychain.PrivKeyECDH{
- PrivKey: localEphemeral,
- }
-
- ephemeral := localEphemeral.PubKey().SerializeCompressed()
- b.mixHash(ephemeral)
-
- // es
- s, err := ecdh(b.remoteStatic, b.localEphemeral)
- if err != nil {
- return actOne, err
- }
- b.mixKey(s[:])
-
- authPayload := b.EncryptAndHash([]byte{})
-
- actOne[0] = HandshakeVersion
- copy(actOne[1:34], ephemeral)
- copy(actOne[34:], authPayload)
-
- return actOne, nil
-}
-
-// RecvActOne processes the act one packet sent by the initiator. The responder
-// executes the mirrored actions to that of the initiator extending the
-// handshake digest and deriving a new shared secret based on an ECDH with the
-// initiator's ephemeral key and responder's static key.
-func (b *Machine) RecvActOne(actOne [ActOneSize]byte) er.R {
- var (
- err er.R
- e [33]byte
- p [16]byte
- )
-
- // If the handshake version is unknown, then the handshake fails
- // immediately.
- if actOne[0] != HandshakeVersion {
- return er.Errorf("act one: invalid handshake version: %v, "+
- "only %v is valid, msg=%x", actOne[0], HandshakeVersion,
- actOne[:])
- }
-
- copy(e[:], actOne[1:34])
- copy(p[:], actOne[34:])
-
- // e
- b.remoteEphemeral, err = btcec.ParsePubKey(e[:], btcec.S256())
- if err != nil {
- return err
- }
- b.mixHash(b.remoteEphemeral.SerializeCompressed())
-
- // es
- s, err := ecdh(b.remoteEphemeral, b.localStatic)
- if err != nil {
- return err
- }
- b.mixKey(s)
-
- // If the initiator doesn't know our static key, then this operation
- // will fail.
- _, err = b.DecryptAndHash(p[:])
- return err
-}
-
-// GenActTwo generates the second packet (act two) to be sent from the
-// responder to the initiator. The packet for act two is identical to that of
-// act one, but then results in a different ECDH operation between the
-// initiator's and responder's ephemeral keys.
-//
-// <- e, ee
-func (b *Machine) GenActTwo() ([ActTwoSize]byte, er.R) {
- var actTwo [ActTwoSize]byte
-
- // e
- localEphemeral, err := b.ephemeralGen()
- if err != nil {
- return actTwo, err
- }
- b.localEphemeral = &keychain.PrivKeyECDH{
- PrivKey: localEphemeral,
- }
-
- ephemeral := localEphemeral.PubKey().SerializeCompressed()
- b.mixHash(localEphemeral.PubKey().SerializeCompressed())
-
- // ee
- s, err := ecdh(b.remoteEphemeral, b.localEphemeral)
- if err != nil {
- return actTwo, err
- }
- b.mixKey(s)
-
- authPayload := b.EncryptAndHash([]byte{})
-
- actTwo[0] = HandshakeVersion
- copy(actTwo[1:34], ephemeral)
- copy(actTwo[34:], authPayload)
-
- return actTwo, nil
-}
-
-// RecvActTwo processes the second packet (act two) sent from the responder to
-// the initiator. A successful processing of this packet authenticates the
-// initiator to the responder.
-func (b *Machine) RecvActTwo(actTwo [ActTwoSize]byte) er.R {
- var (
- err er.R
- e [33]byte
- p [16]byte
- )
-
- // If the handshake version is unknown, then the handshake fails
- // immediately.
- if actTwo[0] != HandshakeVersion {
- return er.Errorf("act two: invalid handshake version: %v, "+
- "only %v is valid, msg=%x", actTwo[0], HandshakeVersion,
- actTwo[:])
- }
-
- copy(e[:], actTwo[1:34])
- copy(p[:], actTwo[34:])
-
- // e
- b.remoteEphemeral, err = btcec.ParsePubKey(e[:], btcec.S256())
- if err != nil {
- return err
- }
- b.mixHash(b.remoteEphemeral.SerializeCompressed())
-
- // ee
- s, err := ecdh(b.remoteEphemeral, b.localEphemeral)
- if err != nil {
- return err
- }
- b.mixKey(s)
-
- _, err = b.DecryptAndHash(p[:])
- return err
-}
-
-// GenActThree creates the final (act three) packet of the handshake. Act three
-// is to be sent from the initiator to the responder. The purpose of act three
-// is to transmit the initiator's public key under strong forward secrecy to
-// the responder. This act also includes the final ECDH operation which yields
-// the final session.
-//
-// -> s, se
-func (b *Machine) GenActThree() ([ActThreeSize]byte, er.R) {
- var actThree [ActThreeSize]byte
-
- ourPubkey := b.localStatic.PubKey().SerializeCompressed()
- ciphertext := b.EncryptAndHash(ourPubkey)
-
- s, err := ecdh(b.remoteEphemeral, b.localStatic)
- if err != nil {
- return actThree, err
- }
- b.mixKey(s)
-
- authPayload := b.EncryptAndHash([]byte{})
-
- actThree[0] = HandshakeVersion
- copy(actThree[1:50], ciphertext)
- copy(actThree[50:], authPayload)
-
- // With the final ECDH operation complete, derive the session sending
- // and receiving keys.
- b.split()
-
- return actThree, nil
-}
-
-// RecvActThree processes the final act (act three) sent from the initiator to
-// the responder. After processing this act, the responder learns of the
-// initiator's static public key. Decryption of the static key serves to
-// authenticate the initiator to the responder.
-func (b *Machine) RecvActThree(actThree [ActThreeSize]byte) er.R {
- var (
- err er.R
- s [33 + 16]byte
- p [16]byte
- )
-
- // If the handshake version is unknown, then the handshake fails
- // immediately.
- if actThree[0] != HandshakeVersion {
- return er.Errorf("act three: invalid handshake version: %v, "+
- "only %v is valid, msg=%x", actThree[0], HandshakeVersion,
- actThree[:])
- }
-
- copy(s[:], actThree[1:33+16+1])
- copy(p[:], actThree[33+16+1:])
-
- // s
- remotePub, err := b.DecryptAndHash(s[:])
- if err != nil {
- return err
- }
- b.remoteStatic, err = btcec.ParsePubKey(remotePub, btcec.S256())
- if err != nil {
- return err
- }
-
- // se
- se, err := ecdh(b.remoteStatic, b.localEphemeral)
- if err != nil {
- return err
- }
- b.mixKey(se)
-
- if _, err := b.DecryptAndHash(p[:]); err != nil {
- return err
- }
-
- // With the final ECDH operation complete, derive the session sending
- // and receiving keys.
- b.split()
-
- return nil
-}
-
-// split is the final wrap-up act to be executed at the end of a successful
-// three act handshake. This function creates two internal cipherState
-// instances: one which is used to encrypt messages from the initiator to the
-// responder, and another which is used to encrypt message for the opposite
-// direction.
-func (b *Machine) split() {
- var (
- empty []byte
- sendKey [32]byte
- recvKey [32]byte
- )
-
- h := hkdf.New(sha256.New, empty, b.chainingKey[:], empty)
-
- // If we're the initiator the first 32 bytes are used to encrypt our
- // messages and the second 32-bytes to decrypt their messages. For the
- // responder the opposite is true.
- if b.initiator {
- h.Read(sendKey[:])
- b.sendCipher = cipherState{}
- b.sendCipher.InitializeKeyWithSalt(b.chainingKey, sendKey)
-
- h.Read(recvKey[:])
- b.recvCipher = cipherState{}
- b.recvCipher.InitializeKeyWithSalt(b.chainingKey, recvKey)
- } else {
- h.Read(recvKey[:])
- b.recvCipher = cipherState{}
- b.recvCipher.InitializeKeyWithSalt(b.chainingKey, recvKey)
-
- h.Read(sendKey[:])
- b.sendCipher = cipherState{}
- b.sendCipher.InitializeKeyWithSalt(b.chainingKey, sendKey)
- }
-}
-
-// WriteMessage encrypts and buffers the next message p. The ciphertext of the
-// message is prepended with an encrypt+auth'd length which must be used as the
-// AD to the AEAD construction when being decrypted by the other side.
-//
-// NOTE: This DOES NOT write the message to the wire, it should be followed by a
-// call to Flush to ensure the message is written.
-func (b *Machine) WriteMessage(p []byte) er.R {
- // The total length of each message payload including the MAC size
- // payload exceed the largest number encodable within a 16-bit unsigned
- // integer.
- if len(p) > math.MaxUint16 {
- return ErrMaxMessageLengthExceeded.Default()
- }
-
- // If a prior message was written but it hasn't been fully flushed,
- // return an error as we only support buffering of one message at a
- // time.
- if len(b.nextHeaderSend) > 0 || len(b.nextBodySend) > 0 {
- return ErrMessageNotFlushed.Default()
- }
-
- // The full length of the packet is only the packet length, and does
- // NOT include the MAC.
- fullLength := uint16(len(p))
-
- var pktLen [2]byte
- binary.BigEndian.PutUint16(pktLen[:], fullLength)
-
- // First, generate the encrypted+MAC'd length prefix for the packet.
- b.nextHeaderSend = b.sendCipher.Encrypt(nil, nil, pktLen[:])
-
- // Finally, generate the encrypted packet itself.
- b.nextBodySend = b.sendCipher.Encrypt(nil, nil, p)
-
- return nil
-}
-
-// Flush attempts to write a message buffered using WriteMessage to the provided
-// io.Writer. If no buffered message exists, this will result in a NOP.
-// Otherwise, it will continue to write the remaining bytes, picking up where
-// the byte stream left off in the event of a partial write. The number of bytes
-// returned reflects the number of plaintext bytes in the payload, and does not
-// account for the overhead of the header or MACs.
-//
-// NOTE: It is safe to call this method again iff a timeout error is returned.
-func (b *Machine) Flush(w io.Writer) (int, er.R) {
- // First, write out the pending header bytes, if any exist. Any header
- // bytes written will not count towards the total amount flushed.
- if len(b.nextHeaderSend) > 0 {
- // Write any remaining header bytes and shift the slice to point
- // to the next segment of unwritten bytes. If an error is
- // encountered, we can continue to write the header from where
- // we left off on a subsequent call to Flush.
- n, err := util.Write(w, b.nextHeaderSend)
- b.nextHeaderSend = b.nextHeaderSend[n:]
- if err != nil {
- return 0, err
- }
- }
-
- // Next, write the pending body bytes, if any exist. Only the number of
- // bytes written that correspond to the ciphertext will be included in
- // the total bytes written, bytes written as part of the MAC will not be
- // counted.
- var nn int
- if len(b.nextBodySend) > 0 {
- // Write out all bytes excluding the mac and shift the body
- // slice depending on the number of actual bytes written.
- n, err := util.Write(w, b.nextBodySend)
- b.nextBodySend = b.nextBodySend[n:]
-
- // If we partially or fully wrote any of the body's MAC, we'll
- // subtract that contribution from the total amount flushed to
- // preserve the abstraction of returning the number of plaintext
- // bytes written by the connection.
- //
- // There are three possible scenarios we must handle to ensure
- // the returned value is correct. In the first case, the write
- // straddles both payload and MAC bytes, and we must subtract
- // the number of MAC bytes written from n. In the second, only
- // payload bytes are written, thus we can return n unmodified.
- // The final scenario pertains to the case where only MAC bytes
- // are written, none of which count towards the total.
- //
- // |-----------Payload------------|----MAC----|
- // Straddle: S---------------------------------E--------0
- // Payload-only: S------------------------E-----------------0
- // MAC-only: S-------E-0
- start, end := n+len(b.nextBodySend), len(b.nextBodySend)
- switch {
-
- // Straddles payload and MAC bytes, subtract number of MAC bytes
- // written from the actual number written.
- case start > macSize && end <= macSize:
- nn = n - (macSize - end)
-
- // Only payload bytes are written, return n directly.
- case start > macSize && end > macSize:
- nn = n
-
- // Only MAC bytes are written, return 0 bytes written.
- default:
- }
-
- if err != nil {
- return nn, err
- }
- }
-
- return nn, nil
-}
-
-// ReadMessage attempts to read the next message from the passed io.Reader. In
-// the case of an authentication error, a non-nil error is returned.
-func (b *Machine) ReadMessage(r io.Reader) ([]byte, er.R) {
- pktLen, err := b.ReadHeader(r)
- if err != nil {
- return nil, err
- }
-
- buf := make([]byte, pktLen)
- return b.ReadBody(r, buf)
-}
-
-// ReadHeader attempts to read the next message header from the passed
-// io.Reader. The header contains the length of the next body including
-// additional overhead of the MAC. In the case of an authentication error, a
-// non-nil error is returned.
-//
-// NOTE: This method SHOULD NOT be used in the case that the io.Reader may be
-// adversarial and induce long delays. If the caller needs to set read deadlines
-// appropriately, it is preferred that they use the split ReadHeader and
-// ReadBody methods so that the deadlines can be set appropriately on each.
-func (b *Machine) ReadHeader(r io.Reader) (uint32, er.R) {
- _, err := util.ReadFull(r, b.nextCipherHeader[:])
- if err != nil {
- return 0, err
- }
-
- // Attempt to decrypt+auth the packet length present in the stream.
- pktLenBytes, err := b.recvCipher.Decrypt(
- nil, nil, b.nextCipherHeader[:],
- )
- if err != nil {
- return 0, err
- }
-
- // Compute the packet length that we will need to read off the wire.
- pktLen := uint32(binary.BigEndian.Uint16(pktLenBytes)) + macSize
-
- return pktLen, nil
-}
-
-// ReadBody attempts to ready the next message body from the passed io.Reader.
-// The provided buffer MUST be the length indicated by the packet length
-// returned by the preceding call to ReadHeader. In the case of an
-// authentication eerror, a non-nil error is returned.
-func (b *Machine) ReadBody(r io.Reader, buf []byte) ([]byte, er.R) {
- // Next, using the length read from the packet header, read the
- // encrypted packet itself into the buffer allocated by the read
- // pool.
- _, err := util.ReadFull(r, buf)
- if err != nil {
- return nil, err
- }
-
- // Finally, decrypt the message held in the buffer, and return a
- // new byte slice containing the plaintext.
- // TODO(roasbeef): modify to let pass in slice
- return b.recvCipher.Decrypt(nil, nil, buf)
-}
-
-// SetCurveToNil sets the 'Curve' parameter to nil on the handshakeState keys.
-// This allows us to log the Machine object without spammy log messages.
-func (b *Machine) SetCurveToNil() {
- if b.localStatic != nil {
- b.localStatic.PubKey().Curve = nil
- }
-
- if b.localEphemeral != nil {
- b.localEphemeral.PubKey().Curve = nil
- }
-
- if b.remoteStatic != nil {
- b.remoteStatic.Curve = nil
- }
-
- if b.remoteEphemeral != nil {
- b.remoteEphemeral.Curve = nil
- }
-}
diff --git a/lnd/brontide/noise_test.go b/lnd/brontide/noise_test.go
deleted file mode 100644
index 0598698e..00000000
--- a/lnd/brontide/noise_test.go
+++ /dev/null
@@ -1,740 +0,0 @@
-package brontide
-
-import (
- "bytes"
- "fmt"
- "io"
- "math"
- "net"
- "testing"
- "testing/iotest"
- "time"
-
- "github.com/pkt-cash/pktd/btcec"
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/btcutil/util"
- "github.com/pkt-cash/pktd/lnd/keychain"
- "github.com/pkt-cash/pktd/lnd/lnwire"
- "github.com/pkt-cash/pktd/lnd/tor"
-)
-
-type maybeNetConn struct {
- conn net.Conn
- err er.R
-}
-
-func makeListener() (*Listener, *lnwire.NetAddress, er.R) {
- // First, generate the long-term private keys for the brontide listener.
- localPriv, err := btcec.NewPrivateKey(btcec.S256())
- if err != nil {
- return nil, nil, err
- }
- localKeyECDH := &keychain.PrivKeyECDH{PrivKey: localPriv}
-
- // Having a port of ":0" means a random port, and interface will be
- // chosen for our listener.
- addr := "localhost:0"
-
- // Our listener will be local, and the connection remote.
- listener, errr := NewListener(localKeyECDH, addr)
- if errr != nil {
- return nil, nil, errr
- }
-
- netAddr := &lnwire.NetAddress{
- IdentityKey: localPriv.PubKey(),
- Address: listener.Addr().(*net.TCPAddr),
- }
-
- return listener, netAddr, nil
-}
-
-func dialTimeout(network, address string, timeout time.Duration) (net.Conn, er.R) {
- c, e := net.DialTimeout(network, address, timeout)
- return c, er.E(e)
-}
-
-func establishTestConnection() (net.Conn, net.Conn, func(), er.R) {
- listener, netAddr, err := makeListener()
- if err != nil {
- return nil, nil, nil, err
- }
- defer listener.Close()
-
- // Nos, generate the long-term private keys remote end of the connection
- // within our test.
- remotePriv, err := btcec.NewPrivateKey(btcec.S256())
- if err != nil {
- return nil, nil, nil, err
- }
- remoteKeyECDH := &keychain.PrivKeyECDH{PrivKey: remotePriv}
-
- // Initiate a connection with a separate goroutine, and listen with our
- // main one. If both errors are nil, then encryption+auth was
- // successful.
- remoteConnChan := make(chan maybeNetConn, 1)
- go func() {
- remoteConn, err := Dial(
- remoteKeyECDH, netAddr,
- tor.DefaultConnTimeout, dialTimeout,
- )
- remoteConnChan <- maybeNetConn{remoteConn, err}
- }()
-
- localConnChan := make(chan maybeNetConn, 1)
- go func() {
- localConn, err := listener.Accept()
- localConnChan <- maybeNetConn{localConn, er.E(err)}
- }()
-
- remote := <-remoteConnChan
- if remote.err != nil {
- return nil, nil, nil, err
- }
-
- local := <-localConnChan
- if local.err != nil {
- return nil, nil, nil, err
- }
-
- cleanUp := func() {
- local.conn.Close()
- remote.conn.Close()
- }
-
- return local.conn, remote.conn, cleanUp, nil
-}
-
-func TestConnectionCorrectness(t *testing.T) {
- // Create a test connection, grabbing either side of the connection
- // into local variables. If the initial crypto handshake fails, then
- // we'll get a non-nil error here.
- localConn, remoteConn, cleanUp, err := establishTestConnection()
- if err != nil {
- t.Fatalf("unable to establish test connection: %v", err)
- }
- defer cleanUp()
-
- // Test out some message full-message reads.
- for i := 0; i < 10; i++ {
- msg := []byte(fmt.Sprintf("hello%d", i))
-
- if _, err := localConn.Write(msg); err != nil {
- t.Fatalf("remote conn failed to write: %v", err)
- }
-
- readBuf := make([]byte, len(msg))
- if _, err := remoteConn.Read(readBuf); err != nil {
- t.Fatalf("local conn failed to read: %v", err)
- }
-
- if !bytes.Equal(readBuf, msg) {
- t.Fatalf("messages don't match, %v vs %v",
- string(readBuf), string(msg))
- }
- }
-
- // Now try incremental message reads. This simulates first writing a
- // message header, then a message body.
- outMsg := []byte("hello world")
- if _, err := localConn.Write(outMsg); err != nil {
- t.Fatalf("remote conn failed to write: %v", err)
- }
-
- readBuf := make([]byte, len(outMsg))
- if _, err := remoteConn.Read(readBuf[:len(outMsg)/2]); err != nil {
- t.Fatalf("local conn failed to read: %v", err)
- }
- if _, err := remoteConn.Read(readBuf[len(outMsg)/2:]); err != nil {
- t.Fatalf("local conn failed to read: %v", err)
- }
-
- if !bytes.Equal(outMsg, readBuf) {
- t.Fatalf("messages don't match, %v vs %v",
- string(readBuf), string(outMsg))
- }
-}
-
-// TestConecurrentHandshakes verifies the listener's ability to not be blocked
-// by other pending handshakes. This is tested by opening multiple tcp
-// connections with the listener, without completing any of the brontide acts.
-// The test passes if real brontide dialer connects while the others are
-// stalled.
-func TestConcurrentHandshakes(t *testing.T) {
- listener, netAddr, err := makeListener()
- if err != nil {
- t.Fatalf("unable to create listener connection: %v", err)
- }
- defer listener.Close()
-
- const nblocking = 5
-
- // Open a handful of tcp connections, that do not complete any steps of
- // the brontide handshake.
- connChan := make(chan maybeNetConn)
- for i := 0; i < nblocking; i++ {
- go func() {
- conn, err := net.Dial("tcp", listener.Addr().String())
- connChan <- maybeNetConn{conn, er.E(err)}
- }()
- }
-
- // Receive all connections/errors from our blocking tcp dials. We make a
- // pass to gather all connections and errors to make sure we defer the
- // calls to Close() on all successful connections.
- tcpErrs := make([]error, 0, nblocking)
- for i := 0; i < nblocking; i++ {
- result := <-connChan
- if result.conn != nil {
- defer result.conn.Close()
- }
- if result.err != nil {
- tcpErrs = append(tcpErrs, er.Native(result.err))
- }
- }
- for _, tcpErr := range tcpErrs {
- if tcpErr != nil {
- t.Fatalf("unable to tcp dial listener: %v", tcpErr)
- }
- }
-
- // Now, construct a new private key and use the brontide dialer to
- // connect to the listener.
- remotePriv, err := btcec.NewPrivateKey(btcec.S256())
- if err != nil {
- t.Fatalf("unable to generate private key: %v", err)
- }
- remoteKeyECDH := &keychain.PrivKeyECDH{PrivKey: remotePriv}
-
- go func() {
- remoteConn, err := Dial(
- remoteKeyECDH, netAddr,
- tor.DefaultConnTimeout, dialTimeout,
- )
- connChan <- maybeNetConn{remoteConn, err}
- }()
-
- // This connection should be accepted without error, as the brontide
- // connection should bypass stalled tcp connections.
- conn, errr := listener.Accept()
- if errr != nil {
- t.Fatalf("unable to accept dial: %v", errr)
- }
- defer conn.Close()
-
- result := <-connChan
- if result.err != nil {
- t.Fatalf("unable to dial %v: %v", netAddr, result.err)
- }
- result.conn.Close()
-}
-
-func TestMaxPayloadLength(t *testing.T) {
- t.Parallel()
-
- b := Machine{}
- b.split()
-
- // Create a payload that's only *slightly* above the maximum allotted
- // payload length.
- payloadToReject := make([]byte, math.MaxUint16+1)
-
- // A write of the payload generated above to the state machine should
- // be rejected as it's over the max payload length.
- err := b.WriteMessage(payloadToReject)
- if !ErrMaxMessageLengthExceeded.Is(err) {
- t.Fatalf("payload is over the max allowed length, the write " +
- "should have been rejected")
- }
-
- // Generate another payload which should be accepted as a valid
- // payload.
- payloadToAccept := make([]byte, math.MaxUint16-1)
- if err := b.WriteMessage(payloadToAccept); err != nil {
- t.Fatalf("write for payload was rejected, should have been " +
- "accepted")
- }
-
- // Generate a final payload which is only *slightly* above the max payload length
- // when the MAC is accounted for.
- payloadToReject = make([]byte, math.MaxUint16+1)
-
- // This payload should be rejected.
- err = b.WriteMessage(payloadToReject)
- if !ErrMaxMessageLengthExceeded.Is(err) {
- t.Fatalf("payload is over the max allowed length, the write " +
- "should have been rejected")
- }
-}
-
-func TestWriteMessageChunking(t *testing.T) {
- // Create a test connection, grabbing either side of the connection
- // into local variables. If the initial crypto handshake fails, then
- // we'll get a non-nil error here.
- localConn, remoteConn, cleanUp, err := establishTestConnection()
- if err != nil {
- t.Fatalf("unable to establish test connection: %v", err)
- }
- defer cleanUp()
-
- // Attempt to write a message which is over 3x the max allowed payload
- // size.
- largeMessage := bytes.Repeat([]byte("kek"), math.MaxUint16*3)
-
- // Launch a new goroutine to write the large message generated above in
- // chunks. We spawn a new goroutine because otherwise, we may block as
- // the kernel waits for the buffer to flush.
- errCh := make(chan er.R)
- go func() {
- defer close(errCh)
-
- bytesWritten, err := localConn.Write(largeMessage)
- if err != nil {
- errCh <- er.Errorf("unable to write message: %v", err)
- return
- }
-
- // The entire message should have been written out to the remote
- // connection.
- if bytesWritten != len(largeMessage) {
- errCh <- er.Errorf("bytes not fully written")
- return
- }
- }()
-
- // Attempt to read the entirety of the message generated above.
- buf := make([]byte, len(largeMessage))
- if _, err := util.ReadFull(remoteConn, buf); err != nil {
- t.Fatalf("unable to read message: %v", err)
- }
-
- err = <-errCh
- if err != nil {
- t.Fatal(err)
- }
-
- // Finally, the message the remote end of the connection received
- // should be identical to what we sent from the local connection.
- if !bytes.Equal(buf, largeMessage) {
- t.Fatalf("bytes don't match")
- }
-}
-
-// TestBolt0008TestVectors ensures that our implementation of brontide exactly
-// matches the test vectors within the specification.
-func TestBolt0008TestVectors(t *testing.T) {
- t.Parallel()
-
- // First, we'll generate the state of the initiator from the test
- // vectors at the appendix of BOLT-0008
- initiatorKeyBytes, err := util.DecodeHex("1111111111111111111111" +
- "111111111111111111111111111111111111111111")
- if err != nil {
- t.Fatalf("unable to decode hex: %v", err)
- }
- initiatorPriv, _ := btcec.PrivKeyFromBytes(
- btcec.S256(), initiatorKeyBytes,
- )
- initiatorKeyECDH := &keychain.PrivKeyECDH{PrivKey: initiatorPriv}
-
- // We'll then do the same for the responder.
- responderKeyBytes, err := util.DecodeHex("212121212121212121212121" +
- "2121212121212121212121212121212121212121")
- if err != nil {
- t.Fatalf("unable to decode hex: %v", err)
- }
- responderPriv, responderPub := btcec.PrivKeyFromBytes(
- btcec.S256(), responderKeyBytes,
- )
- responderKeyECDH := &keychain.PrivKeyECDH{PrivKey: responderPriv}
-
- // With the initiator's key data parsed, we'll now define a custom
- // EphemeralGenerator function for the state machine to ensure that the
- // initiator and responder both generate the ephemeral public key
- // defined within the test vectors.
- initiatorEphemeral := EphemeralGenerator(func() (*btcec.PrivateKey, er.R) {
- e := "121212121212121212121212121212121212121212121212121212" +
- "1212121212"
- eBytes, err := util.DecodeHex(e)
- if err != nil {
- return nil, err
- }
-
- priv, _ := btcec.PrivKeyFromBytes(btcec.S256(), eBytes)
- return priv, nil
- })
- responderEphemeral := EphemeralGenerator(func() (*btcec.PrivateKey, er.R) {
- e := "222222222222222222222222222222222222222222222222222" +
- "2222222222222"
- eBytes, err := util.DecodeHex(e)
- if err != nil {
- return nil, err
- }
-
- priv, _ := btcec.PrivKeyFromBytes(btcec.S256(), eBytes)
- return priv, nil
- })
-
- // Finally, we'll create both brontide state machines, so we can begin
- // our test.
- initiator := NewBrontideMachine(
- true, initiatorKeyECDH, responderPub, initiatorEphemeral,
- )
- responder := NewBrontideMachine(
- false, responderKeyECDH, nil, responderEphemeral,
- )
-
- // We'll start with the initiator generating the initial payload for
- // act one. This should consist of exactly 50 bytes. We'll assert that
- // the payload return is _exactly_ the same as what's specified within
- // the test vectors.
- actOne, err := initiator.GenActOne()
- if err != nil {
- t.Fatalf("unable to generate act one: %v", err)
- }
- expectedActOne, err := util.DecodeHex("00036360e856310ce5d294e" +
- "8be33fc807077dc56ac80d95d9cd4ddbd21325eff73f70df608655115" +
- "1f58b8afe6c195782c6a")
- if err != nil {
- t.Fatalf("unable to parse expected act one: %v", err)
- }
- if !bytes.Equal(expectedActOne, actOne[:]) {
- t.Fatalf("act one mismatch: expected %x, got %x",
- expectedActOne, actOne)
- }
-
- // With the assertion above passed, we'll now process the act one
- // payload with the responder of the crypto handshake.
- if err := responder.RecvActOne(actOne); err != nil {
- t.Fatalf("responder unable to process act one: %v", err)
- }
-
- // Next, we'll start the second act by having the responder generate
- // its contribution to the crypto handshake. We'll also verify that we
- // produce the _exact_ same byte stream as advertised within the spec's
- // test vectors.
- actTwo, err := responder.GenActTwo()
- if err != nil {
- t.Fatalf("unable to generate act two: %v", err)
- }
- expectedActTwo, err := util.DecodeHex("0002466d7fcae563e5cb09a0" +
- "d1870bb580344804617879a14949cf22285f1bae3f276e2470b93aac58" +
- "3c9ef6eafca3f730ae")
- if err != nil {
- t.Fatalf("unable to parse expected act two: %v", err)
- }
- if !bytes.Equal(expectedActTwo, actTwo[:]) {
- t.Fatalf("act two mismatch: expected %x, got %x",
- expectedActTwo, actTwo)
- }
-
- // Moving the handshake along, we'll also ensure that the initiator
- // accepts the act two payload.
- if err := initiator.RecvActTwo(actTwo); err != nil {
- t.Fatalf("initiator unable to process act two: %v", err)
- }
-
- // At the final step, we'll generate the last act from the initiator
- // and once again verify that it properly matches the test vectors.
- actThree, err := initiator.GenActThree()
- if err != nil {
- t.Fatalf("unable to generate act three: %v", err)
- }
- expectedActThree, err := util.DecodeHex("00b9e3a702e93e3a9948c2e" +
- "d6e5fd7590a6e1c3a0344cfc9d5b57357049aa22355361aa02e55a8f" +
- "c28fef5bd6d71ad0c38228dc68b1c466263b47fdf31e560e139ba")
- if err != nil {
- t.Fatalf("unable to parse expected act three: %v", err)
- }
- if !bytes.Equal(expectedActThree, actThree[:]) {
- t.Fatalf("act three mismatch: expected %x, got %x",
- expectedActThree, actThree)
- }
-
- // Finally, we'll ensure that the responder itself also properly parses
- // the last payload in the crypto handshake.
- if err := responder.RecvActThree(actThree); err != nil {
- t.Fatalf("responder unable to process act three: %v", err)
- }
-
- // As a final assertion, we'll ensure that both sides have derived the
- // proper symmetric encryption keys.
- sendingKey, err := util.DecodeHex("969ab31b4d288cedf6218839b27a3e2" +
- "140827047f2c0f01bf5c04435d43511a9")
- if err != nil {
- t.Fatalf("unable to parse sending key: %v", err)
- }
- recvKey, err := util.DecodeHex("bb9020b8965f4df047e07f955f3c4b884" +
- "18984aadc5cdb35096b9ea8fa5c3442")
- if err != nil {
- t.Fatalf("unable to parse receiving key: %v", err)
- }
-
- chainKey, err := util.DecodeHex("919219dbb2920afa8db80f9a51787a840" +
- "bcf111ed8d588caf9ab4be716e42b01")
- if err != nil {
- t.Fatalf("unable to parse chaining key: %v", err)
- }
-
- if !bytes.Equal(initiator.sendCipher.secretKey[:], sendingKey) {
- t.Fatalf("sending key mismatch: expected %x, got %x",
- initiator.sendCipher.secretKey[:], sendingKey)
- }
- if !bytes.Equal(initiator.recvCipher.secretKey[:], recvKey) {
- t.Fatalf("receiving key mismatch: expected %x, got %x",
- initiator.recvCipher.secretKey[:], recvKey)
- }
- if !bytes.Equal(initiator.chainingKey[:], chainKey) {
- t.Fatalf("chaining key mismatch: expected %x, got %x",
- initiator.chainingKey[:], chainKey)
- }
-
- if !bytes.Equal(responder.sendCipher.secretKey[:], recvKey) {
- t.Fatalf("sending key mismatch: expected %x, got %x",
- responder.sendCipher.secretKey[:], recvKey)
- }
- if !bytes.Equal(responder.recvCipher.secretKey[:], sendingKey) {
- t.Fatalf("receiving key mismatch: expected %x, got %x",
- responder.recvCipher.secretKey[:], sendingKey)
- }
- if !bytes.Equal(responder.chainingKey[:], chainKey) {
- t.Fatalf("chaining key mismatch: expected %x, got %x",
- responder.chainingKey[:], chainKey)
- }
-
- // Now test as per section "transport-message test" in Test Vectors
- // (the transportMessageVectors ciphertexts are from this section of BOLT 8);
- // we do slightly greater than 1000 encryption/decryption operations
- // to ensure that the key rotation algorithm is operating as expected.
- // The starting point for enc/decr is already guaranteed correct from the
- // above tests of sendingKey, receivingKey, chainingKey.
- transportMessageVectors := map[int]string{
- 0: "cf2b30ddf0cf3f80e7c35a6e6730b59fe802473180f396d88a8fb0db8cb" +
- "cf25d2f214cf9ea1d95",
- 1: "72887022101f0b6753e0c7de21657d35a4cb2a1f5cde2650528bbc8f837" +
- "d0f0d7ad833b1a256a1",
- 500: "178cb9d7387190fa34db9c2d50027d21793c9bc2d40b1e14dcf30ebeeeb2" +
- "20f48364f7a4c68bf8",
- 501: "1b186c57d44eb6de4c057c49940d79bb838a145cb528d6e8fd26dbe50a6" +
- "0ca2c104b56b60e45bd",
- 1000: "4a2f3cc3b5e78ddb83dcb426d9863d9d9a723b0337c89dd0b005d89f8d3" +
- "c05c52b76b29b740f09",
- 1001: "2ecd8c8a5629d0d02ab457a0fdd0f7b90a192cd46be5ecb6ca570bfc5e2" +
- "68338b1a16cf4ef2d36",
- }
-
- // Payload for every message is the string "hello".
- payload := []byte("hello")
-
- var buf bytes.Buffer
-
- for i := 0; i < 1002; i++ {
- err = initiator.WriteMessage(payload)
- if err != nil {
- t.Fatalf("could not write message %s", payload)
- }
- _, err = initiator.Flush(&buf)
- if err != nil {
- t.Fatalf("could not flush message: %v", err)
- }
- if val, ok := transportMessageVectors[i]; ok {
- binaryVal, err := util.DecodeHex(val)
- if err != nil {
- t.Fatalf("Failed to decode hex string %s", val)
- }
- if !bytes.Equal(buf.Bytes(), binaryVal) {
- t.Fatalf("Ciphertext %x was not equal to expected %s",
- buf.String()[:], val)
- }
- }
-
- // Responder decrypts the bytes, in every iteration, and
- // should always be able to decrypt the same payload message.
- plaintext, err := responder.ReadMessage(&buf)
- if err != nil {
- t.Fatalf("failed to read message in responder: %v", err)
- }
-
- // Ensure decryption succeeded
- if !bytes.Equal(plaintext, payload) {
- t.Fatalf("Decryption failed to receive plaintext: %s, got %s",
- payload, plaintext)
- }
-
- // Clear out the buffer for the next iteration
- buf.Reset()
- }
-}
-
-// timeoutWriter wraps an io.Writer and throws an iotest.ErrTimeout after
-// writing n bytes.
-type timeoutWriter struct {
- w io.Writer
- n int64
-}
-
-func NewTimeoutWriter(w io.Writer, n int64) io.Writer {
- return &timeoutWriter{w, n}
-}
-
-func (t *timeoutWriter) Write(p []byte) (int, error) {
- n := len(p)
- if int64(n) > t.n {
- n = int(t.n)
- }
- n, err := util.Write(t.w, p[:n])
- t.n -= int64(n)
- if err == nil && t.n == 0 {
- return n, iotest.ErrTimeout
- }
- return n, er.Native(err)
-}
-
-const payloadSize = 10
-
-type flushChunk struct {
- errAfter int64
- expN int
- expErr error
-}
-
-type flushTest struct {
- name string
- chunks []flushChunk
-}
-
-var flushTests = []flushTest{
- {
- name: "partial header write",
- chunks: []flushChunk{
- // Write 18-byte header in two parts, 16 then 2.
- {
- errAfter: encHeaderSize - 2,
- expN: 0,
- expErr: iotest.ErrTimeout,
- },
- {
- errAfter: 2,
- expN: 0,
- expErr: iotest.ErrTimeout,
- },
- // Write payload and MAC in one go.
- {
- errAfter: -1,
- expN: payloadSize,
- },
- },
- },
- {
- name: "full payload then full mac",
- chunks: []flushChunk{
- // Write entire header and entire payload w/o MAC.
- {
- errAfter: encHeaderSize + payloadSize,
- expN: payloadSize,
- expErr: iotest.ErrTimeout,
- },
- // Write the entire MAC.
- {
- errAfter: -1,
- expN: 0,
- },
- },
- },
- {
- name: "payload-only, straddle, mac-only",
- chunks: []flushChunk{
- // Write header and all but last byte of payload.
- {
- errAfter: encHeaderSize + payloadSize - 1,
- expN: payloadSize - 1,
- expErr: iotest.ErrTimeout,
- },
- // Write last byte of payload and first byte of MAC.
- {
- errAfter: 2,
- expN: 1,
- expErr: iotest.ErrTimeout,
- },
- // Write 10 bytes of the MAC.
- {
- errAfter: 10,
- expN: 0,
- expErr: iotest.ErrTimeout,
- },
- // Write the remaining 5 MAC bytes.
- {
- errAfter: -1,
- expN: 0,
- },
- },
- },
-}
-
-// TestFlush asserts a Machine's ability to handle timeouts during Flush that
-// cause partial writes, and that the machine can properly resume writes on
-// subsequent calls to Flush.
-func TestFlush(t *testing.T) {
- // Run each test individually, to assert that they pass in isolation.
- for _, test := range flushTests {
- t.Run(test.name, func(t *testing.T) {
- var (
- w bytes.Buffer
- b Machine
- )
- b.split()
- testFlush(t, test, &b, &w)
- })
- }
-
- // Finally, run the tests serially as if all on one connection.
- t.Run("flush serial", func(t *testing.T) {
- var (
- w bytes.Buffer
- b Machine
- )
- b.split()
- for _, test := range flushTests {
- testFlush(t, test, &b, &w)
- }
- })
-}
-
-// testFlush buffers a message on the Machine, then flushes it to the io.Writer
-// in chunks. Once complete, a final call to flush is made to assert that Write
-// is not called again.
-func testFlush(t *testing.T, test flushTest, b *Machine, w io.Writer) {
- payload := make([]byte, payloadSize)
- if err := b.WriteMessage(payload); err != nil {
- t.Fatalf("unable to write message: %v", err)
- }
-
- for _, chunk := range test.chunks {
- assertFlush(t, b, w, chunk.errAfter, chunk.expN, chunk.expErr)
- }
-
- // We should always be able to call Flush after a message has been
- // successfully written, and it should result in a NOP.
- assertFlush(t, b, w, 0, 0, nil)
-}
-
-// assertFlush flushes a chunk to the passed io.Writer. If n >= 0, a
-// timeoutWriter will be used the flush should stop with iotest.ErrTimeout after
-// n bytes. The method asserts that the returned error matches expErr and that
-// the number of bytes written by Flush matches expN.
-func assertFlush(t *testing.T, b *Machine, w io.Writer, n int64, expN int,
- expErr error) {
-
- t.Helper()
-
- if n >= 0 {
- w = NewTimeoutWriter(w, n)
- }
- nn, err := b.Flush(w)
- if er.Wrapped(err) != expErr {
- t.Fatalf("expected flush err: %v, got: %v", expErr, err)
- }
- if nn != expN {
- t.Fatalf("expected n: %d, got: %d", expN, nn)
- }
-}
diff --git a/lnd/buffer/buffer_test.go b/lnd/buffer/buffer_test.go
deleted file mode 100644
index efda4c88..00000000
--- a/lnd/buffer/buffer_test.go
+++ /dev/null
@@ -1,44 +0,0 @@
-package buffer_test
-
-import (
- "bytes"
- "testing"
-
- "github.com/pkt-cash/pktd/lnd/buffer"
-)
-
-// TestRecycleSlice asserts that RecycleSlice always zeros a byte slice.
-func TestRecycleSlice(t *testing.T) {
- tests := []struct {
- name string
- slice []byte
- }{
- {
- name: "length zero",
- },
- {
- name: "length one",
- slice: []byte("a"),
- },
- {
- name: "length power of two length",
- slice: bytes.Repeat([]byte("b"), 16),
- },
- {
- name: "length non power of two",
- slice: bytes.Repeat([]byte("c"), 27),
- },
- }
-
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- buffer.RecycleSlice(test.slice)
-
- expSlice := make([]byte, len(test.slice))
- if !bytes.Equal(expSlice, test.slice) {
- t.Fatalf("slice not recycled, want: %v, got: %v",
- expSlice, test.slice)
- }
- })
- }
-}
diff --git a/lnd/buffer/read.go b/lnd/buffer/read.go
deleted file mode 100644
index 1b12d20e..00000000
--- a/lnd/buffer/read.go
+++ /dev/null
@@ -1,19 +0,0 @@
-package buffer
-
-import (
- "github.com/pkt-cash/pktd/lnd/lnwire"
-)
-
-// ReadSize represents the size of the maximum message that can be read off the
-// wire by brontide. The buffer is used to hold the ciphertext while the
-// brontide state machine decrypts the message.
-const ReadSize = lnwire.MaxMessagePayload + 16
-
-// Read is a static byte array sized to the maximum-allowed Lightning message
-// size, plus 16 bytes for the MAC.
-type Read [ReadSize]byte
-
-// Recycle zeroes the Read, making it fresh for another use.
-func (b *Read) Recycle() {
- RecycleSlice(b[:])
-}
diff --git a/lnd/buffer/utils.go b/lnd/buffer/utils.go
deleted file mode 100644
index 40a386a9..00000000
--- a/lnd/buffer/utils.go
+++ /dev/null
@@ -1,17 +0,0 @@
-package buffer
-
-// RecycleSlice zeroes byte slice, making it fresh for another use.
-// Zeroing the buffer using a logarithmic number of calls to the optimized copy
-// method. Benchmarking shows this to be ~30 times faster than a for loop that
-// sets each index to 0 for ~65KB buffers use for wire messages. Inspired by:
-// https://stackoverflow.com/questions/30614165/is-there-analog-of-memset-in-go
-func RecycleSlice(b []byte) {
- if len(b) == 0 {
- return
- }
-
- b[0] = 0
- for i := 1; i < len(b); i *= 2 {
- copy(b[i:], b[:i])
- }
-}
diff --git a/lnd/buffer/write.go b/lnd/buffer/write.go
deleted file mode 100644
index a2bcfc9c..00000000
--- a/lnd/buffer/write.go
+++ /dev/null
@@ -1,19 +0,0 @@
-package buffer
-
-import (
- "github.com/pkt-cash/pktd/lnd/lnwire"
-)
-
-// WriteSize represents the size of the maximum plaintext message than can be
-// sent using brontide. The buffer does not include extra space for the MAC, as
-// that is applied by the Noise protocol after encrypting the plaintext.
-const WriteSize = lnwire.MaxMessagePayload
-
-// Write is static byte array occupying to maximum-allowed plaintext-message
-// size.
-type Write [WriteSize]byte
-
-// Recycle zeroes the Write, making it fresh for another use.
-func (b *Write) Recycle() {
- RecycleSlice(b[:])
-}
diff --git a/lnd/build/deployment.go b/lnd/build/deployment.go
deleted file mode 100644
index 410f7e96..00000000
--- a/lnd/build/deployment.go
+++ /dev/null
@@ -1,36 +0,0 @@
-package build
-
-// DeploymentType is an enum specifying the deployment to compile.
-type DeploymentType byte
-
-const (
- // Development is a deployment that includes extra testing hooks and
- // logging configurations.
- Development DeploymentType = iota
-
- // Production is a deployment that strips out testing logic and uses
- // Default logging.
- Production
-)
-
-// String returns a human readable name for a build type.
-func (b DeploymentType) String() string {
- switch b {
- case Development:
- return "development"
- case Production:
- return "production"
- default:
- return "unknown"
- }
-}
-
-// IsProdBuild returns true if this is a production build.
-func IsProdBuild() bool {
- return Deployment == Production
-}
-
-// IsDevBuild returns true if this is a development build.
-func IsDevBuild() bool {
- return Deployment == Development
-}
diff --git a/lnd/build/deployment_dev.go b/lnd/build/deployment_dev.go
deleted file mode 100644
index fb2bb2b9..00000000
--- a/lnd/build/deployment_dev.go
+++ /dev/null
@@ -1,6 +0,0 @@
-// +build dev
-
-package build
-
-// Deployment specifies a development build.
-const Deployment = Development
diff --git a/lnd/build/deployment_prod.go b/lnd/build/deployment_prod.go
deleted file mode 100644
index 247f25ae..00000000
--- a/lnd/build/deployment_prod.go
+++ /dev/null
@@ -1,6 +0,0 @@
-// +build !dev
-
-package build
-
-// Deployment specifies a production build.
-const Deployment = Production
diff --git a/lnd/cert/go.sum b/lnd/cert/go.sum
deleted file mode 100644
index 331fa698..00000000
--- a/lnd/cert/go.sum
+++ /dev/null
@@ -1,11 +0,0 @@
-github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
-github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
-github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
-github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4=
-github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
-gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
-gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
-gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
diff --git a/lnd/cert/selfsigned.go b/lnd/cert/selfsigned.go
deleted file mode 100644
index 207162f5..00000000
--- a/lnd/cert/selfsigned.go
+++ /dev/null
@@ -1,292 +0,0 @@
-package cert
-
-import (
- "bytes"
- "crypto/ecdsa"
- "crypto/elliptic"
- "crypto/rand"
- "crypto/x509"
- "crypto/x509/pkix"
- "encoding/pem"
- "io/ioutil"
- "math/big"
- "net"
- "os"
- "time"
-
- "github.com/pkt-cash/pktd/btcutil/er"
-)
-
-const (
- // DefaultAutogenValidity is the default validity of a self-signed
- // certificate. The value corresponds to 14 months
- // (14 months * 30 days * 24 hours).
- DefaultAutogenValidity = 14 * 30 * 24 * time.Hour
-)
-
-var (
- // End of ASN.1 time.
- endOfTime = time.Date(2049, 12, 31, 23, 59, 59, 0, time.UTC)
-
- // Max serial number.
- serialNumberLimit = new(big.Int).Lsh(big.NewInt(1), 128)
-)
-
-// ipAddresses returns the parserd IP addresses to use when creating the TLS
-// certificate. If tlsDisableAutofill is true, we don't include interface
-// addresses to protect users privacy.
-func ipAddresses(tlsExtraIPs []string, tlsDisableAutofill bool) ([]net.IP, er.R) {
- // Collect the host's IP addresses, including loopback, in a slice.
- ipAddresses := []net.IP{net.ParseIP("127.0.0.1"), net.ParseIP("::1")}
-
- // addIP appends an IP address only if it isn't already in the slice.
- addIP := func(ipAddr net.IP) {
- for _, ip := range ipAddresses {
- if ip.Equal(ipAddr) {
- return
- }
- }
- ipAddresses = append(ipAddresses, ipAddr)
- }
-
- // To protect their privacy, some users might not want to have all
- // their network addresses include in the certificate as this could
- // leak sensitive information.
- if !tlsDisableAutofill {
- // Add all the interface IPs that aren't already in the slice.
- addrs, err := net.InterfaceAddrs()
- if err != nil {
- return nil, er.E(err)
- }
- for _, a := range addrs {
- ipAddr, _, err := net.ParseCIDR(a.String())
- if err == nil {
- addIP(ipAddr)
- }
- }
- }
-
- // Add extra IPs to the slice.
- for _, ip := range tlsExtraIPs {
- ipAddr := net.ParseIP(ip)
- if ipAddr != nil {
- addIP(ipAddr)
- }
- }
-
- return ipAddresses, nil
-}
-
-// dnsNames returns the host and DNS names to use when creating the TLS
-// ceftificate.
-func dnsNames(tlsExtraDomains []string, tlsDisableAutofill bool) (string, []string) {
- // Collect the host's names into a slice.
- host, err := os.Hostname()
-
- // To further protect their privacy, some users might not want
- // to have their hostname include in the certificate as this could
- // leak sensitive information.
- if err != nil || tlsDisableAutofill {
- // Nothing much we can do here, other than falling back to
- // localhost as fallback. A hostname can still be provided with
- // the tlsExtraDomain parameter if the problem persists on a
- // system.
- host = "localhost"
- }
-
- dnsNames := []string{host}
- if host != "localhost" {
- dnsNames = append(dnsNames, "localhost")
- }
- dnsNames = append(dnsNames, tlsExtraDomains...)
-
- // Because we aren't including the hostname in the certificate when
- // tlsDisableAutofill is set, we will use the first extra domain
- // specified by the user, if it's set, as the Common Name.
- if tlsDisableAutofill && len(tlsExtraDomains) > 0 {
- host = tlsExtraDomains[0]
- }
-
- // Also add fake hostnames for unix sockets, otherwise hostname
- // verification will fail in the client.
- dnsNames = append(dnsNames, "unix", "unixpacket")
-
- // Also add hostnames for 'bufconn' which is the hostname used for the
- // in-memory connections used on mobile.
- dnsNames = append(dnsNames, "bufconn")
-
- return host, dnsNames
-}
-
-// IsOutdated returns whether the given certificate is outdated w.r.t. the IPs
-// and domains given. The certificate is considered up to date if it was
-// created with _exactly_ the IPs and domains given.
-func IsOutdated(cert *x509.Certificate, tlsExtraIPs,
- tlsExtraDomains []string, tlsDisableAutofill bool) (bool, er.R) {
-
- // Parse the slice of IP strings.
- ips, err := ipAddresses(tlsExtraIPs, tlsDisableAutofill)
- if err != nil {
- return false, err
- }
-
- // To not consider the certificate outdated if it has duplicate IPs or
- // if only the order has changed, we create two maps from the slice of
- // IPs to compare.
- ips1 := make(map[string]net.IP)
- for _, ip := range ips {
- ips1[ip.String()] = ip
- }
-
- ips2 := make(map[string]net.IP)
- for _, ip := range cert.IPAddresses {
- ips2[ip.String()] = ip
- }
-
- // If the certificate has a different number of IP addresses, it is
- // definitely out of date.
- if len(ips1) != len(ips2) {
- return true, nil
- }
-
- // Go through each IP address, and check that they are equal. We expect
- // both the string representation and the exact IP to match.
- for s, ip1 := range ips1 {
- // Assert the IP string is found in both sets.
- ip2, ok := ips2[s]
- if !ok {
- return true, nil
- }
-
- // And that the IPs are considered equal.
- if !ip1.Equal(ip2) {
- return true, nil
- }
- }
-
- // Get the full list of DNS names to use.
- _, dnsNames := dnsNames(tlsExtraDomains, tlsDisableAutofill)
-
- // We do the same kind of deduplication for the DNS names.
- dns1 := make(map[string]struct{})
- for _, n := range cert.DNSNames {
- dns1[n] = struct{}{}
- }
-
- dns2 := make(map[string]struct{})
- for _, n := range dnsNames {
- dns2[n] = struct{}{}
- }
-
- // If the number of domains are different, it is out of date.
- if len(dns1) != len(dns2) {
- return true, nil
- }
-
- // Similarly, check that each DNS name matches what is found in the
- // certificate.
- for k := range dns1 {
- if _, ok := dns2[k]; !ok {
- return true, nil
- }
- }
-
- // Certificate was up-to-date.
- return false, nil
-}
-
-// GenCertPair generates a key/cert pair to the paths provided. The
-// auto-generated certificates should *not* be used in production for public
-// access as they're self-signed and don't necessarily contain all of the
-// desired hostnames for the service. For production/public use, consider a
-// real PKI.
-//
-// This function is adapted from https://github.com/btcsuite/btcd and
-// https://github.com/btcsuite/btcutil
-func GenCertPair(org, certFile, keyFile string, tlsExtraIPs,
- tlsExtraDomains []string, tlsDisableAutofill bool,
- certValidity time.Duration) er.R {
-
- now := time.Now()
- validUntil := now.Add(certValidity)
-
- // Check that the certificate validity isn't past the ASN.1 end of time.
- if validUntil.After(endOfTime) {
- validUntil = endOfTime
- }
-
- // Generate a serial number that's below the serialNumberLimit.
- serialNumber, errr := rand.Int(rand.Reader, serialNumberLimit)
- if errr != nil {
- return er.Errorf("failed to generate serial number: %s", errr)
- }
-
- // Get all DNS names and IP addresses to use when creating the
- // certificate.
- host, dnsNames := dnsNames(tlsExtraDomains, tlsDisableAutofill)
- ipAddresses, err := ipAddresses(tlsExtraIPs, tlsDisableAutofill)
- if err != nil {
- return err
- }
-
- // Generate a private key for the certificate.
- priv, errr := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
- if errr != nil {
- return er.E(errr)
- }
-
- // Construct the certificate template.
- template := x509.Certificate{
- SerialNumber: serialNumber,
- Subject: pkix.Name{
- Organization: []string{org},
- CommonName: host,
- },
- NotBefore: now.Add(-time.Hour * 24),
- NotAfter: validUntil,
-
- KeyUsage: x509.KeyUsageKeyEncipherment |
- x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign,
- ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
- IsCA: true, // so can sign self.
- BasicConstraintsValid: true,
-
- DNSNames: dnsNames,
- IPAddresses: ipAddresses,
- }
-
- derBytes, errr := x509.CreateCertificate(rand.Reader, &template,
- &template, &priv.PublicKey, priv)
- if errr != nil {
- return er.Errorf("failed to create certificate: %v", errr)
- }
-
- certBuf := &bytes.Buffer{}
- errr = pem.Encode(certBuf, &pem.Block{Type: "CERTIFICATE",
- Bytes: derBytes})
- if errr != nil {
- return er.Errorf("failed to encode certificate: %v", errr)
- }
-
- keybytes, errr := x509.MarshalECPrivateKey(priv)
- if errr != nil {
- return er.Errorf("unable to encode privkey: %v", errr)
- }
- keyBuf := &bytes.Buffer{}
- errr = pem.Encode(keyBuf, &pem.Block{Type: "EC PRIVATE KEY",
- Bytes: keybytes})
- if errr != nil {
- return er.Errorf("failed to encode private key: %v", errr)
- }
-
- // Write cert and key files.
- if errr = ioutil.WriteFile(certFile, certBuf.Bytes(), 0644); errr != nil {
- return er.E(errr)
- }
- if errr = ioutil.WriteFile(keyFile, keyBuf.Bytes(), 0600); errr != nil {
- os.Remove(certFile)
- return er.E(errr)
- }
-
- return nil
-}
diff --git a/lnd/cert/selfsigned_test.go b/lnd/cert/selfsigned_test.go
deleted file mode 100644
index 080bf58d..00000000
--- a/lnd/cert/selfsigned_test.go
+++ /dev/null
@@ -1,193 +0,0 @@
-package cert_test
-
-import (
- "io/ioutil"
- "testing"
-
- "github.com/pkt-cash/pktd/btcutil/util"
- "github.com/pkt-cash/pktd/lnd/cert"
- "github.com/stretchr/testify/require"
-)
-
-var (
- extraIPs = []string{"1.1.1.1", "123.123.123.1", "199.189.12.12"}
- extraDomains = []string{"home", "and", "away"}
-)
-
-// TestIsOutdatedCert checks that we'll consider the TLS certificate outdated
-// if the ip addresses or dns names don't match.
-func TestIsOutdatedCert(t *testing.T) {
- tempDir, errr := ioutil.TempDir("", "certtest")
- if errr != nil {
- t.Fatal(errr)
- }
-
- certPath := tempDir + "/tls.cert"
- keyPath := tempDir + "/tls.key"
-
- // Generate TLS files with two extra IPs and domains.
- err := cert.GenCertPair(
- "lnd autogenerated cert", certPath, keyPath, extraIPs[:2],
- extraDomains[:2], false, cert.DefaultAutogenValidity,
- )
- if err != nil {
- t.Fatal(err)
- }
-
- // We'll attempt to check up-to-date status for all variants of 1-3
- // number of IPs and domains.
- for numIPs := 1; numIPs <= len(extraIPs); numIPs++ {
- for numDomains := 1; numDomains <= len(extraDomains); numDomains++ {
- _, parsedCert, errr := cert.LoadCert(
- certPath, keyPath,
- )
- if errr != nil {
- t.Fatal(errr)
- }
-
- // Using the test case's number of IPs and domains, get
- // the outdated status of the certificate we created
- // above.
- outdated, err := cert.IsOutdated(
- parsedCert, extraIPs[:numIPs],
- extraDomains[:numDomains], false,
- )
- if err != nil {
- t.Fatal(err)
- }
-
- // We expect it to be considered outdated if the IPs or
- // domains don't match exactly what we created.
- expected := numIPs != 2 || numDomains != 2
- if outdated != expected {
- t.Fatalf("expected certificate to be "+
- "outdated=%v, got=%v", expected,
- outdated)
- }
- }
- }
-}
-
-// TestIsOutdatedPermutation tests that the order of listed IPs or DNS names,
-// nor dulicates in the lists, matter for whether we consider the certificate
-// outdated.
-func TestIsOutdatedPermutation(t *testing.T) {
- tempDir, errr := ioutil.TempDir("", "certtest")
- if errr != nil {
- t.Fatal(errr)
- }
-
- certPath := tempDir + "/tls.cert"
- keyPath := tempDir + "/tls.key"
-
- // Generate TLS files from the IPs and domains.
- err := cert.GenCertPair(
- "lnd autogenerated cert", certPath, keyPath, extraIPs[:],
- extraDomains[:], false, cert.DefaultAutogenValidity,
- )
- if err != nil {
- t.Fatal(err)
- }
- _, parsedCert, errr := cert.LoadCert(certPath, keyPath)
- if errr != nil {
- t.Fatal(errr)
- }
-
- // If we have duplicate IPs or DNS names listed, that shouldn't matter.
- dupIPs := make([]string, len(extraIPs)*2)
- for i := range dupIPs {
- dupIPs[i] = extraIPs[i/2]
- }
-
- dupDNS := make([]string, len(extraDomains)*2)
- for i := range dupDNS {
- dupDNS[i] = extraDomains[i/2]
- }
-
- outdated, err := cert.IsOutdated(parsedCert, dupIPs, dupDNS, false)
- if err != nil {
- t.Fatal(err)
- }
-
- if outdated {
- t.Fatalf("did not expect duplicate IPs or DNS names be " +
- "considered outdated")
- }
-
- // Similarly, the order of the lists shouldn't matter.
- revIPs := make([]string, len(extraIPs))
- for i := range revIPs {
- revIPs[i] = extraIPs[len(extraIPs)-1-i]
- }
-
- revDNS := make([]string, len(extraDomains))
- for i := range revDNS {
- revDNS[i] = extraDomains[len(extraDomains)-1-i]
- }
-
- outdated, err = cert.IsOutdated(parsedCert, revIPs, revDNS, false)
- if err != nil {
- t.Fatal(err)
- }
-
- if outdated {
- t.Fatalf("did not expect reversed IPs or DNS names be " +
- "considered outdated")
- }
-}
-
-// TestTLSDisableAutofill checks that setting the --tlsdisableautofill flag
-// does not add interface ip addresses or hostnames to the cert.
-func TestTLSDisableAutofill(t *testing.T) {
- tempDir, errr := ioutil.TempDir("", "certtest")
- if errr != nil {
- t.Fatal(errr)
- }
-
- certPath := tempDir + "/tls.cert"
- keyPath := tempDir + "/tls.key"
-
- // Generate TLS files with two extra IPs and domains and no interface IPs.
- err := cert.GenCertPair(
- "lnd autogenerated cert", certPath, keyPath, extraIPs[:2],
- extraDomains[:2], true, cert.DefaultAutogenValidity,
- )
- util.RequireNoErr(
- t, err,
- "unable to generate tls certificate pair",
- )
-
- _, parsedCert, errr := cert.LoadCert(
- certPath, keyPath,
- )
- require.NoError(
- t, errr,
- "unable to load tls certificate pair",
- )
-
- // Check if the TLS cert is outdated while still preventing
- // interface IPs from being used. Should not be outdated
- shouldNotBeOutdated, err := cert.IsOutdated(
- parsedCert, extraIPs[:2],
- extraDomains[:2], true,
- )
- util.RequireNoErr(t, err)
-
- require.Equal(
- t, false, shouldNotBeOutdated,
- "TLS Certificate was marked as outdated when it should not be",
- )
-
- // Check if the TLS cert is outdated while allowing for
- // interface IPs to be used. Should report as outdated.
- shouldBeOutdated, err := cert.IsOutdated(
- parsedCert, extraIPs[:2],
- extraDomains[:2], false,
- )
- util.RequireNoErr(t, err)
-
- require.Equal(
- t, true, shouldBeOutdated,
- "TLS Certificate was not marked as outdated when it should be",
- )
-}
diff --git a/lnd/cert/tls.go b/lnd/cert/tls.go
deleted file mode 100644
index a8783158..00000000
--- a/lnd/cert/tls.go
+++ /dev/null
@@ -1,60 +0,0 @@
-package cert
-
-import (
- "crypto/tls"
- "crypto/x509"
-)
-
-var (
- /*
- * tlsCipherSuites is the list of cipher suites we accept for TLS
- * connections. These cipher suites fit the following criteria:
- * - Don't use outdated algorithms like SHA-1 and 3DES
- * - Don't use ECB mode or other insecure symmetric methods
- * - Included in the TLS v1.2 suite
- * - Are available in the Go 1.7.6 standard library (more are
- * available in 1.8.3 and will be added after lnd no longer
- * supports 1.7, including suites that support CBC mode)
- **/
- tlsCipherSuites = []uint16{
- tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256,
- tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
- tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
- tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,
- }
-)
-
-// LoadCert loads a certificate and its corresponding private key from the PEM
-// files indicated and returns the certificate in the two formats it is most
-// commonly used.
-func LoadCert(certPath, keyPath string) (tls.Certificate, *x509.Certificate,
- error) {
-
- // The certData returned here is just a wrapper around the PEM blocks
- // loaded from the file. The PEM is not yet fully parsed but a basic
- // check is performed that the certificate and private key actually
- // belong together.
- certData, err := tls.LoadX509KeyPair(certPath, keyPath)
- if err != nil {
- return tls.Certificate{}, nil, err
- }
-
- // Now parse the the PEM block of the certificate into its x509 data
- // structure so it can be examined in more detail.
- x509Cert, err := x509.ParseCertificate(certData.Certificate[0])
- if err != nil {
- return tls.Certificate{}, nil, err
- }
-
- return certData, x509Cert, nil
-}
-
-// TLSConfFromCert returns the default TLS configuration used for a server,
-// using the given certificate as identity.
-func TLSConfFromCert(certData tls.Certificate) *tls.Config {
- return &tls.Config{
- Certificates: []tls.Certificate{certData},
- CipherSuites: tlsCipherSuites,
- MinVersion: tls.VersionTLS12,
- }
-}
diff --git a/lnd/chainntnfs/README.md b/lnd/chainntnfs/README.md
deleted file mode 100644
index 353dca0c..00000000
--- a/lnd/chainntnfs/README.md
+++ /dev/null
@@ -1,30 +0,0 @@
-chainntnfs
-==========
-
-[![Build Status](http://img.shields.io/travis/lightningnetwork/lnd.svg)](https://travis-ci.org/lightningnetwork/lnd)
-[![MIT licensed](https://img.shields.io/badge/license-MIT-blue.svg)](https://github.com/lightningnetwork/lnd/blob/master/LICENSE)
-[![GoDoc](https://img.shields.io/badge/godoc-reference-blue.svg)](http://godoc.org/github.com/lightningnetwork/lnd/chainntnfs)
-
-The chainntnfs package implements a set of interfaces which allow callers to
-receive notifications in response to specific on-chain events. The set of
-notifications available include:
-
- * Notifications for each new block connected to the current best chain.
- * Notifications once a `txid` has reached a specified number of
- confirmations.
- * Notifications once a target outpoint (`txid:index`) has been spent.
-
-These notifications are used within `lnd` in order to properly handle the
-workflows for: channel funding, cooperative channel closures, forced channel
-closures, channel contract breaches, sweeping time-locked outputs, and finally
-pruning the channel graph.
-
-This package is intentionally general enough to be applicable outside the
-specific use cases within `lnd` outlined above. The current sole concrete
-implementation of the `ChainNotifier` interface depends on `btcd`.
-
-## Installation and Updating
-
-```bash
-$ go get -u github.com/lightningnetwork/lnd/chainntnfs
-```
diff --git a/lnd/chainntnfs/btcdnotify/btcd.go b/lnd/chainntnfs/btcdnotify/btcd.go
deleted file mode 100644
index 4fd1dedf..00000000
--- a/lnd/chainntnfs/btcdnotify/btcd.go
+++ /dev/null
@@ -1,1013 +0,0 @@
-package btcdnotify
-
-import (
- "sync"
- "sync/atomic"
- "time"
-
- "github.com/pkt-cash/pktd/btcjson"
- "github.com/pkt-cash/pktd/btcutil"
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/chaincfg"
- "github.com/pkt-cash/pktd/chaincfg/chainhash"
- "github.com/pkt-cash/pktd/lnd/chainntnfs"
- "github.com/pkt-cash/pktd/lnd/queue"
- "github.com/pkt-cash/pktd/pktlog/log"
- "github.com/pkt-cash/pktd/rpcclient"
- "github.com/pkt-cash/pktd/txscript"
- "github.com/pkt-cash/pktd/wire"
-)
-
-const (
- // notifierType uniquely identifies this concrete implementation of the
- // ChainNotifier interface.
- notifierType = "btcd"
-)
-
-// chainUpdate encapsulates an update to the current main chain. This struct is
-// used as an element within an unbounded queue in order to avoid blocking the
-// main rpc dispatch rule.
-type chainUpdate struct {
- blockHash *chainhash.Hash
- blockHeight int32
-
- // connected is true if this update is a new block and false if it is a
- // disconnected block.
- connect bool
-}
-
-// txUpdate encapsulates a transaction related notification sent from btcd to
-// the registered RPC client. This struct is used as an element within an
-// unbounded queue in order to avoid blocking the main rpc dispatch rule.
-type txUpdate struct {
- tx *btcutil.Tx
- details *btcjson.BlockDetails
-}
-
-// TODO(roasbeef): generalize struct below:
-// * move chans to config, allow outside callers to handle send conditions
-
-// BtcdNotifier implements the ChainNotifier interface using btcd's websockets
-// notifications. Multiple concurrent clients are supported. All notifications
-// are achieved via non-blocking sends on client channels.
-type BtcdNotifier struct {
- epochClientCounter uint64 // To be used atomically.
-
- start sync.Once
- active int32 // To be used atomically.
- stopped int32 // To be used atomically.
-
- chainConn *rpcclient.Client
- chainParams *chaincfg.Params
-
- notificationCancels chan interface{}
- notificationRegistry chan interface{}
-
- txNotifier *chainntnfs.TxNotifier
-
- blockEpochClients map[uint64]*blockEpochRegistration
-
- bestBlock chainntnfs.BlockEpoch
-
- chainUpdates *queue.ConcurrentQueue
- txUpdates *queue.ConcurrentQueue
-
- // spendHintCache is a cache used to query and update the latest height
- // hints for an outpoint. Each height hint represents the earliest
- // height at which the outpoint could have been spent within the chain.
- spendHintCache chainntnfs.SpendHintCache
-
- // confirmHintCache is a cache used to query the latest height hints for
- // a transaction. Each height hint represents the earliest height at
- // which the transaction could have confirmed within the chain.
- confirmHintCache chainntnfs.ConfirmHintCache
-
- wg sync.WaitGroup
- quit chan struct{}
-}
-
-// Ensure BtcdNotifier implements the ChainNotifier interface at compile time.
-var _ chainntnfs.ChainNotifier = (*BtcdNotifier)(nil)
-
-// New returns a new BtcdNotifier instance. This function assumes the btcd node
-// detailed in the passed configuration is already running, and willing to
-// accept new websockets clients.
-func New(config *rpcclient.ConnConfig, chainParams *chaincfg.Params,
- spendHintCache chainntnfs.SpendHintCache,
- confirmHintCache chainntnfs.ConfirmHintCache) (*BtcdNotifier, er.R) {
-
- notifier := &BtcdNotifier{
- chainParams: chainParams,
-
- notificationCancels: make(chan interface{}),
- notificationRegistry: make(chan interface{}),
-
- blockEpochClients: make(map[uint64]*blockEpochRegistration),
-
- chainUpdates: queue.NewConcurrentQueue(10),
- txUpdates: queue.NewConcurrentQueue(10),
-
- spendHintCache: spendHintCache,
- confirmHintCache: confirmHintCache,
-
- quit: make(chan struct{}),
- }
-
- ntfnCallbacks := &rpcclient.NotificationHandlers{
- OnBlockConnected: notifier.onBlockConnected,
- OnBlockDisconnected: notifier.onBlockDisconnected,
- OnRedeemingTx: notifier.onRedeemingTx,
- }
-
- // Disable connecting to btcd within the rpcclient.New method. We
- // defer establishing the connection to our .Start() method.
- config.DisableConnectOnNew = true
- config.DisableAutoReconnect = false
- chainConn, err := rpcclient.New(config, ntfnCallbacks)
- if err != nil {
- return nil, err
- }
- notifier.chainConn = chainConn
-
- return notifier, nil
-}
-
-// Start connects to the running btcd node over websockets, registers for block
-// notifications, and finally launches all related helper goroutines.
-func (b *BtcdNotifier) Start() er.R {
- var startErr er.R
- b.start.Do(func() {
- startErr = b.startNotifier()
- })
- return startErr
-}
-
-// Started returns true if this instance has been started, and false otherwise.
-func (b *BtcdNotifier) Started() bool {
- return atomic.LoadInt32(&b.active) != 0
-}
-
-// Stop shutsdown the BtcdNotifier.
-func (b *BtcdNotifier) Stop() er.R {
- // Already shutting down?
- if atomic.AddInt32(&b.stopped, 1) != 1 {
- return nil
- }
-
- // Shutdown the rpc client, this gracefully disconnects from btcd, and
- // cleans up all related resources.
- b.chainConn.Shutdown()
-
- close(b.quit)
- b.wg.Wait()
-
- b.chainUpdates.Stop()
- b.txUpdates.Stop()
-
- // Notify all pending clients of our shutdown by closing the related
- // notification channels.
- for _, epochClient := range b.blockEpochClients {
- close(epochClient.cancelChan)
- epochClient.wg.Wait()
-
- close(epochClient.epochChan)
- }
- b.txNotifier.TearDown()
-
- return nil
-}
-
-func (b *BtcdNotifier) startNotifier() er.R {
- // Start our concurrent queues before starting the chain connection, to
- // ensure onBlockConnected and onRedeemingTx callbacks won't be
- // blocked.
- b.chainUpdates.Start()
- b.txUpdates.Start()
-
- // Connect to btcd, and register for notifications on connected, and
- // disconnected blocks.
- if err := b.chainConn.Connect(20); err != nil {
- b.txUpdates.Stop()
- b.chainUpdates.Stop()
- return err
- }
-
- currentHash, currentHeight, err := b.chainConn.GetBestBlock()
- if err != nil {
- b.txUpdates.Stop()
- b.chainUpdates.Stop()
- return err
- }
-
- b.txNotifier = chainntnfs.NewTxNotifier(
- uint32(currentHeight), chainntnfs.ReorgSafetyLimit,
- b.confirmHintCache, b.spendHintCache,
- )
-
- b.bestBlock = chainntnfs.BlockEpoch{
- Height: currentHeight,
- Hash: currentHash,
- }
-
- if err := b.chainConn.NotifyBlocks(); err != nil {
- b.txUpdates.Stop()
- b.chainUpdates.Stop()
- return err
- }
-
- b.wg.Add(1)
- go b.notificationDispatcher()
-
- // Set the active flag now that we've completed the full
- // startup.
- atomic.StoreInt32(&b.active, 1)
-
- return nil
-}
-
-// onBlockConnected implements on OnBlockConnected callback for rpcclient.
-// Ingesting a block updates the wallet's internal utxo state based on the
-// outputs created and destroyed within each block.
-func (b *BtcdNotifier) onBlockConnected(hash *chainhash.Hash, height int32, t time.Time) {
- // Append this new chain update to the end of the queue of new chain
- // updates.
- select {
- case b.chainUpdates.ChanIn() <- &chainUpdate{
- blockHash: hash,
- blockHeight: height,
- connect: true,
- }:
- case <-b.quit:
- return
- }
-}
-
-// filteredBlock represents a new block which has been connected to the main
-// chain. The slice of transactions will only be populated if the block
-// includes a transaction that confirmed one of our watched txids, or spends
-// one of the outputs currently being watched.
-// TODO(halseth): this is currently used for complete blocks. Change to use
-// onFilteredBlockConnected and onFilteredBlockDisconnected, making it easier
-// to unify with the Neutrino implementation.
-type filteredBlock struct {
- hash chainhash.Hash
- height uint32
- txns []*btcutil.Tx
-
- // connected is true if this update is a new block and false if it is a
- // disconnected block.
- connect bool
-}
-
-// onBlockDisconnected implements on OnBlockDisconnected callback for rpcclient.
-func (b *BtcdNotifier) onBlockDisconnected(hash *chainhash.Hash, height int32, t time.Time) {
- // Append this new chain update to the end of the queue of new chain
- // updates.
- select {
- case b.chainUpdates.ChanIn() <- &chainUpdate{
- blockHash: hash,
- blockHeight: height,
- connect: false,
- }:
- case <-b.quit:
- return
- }
-}
-
-// onRedeemingTx implements on OnRedeemingTx callback for rpcclient.
-func (b *BtcdNotifier) onRedeemingTx(tx *btcutil.Tx, details *btcjson.BlockDetails) {
- // Append this new transaction update to the end of the queue of new
- // chain updates.
- select {
- case b.txUpdates.ChanIn() <- &txUpdate{tx, details}:
- case <-b.quit:
- return
- }
-}
-
-// notificationDispatcher is the primary goroutine which handles client
-// notification registrations, as well as notification dispatches.
-func (b *BtcdNotifier) notificationDispatcher() {
- defer b.wg.Done()
-
-out:
- for {
- select {
- case cancelMsg := <-b.notificationCancels:
- switch msg := cancelMsg.(type) {
- case *epochCancel:
- log.Infof("Cancelling epoch "+
- "notification, epoch_id=%v", msg.epochID)
-
- // First, we'll lookup the original
- // registration in order to stop the active
- // queue goroutine.
- reg := b.blockEpochClients[msg.epochID]
- reg.epochQueue.Stop()
-
- // Next, close the cancel channel for this
- // specific client, and wait for the client to
- // exit.
- close(b.blockEpochClients[msg.epochID].cancelChan)
- b.blockEpochClients[msg.epochID].wg.Wait()
-
- // Once the client has exited, we can then
- // safely close the channel used to send epoch
- // notifications, in order to notify any
- // listeners that the intent has been
- // canceled.
- close(b.blockEpochClients[msg.epochID].epochChan)
- delete(b.blockEpochClients, msg.epochID)
- }
- case registerMsg := <-b.notificationRegistry:
- switch msg := registerMsg.(type) {
- case *chainntnfs.HistoricalConfDispatch:
- // Look up whether the transaction/output script
- // has already confirmed in the active chain.
- // We'll do this in a goroutine to prevent
- // blocking potentially long rescans.
- //
- // TODO(wilmer): add retry logic if rescan fails?
- b.wg.Add(1)
- go func() {
- defer b.wg.Done()
-
- confDetails, _, err := b.historicalConfDetails(
- msg.ConfRequest,
- msg.StartHeight, msg.EndHeight,
- )
- if err != nil {
- log.Error(err)
- return
- }
-
- // If the historical dispatch finished
- // without error, we will invoke
- // UpdateConfDetails even if none were
- // found. This allows the notifier to
- // begin safely updating the height hint
- // cache at tip, since any pending
- // rescans have now completed.
- err = b.txNotifier.UpdateConfDetails(
- msg.ConfRequest, confDetails,
- )
- if err != nil {
- log.Error(err)
- }
- }()
-
- case *blockEpochRegistration:
- log.Infof("New block epoch subscription")
-
- b.blockEpochClients[msg.epochID] = msg
-
- // If the client did not provide their best
- // known block, then we'll immediately dispatch
- // a notification for the current tip.
- if msg.bestBlock == nil {
- b.notifyBlockEpochClient(
- msg, b.bestBlock.Height,
- b.bestBlock.Hash,
- )
-
- msg.errorChan <- nil
- continue
- }
-
- // Otherwise, we'll attempt to deliver the
- // backlog of notifications from their best
- // known block.
- missedBlocks, err := chainntnfs.GetClientMissedBlocks(
- b.chainConn, msg.bestBlock,
- b.bestBlock.Height, true,
- )
- if err != nil {
- msg.errorChan <- err
- continue
- }
-
- for _, block := range missedBlocks {
- b.notifyBlockEpochClient(
- msg, block.Height, block.Hash,
- )
- }
-
- msg.errorChan <- nil
- }
-
- case item := <-b.chainUpdates.ChanOut():
- update := item.(*chainUpdate)
- if update.connect {
- blockHeader, err :=
- b.chainConn.GetBlockHeader(update.blockHash)
- if err != nil {
- log.Errorf("Unable to fetch "+
- "block header: %v", err)
- continue
- }
-
- if blockHeader.PrevBlock != *b.bestBlock.Hash {
- // Handle the case where the notifier
- // missed some blocks from its chain
- // backend
- log.Infof("Missed blocks, " +
- "attempting to catch up")
- newBestBlock, missedBlocks, err :=
- chainntnfs.HandleMissedBlocks(
- b.chainConn,
- b.txNotifier,
- b.bestBlock,
- update.blockHeight,
- true,
- )
- if err != nil {
- // Set the bestBlock here in case
- // a catch up partially completed.
- b.bestBlock = newBestBlock
- log.Error(err)
- continue
- }
-
- for _, block := range missedBlocks {
- err := b.handleBlockConnected(block)
- if err != nil {
- log.Error(err)
- continue out
- }
- }
- }
-
- newBlock := chainntnfs.BlockEpoch{
- Height: update.blockHeight,
- Hash: update.blockHash,
- }
- if err := b.handleBlockConnected(newBlock); err != nil {
- log.Error(err)
- }
- continue
- }
-
- if update.blockHeight != b.bestBlock.Height {
- log.Infof("Missed disconnected" +
- "blocks, attempting to catch up")
- }
-
- newBestBlock, err := chainntnfs.RewindChain(
- b.chainConn, b.txNotifier, b.bestBlock,
- update.blockHeight-1,
- )
- if err != nil {
- log.Errorf("Unable to rewind chain "+
- "from height %d to height %d: %v",
- b.bestBlock.Height, update.blockHeight-1, err)
- }
-
- // Set the bestBlock here in case a chain rewind
- // partially completed.
- b.bestBlock = newBestBlock
-
- case item := <-b.txUpdates.ChanOut():
- newSpend := item.(*txUpdate)
-
- // We only care about notifying on confirmed spends, so
- // if this is a mempool spend, we can ignore it and wait
- // for the spend to appear in on-chain.
- if newSpend.details == nil {
- continue
- }
-
- err := b.txNotifier.ProcessRelevantSpendTx(
- newSpend.tx, uint32(newSpend.details.Height),
- )
- if err != nil {
- log.Errorf("Unable to process "+
- "transaction %v: %v",
- newSpend.tx.Hash(), err)
- }
-
- case <-b.quit:
- break out
- }
- }
-}
-
-// historicalConfDetails looks up whether a confirmation request (txid/output
-// script) has already been included in a block in the active chain and, if so,
-// returns details about said block.
-func (b *BtcdNotifier) historicalConfDetails(confRequest chainntnfs.ConfRequest,
- startHeight, endHeight uint32) (*chainntnfs.TxConfirmation,
- chainntnfs.TxConfStatus, er.R) {
-
- // If a txid was not provided, then we should dispatch upon seeing the
- // script on-chain, so we'll short-circuit straight to scanning manually
- // as there doesn't exist a script index to query.
- if confRequest.TxID == chainntnfs.ZeroHash {
- return b.confDetailsManually(
- confRequest, startHeight, endHeight,
- )
- }
-
- // Otherwise, we'll dispatch upon seeing a transaction on-chain with the
- // given hash.
- //
- // We'll first attempt to retrieve the transaction using the node's
- // txindex.
- txNotFoundErr := "No information available about transaction"
- txConf, txStatus, err := chainntnfs.ConfDetailsFromTxIndex(
- b.chainConn, confRequest, txNotFoundErr,
- )
-
- // We'll then check the status of the transaction lookup returned to
- // determine whether we should proceed with any fallback methods.
- switch {
-
- // We failed querying the index for the transaction, fall back to
- // scanning manually.
- case err != nil:
- log.Debugf("Unable to determine confirmation of %v "+
- "through the backend's txindex (%v), scanning manually",
- confRequest.TxID, err)
-
- return b.confDetailsManually(
- confRequest, startHeight, endHeight,
- )
-
- // The transaction was found within the node's mempool.
- case txStatus == chainntnfs.TxFoundMempool:
-
- // The transaction was found within the node's txindex.
- case txStatus == chainntnfs.TxFoundIndex:
-
- // The transaction was not found within the node's mempool or txindex.
- case txStatus == chainntnfs.TxNotFoundIndex:
-
- // Unexpected txStatus returned.
- default:
- return nil, txStatus,
- er.Errorf("Got unexpected txConfStatus: %v", txStatus)
- }
-
- return txConf, txStatus, nil
-}
-
-// confDetailsManually looks up whether a transaction/output script has already
-// been included in a block in the active chain by scanning the chain's blocks
-// within the given range. If the transaction/output script is found, its
-// confirmation details are returned. Otherwise, nil is returned.
-func (b *BtcdNotifier) confDetailsManually(confRequest chainntnfs.ConfRequest,
- startHeight, endHeight uint32) (*chainntnfs.TxConfirmation,
- chainntnfs.TxConfStatus, er.R) {
-
- // Begin scanning blocks at every height to determine where the
- // transaction was included in.
- for height := endHeight; height >= startHeight && height > 0; height-- {
- // Ensure we haven't been requested to shut down before
- // processing the next height.
- select {
- case <-b.quit:
- return nil, chainntnfs.TxNotFoundManually,
- chainntnfs.ErrChainNotifierShuttingDown.Default()
- default:
- }
-
- blockHash, err := b.chainConn.GetBlockHash(int64(height))
- if err != nil {
- return nil, chainntnfs.TxNotFoundManually,
- er.Errorf("unable to get hash from block "+
- "with height %d", height)
- }
-
- // TODO: fetch the neutrino filters instead.
- block, err := b.chainConn.GetBlock(blockHash)
- if err != nil {
- return nil, chainntnfs.TxNotFoundManually,
- er.Errorf("unable to get block with hash "+
- "%v: %v", blockHash, err)
- }
-
- // For every transaction in the block, check which one matches
- // our request. If we find one that does, we can dispatch its
- // confirmation details.
- for txIndex, tx := range block.Transactions {
- if !confRequest.MatchesTx(tx) {
- continue
- }
-
- return &chainntnfs.TxConfirmation{
- Tx: tx,
- BlockHash: blockHash,
- BlockHeight: height,
- TxIndex: uint32(txIndex),
- }, chainntnfs.TxFoundManually, nil
- }
- }
-
- // If we reach here, then we were not able to find the transaction
- // within a block, so we avoid returning an error.
- return nil, chainntnfs.TxNotFoundManually, nil
-}
-
-// handleBlockConnected applies a chain update for a new block. Any watched
-// transactions included this block will processed to either send notifications
-// now or after numConfirmations confs.
-// TODO(halseth): this is reusing the neutrino notifier implementation, unify
-// them.
-func (b *BtcdNotifier) handleBlockConnected(epoch chainntnfs.BlockEpoch) er.R {
- // First, we'll fetch the raw block as we'll need to gather all the
- // transactions to determine whether any are relevant to our registered
- // clients.
- rawBlock, err := b.chainConn.GetBlock(epoch.Hash)
- if err != nil {
- return er.Errorf("unable to get block: %v", err)
- }
- newBlock := &filteredBlock{
- hash: *epoch.Hash,
- height: uint32(epoch.Height),
- txns: btcutil.NewBlock(rawBlock).Transactions(),
- connect: true,
- }
-
- // We'll then extend the txNotifier's height with the information of
- // this new block, which will handle all of the notification logic for
- // us.
- errr := b.txNotifier.ConnectTip(
- &newBlock.hash, newBlock.height, newBlock.txns,
- )
- if errr != nil {
- return er.Errorf("unable to connect tip: %v", errr)
- }
-
- log.Infof("New block: height=%v, sha=%v", epoch.Height,
- epoch.Hash)
-
- // Now that we've guaranteed the new block extends the txNotifier's
- // current tip, we'll proceed to dispatch notifications to all of our
- // registered clients whom have had notifications fulfilled. Before
- // doing so, we'll make sure update our in memory state in order to
- // satisfy any client requests based upon the new block.
- b.bestBlock = epoch
-
- b.notifyBlockEpochs(epoch.Height, epoch.Hash)
- return b.txNotifier.NotifyHeight(uint32(epoch.Height))
-}
-
-// notifyBlockEpochs notifies all registered block epoch clients of the newly
-// connected block to the main chain.
-func (b *BtcdNotifier) notifyBlockEpochs(newHeight int32, newSha *chainhash.Hash) {
- for _, client := range b.blockEpochClients {
- b.notifyBlockEpochClient(client, newHeight, newSha)
- }
-}
-
-// notifyBlockEpochClient sends a registered block epoch client a notification
-// about a specific block.
-func (b *BtcdNotifier) notifyBlockEpochClient(epochClient *blockEpochRegistration,
- height int32, sha *chainhash.Hash) {
-
- epoch := &chainntnfs.BlockEpoch{
- Height: height,
- Hash: sha,
- }
-
- select {
- case epochClient.epochQueue.ChanIn() <- epoch:
- case <-epochClient.cancelChan:
- case <-b.quit:
- }
-}
-
-// RegisterSpendNtfn registers an intent to be notified once the target
-// outpoint/output script has been spent by a transaction on-chain. When
-// intending to be notified of the spend of an output script, a nil outpoint
-// must be used. The heightHint should represent the earliest height in the
-// chain of the transaction that spent the outpoint/output script.
-//
-// Once a spend of has been detected, the details of the spending event will be
-// sent across the 'Spend' channel.
-func (b *BtcdNotifier) RegisterSpendNtfn(outpoint *wire.OutPoint,
- pkScript []byte, heightHint uint32) (*chainntnfs.SpendEvent, er.R) {
-
- // Register the conf notification with the TxNotifier. A non-nil value
- // for `dispatch` will be returned if we are required to perform a
- // manual scan for the confirmation. Otherwise the notifier will begin
- // watching at tip for the transaction to confirm.
- ntfn, errr := b.txNotifier.RegisterSpend(outpoint, pkScript, heightHint)
- if errr != nil {
- return nil, errr
- }
-
- // We'll then request the backend to notify us when it has detected the
- // outpoint/output script as spent.
- //
- // TODO(wilmer): use LoadFilter API instead.
- if outpoint == nil || *outpoint == chainntnfs.ZeroOutPoint {
- _, addrs, _, err := txscript.ExtractPkScriptAddrs(
- pkScript, b.chainParams,
- )
- if err != nil {
- return nil, er.Errorf("unable to parse script: %v", err)
- }
- if err := b.chainConn.NotifyReceived(addrs); err != nil {
- return nil, err
- }
- } else {
- ops := []*wire.OutPoint{outpoint}
- if err := b.chainConn.NotifySpent(ops); err != nil {
- return nil, err
- }
- }
-
- // If the txNotifier didn't return any details to perform a historical
- // scan of the chain, then we can return early as there's nothing left
- // for us to do.
- if ntfn.HistoricalDispatch == nil {
- return ntfn.Event, nil
- }
-
- // Otherwise, we'll need to dispatch a historical rescan to determine if
- // the outpoint was already spent at a previous height.
- //
- // We'll short-circuit the path when dispatching the spend of a script,
- // rather than an outpoint, as there aren't any additional checks we can
- // make for scripts.
- if outpoint == nil || *outpoint == chainntnfs.ZeroOutPoint {
- startHash, err := b.chainConn.GetBlockHash(
- int64(ntfn.HistoricalDispatch.StartHeight),
- )
- if err != nil {
- return nil, err
- }
-
- // TODO(wilmer): add retry logic if rescan fails?
- _, addrs, _, err := txscript.ExtractPkScriptAddrs(
- pkScript, b.chainParams,
- )
- if err != nil {
- return nil, er.Errorf("unable to parse address: %v", err)
- }
-
- asyncResult := b.chainConn.RescanAsync(startHash, addrs, nil)
- go func() {
- if rescanErr := asyncResult.Receive(); rescanErr != nil {
- log.Errorf("Rescan to determine "+
- "the spend details of %v failed: %v",
- ntfn.HistoricalDispatch.SpendRequest,
- rescanErr)
- }
- }()
-
- return ntfn.Event, nil
- }
-
- // When dispatching spends of outpoints, there are a number of checks we
- // can make to start our rescan from a better height or completely avoid
- // it.
- //
- // We'll start by checking the backend's UTXO set to determine whether
- // the outpoint has been spent. If it hasn't, we can return to the
- // caller as well.
- txOut, err := b.chainConn.GetTxOut(&outpoint.Hash, outpoint.Index, true)
- if err != nil {
- return nil, err
- }
- if txOut != nil {
- // We'll let the txNotifier know the outpoint is still unspent
- // in order to begin updating its spend hint.
- err := b.txNotifier.UpdateSpendDetails(
- ntfn.HistoricalDispatch.SpendRequest, nil,
- )
- if err != nil {
- return nil, err
- }
-
- return ntfn.Event, nil
- }
-
- // Since the outpoint was spent, as it no longer exists within the UTXO
- // set, we'll determine when it happened by scanning the chain. We'll
- // begin by fetching the block hash of our starting height.
- startHash, err := b.chainConn.GetBlockHash(
- int64(ntfn.HistoricalDispatch.StartHeight),
- )
- if err != nil {
- return nil, er.Errorf("unable to get block hash for height "+
- "%d: %v", ntfn.HistoricalDispatch.StartHeight, err)
- }
-
- // As a minimal optimization, we'll query the backend's transaction
- // index (if enabled) to determine if we have a better rescan starting
- // height. We can do this as the GetRawTransaction call will return the
- // hash of the block it was included in within the chain.
- tx, err := b.chainConn.GetRawTransactionVerbose(&outpoint.Hash)
- if err != nil {
- // Avoid returning an error if the transaction was not found to
- // proceed with fallback methods.
- if !btcjson.ErrRPCNoTxInfo.Is(err) {
- return nil, er.Errorf("unable to query for txid %v: %v",
- outpoint.Hash, err)
- }
- }
-
- // If the transaction index was enabled, we'll use the block's hash to
- // retrieve its height and check whether it provides a better starting
- // point for our rescan.
- if tx != nil {
- // If the transaction containing the outpoint hasn't confirmed
- // on-chain, then there's no need to perform a rescan.
- if tx.BlockHash == "" {
- return ntfn.Event, nil
- }
-
- blockHash, err := chainhash.NewHashFromStr(tx.BlockHash)
- if err != nil {
- return nil, err
- }
- blockHeader, err := b.chainConn.GetBlockHeaderVerbose(blockHash)
- if err != nil {
- return nil, er.Errorf("unable to get header for "+
- "block %v: %v", blockHash, err)
- }
-
- if uint32(blockHeader.Height) > ntfn.HistoricalDispatch.StartHeight {
- startHash, err = b.chainConn.GetBlockHash(
- int64(blockHeader.Height),
- )
- if err != nil {
- return nil, er.Errorf("unable to get block "+
- "hash for height %d: %v",
- blockHeader.Height, err)
- }
- }
- }
-
- // Now that we've determined the best starting point for our rescan,
- // we can go ahead and dispatch it.
- //
- // In order to ensure that we don't block the caller on what may be a
- // long rescan, we'll launch a new goroutine to handle the async result
- // of the rescan. We purposefully prevent from adding this goroutine to
- // the WaitGroup as we cannot wait for a quit signal due to the
- // asyncResult channel not being exposed.
- //
- // TODO(wilmer): add retry logic if rescan fails?
- asyncResult := b.chainConn.RescanAsync(
- startHash, nil, []*wire.OutPoint{outpoint},
- )
- go func() {
- if rescanErr := asyncResult.Receive(); rescanErr != nil {
- log.Errorf("Rescan to determine the spend "+
- "details of %v failed: %v", outpoint, rescanErr)
- }
- }()
-
- return ntfn.Event, nil
-}
-
-// RegisterConfirmationsNtfn registers an intent to be notified once the target
-// txid/output script has reached numConfs confirmations on-chain. When
-// intending to be notified of the confirmation of an output script, a nil txid
-// must be used. The heightHint should represent the earliest height at which
-// the txid/output script could have been included in the chain.
-//
-// Progress on the number of confirmations left can be read from the 'Updates'
-// channel. Once it has reached all of its confirmations, a notification will be
-// sent across the 'Confirmed' channel.
-func (b *BtcdNotifier) RegisterConfirmationsNtfn(txid *chainhash.Hash,
- pkScript []byte,
- numConfs, heightHint uint32) (*chainntnfs.ConfirmationEvent, er.R) {
-
- // Register the conf notification with the TxNotifier. A non-nil value
- // for `dispatch` will be returned if we are required to perform a
- // manual scan for the confirmation. Otherwise the notifier will begin
- // watching at tip for the transaction to confirm.
- ntfn, err := b.txNotifier.RegisterConf(
- txid, pkScript, numConfs, heightHint,
- )
- if err != nil {
- return nil, err
- }
-
- if ntfn.HistoricalDispatch == nil {
- return ntfn.Event, nil
- }
-
- select {
- case b.notificationRegistry <- ntfn.HistoricalDispatch:
- return ntfn.Event, nil
- case <-b.quit:
- return nil, chainntnfs.ErrChainNotifierShuttingDown.Default()
- }
-}
-
-// blockEpochRegistration represents a client's intent to receive a
-// notification with each newly connected block.
-type blockEpochRegistration struct {
- epochID uint64
-
- epochChan chan *chainntnfs.BlockEpoch
-
- epochQueue *queue.ConcurrentQueue
-
- bestBlock *chainntnfs.BlockEpoch
-
- errorChan chan er.R
-
- cancelChan chan struct{}
-
- wg sync.WaitGroup
-}
-
-// epochCancel is a message sent to the BtcdNotifier when a client wishes to
-// cancel an outstanding epoch notification that has yet to be dispatched.
-type epochCancel struct {
- epochID uint64
-}
-
-// RegisterBlockEpochNtfn returns a BlockEpochEvent which subscribes the
-// caller to receive notifications, of each new block connected to the main
-// chain. Clients have the option of passing in their best known block, which
-// the notifier uses to check if they are behind on blocks and catch them up. If
-// they do not provide one, then a notification will be dispatched immediately
-// for the current tip of the chain upon a successful registration.
-func (b *BtcdNotifier) RegisterBlockEpochNtfn(
- bestBlock *chainntnfs.BlockEpoch) (*chainntnfs.BlockEpochEvent, er.R) {
-
- reg := &blockEpochRegistration{
- epochQueue: queue.NewConcurrentQueue(20),
- epochChan: make(chan *chainntnfs.BlockEpoch, 20),
- cancelChan: make(chan struct{}),
- epochID: atomic.AddUint64(&b.epochClientCounter, 1),
- bestBlock: bestBlock,
- errorChan: make(chan er.R, 1),
- }
-
- reg.epochQueue.Start()
-
- // Before we send the request to the main goroutine, we'll launch a new
- // goroutine to proxy items added to our queue to the client itself.
- // This ensures that all notifications are received *in order*.
- reg.wg.Add(1)
- go func() {
- defer reg.wg.Done()
-
- for {
- select {
- case ntfn := <-reg.epochQueue.ChanOut():
- blockNtfn := ntfn.(*chainntnfs.BlockEpoch)
- select {
- case reg.epochChan <- blockNtfn:
-
- case <-reg.cancelChan:
- return
-
- case <-b.quit:
- return
- }
-
- case <-reg.cancelChan:
- return
-
- case <-b.quit:
- return
- }
- }
- }()
-
- select {
- case <-b.quit:
- // As we're exiting before the registration could be sent,
- // we'll stop the queue now ourselves.
- reg.epochQueue.Stop()
-
- return nil, er.New("chainntnfs: system interrupt while " +
- "attempting to register for block epoch notification.")
- case b.notificationRegistry <- reg:
- return &chainntnfs.BlockEpochEvent{
- Epochs: reg.epochChan,
- Cancel: func() {
- cancel := &epochCancel{
- epochID: reg.epochID,
- }
-
- // Submit epoch cancellation to notification dispatcher.
- select {
- case b.notificationCancels <- cancel:
- // Cancellation is being handled, drain
- // the epoch channel until it is closed
- // before yielding to caller.
- for {
- select {
- case _, ok := <-reg.epochChan:
- if !ok {
- return
- }
- case <-b.quit:
- return
- }
- }
- case <-b.quit:
- }
- },
- }, nil
- }
-}
diff --git a/lnd/chainntnfs/btcdnotify/btcd_dev.go b/lnd/chainntnfs/btcdnotify/btcd_dev.go
deleted file mode 100644
index 68d5213c..00000000
--- a/lnd/chainntnfs/btcdnotify/btcd_dev.go
+++ /dev/null
@@ -1,80 +0,0 @@
-// +build dev
-
-package btcdnotify
-
-import (
- "time"
-
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/chaincfg/chainhash"
- "github.com/pkt-cash/pktd/lnd/chainntnfs"
-)
-
-// UnsafeStart starts the notifier with a specified best height and optional
-// best hash. Its bestBlock and txNotifier are initialized with bestHeight and
-// optionally bestHash. The parameter generateBlocks is necessary for the
-// bitcoind notifier to ensure we drain all notifications up to syncHeight,
-// since if they are generated ahead of UnsafeStart the chainConn may start up
-// with an outdated best block and miss sending ntfns. Used for testing.
-func (b *BtcdNotifier) UnsafeStart(bestHeight int32, bestHash *chainhash.Hash,
- syncHeight int32, generateBlocks func() er.R) er.R {
-
- // Connect to btcd, and register for notifications on connected, and
- // disconnected blocks.
- if err := b.chainConn.Connect(20); err != nil {
- return err
- }
- if err := b.chainConn.NotifyBlocks(); err != nil {
- return err
- }
-
- b.txNotifier = chainntnfs.NewTxNotifier(
- uint32(bestHeight), chainntnfs.ReorgSafetyLimit,
- b.confirmHintCache, b.spendHintCache,
- )
-
- b.chainUpdates.Start()
- b.txUpdates.Start()
-
- if generateBlocks != nil {
- // Ensure no block notifications are pending when we start the
- // notification dispatcher goroutine.
-
- // First generate the blocks, then drain the notifications
- // for the generated blocks.
- if err := generateBlocks(); err != nil {
- return err
- }
-
- timeout := time.After(60 * time.Second)
- loop:
- for {
- select {
- case ntfn := <-b.chainUpdates.ChanOut():
- lastReceivedNtfn := ntfn.(*chainUpdate)
- if lastReceivedNtfn.blockHeight >= syncHeight {
- break loop
- }
- case <-timeout:
- return er.Errorf("unable to catch up to height %d",
- syncHeight)
- }
- }
- }
-
- // Run notificationDispatcher after setting the notifier's best block
- // to avoid a race condition.
- b.bestBlock = chainntnfs.BlockEpoch{Height: bestHeight, Hash: bestHash}
- if bestHash == nil {
- hash, err := b.chainConn.GetBlockHash(int64(bestHeight))
- if err != nil {
- return err
- }
- b.bestBlock.Hash = hash
- }
-
- b.wg.Add(1)
- go b.notificationDispatcher()
-
- return nil
-}
diff --git a/lnd/chainntnfs/btcdnotify/btcd_test.go b/lnd/chainntnfs/btcdnotify/btcd_test.go
deleted file mode 100644
index a8d46bd6..00000000
--- a/lnd/chainntnfs/btcdnotify/btcd_test.go
+++ /dev/null
@@ -1,249 +0,0 @@
-// +build dev
-
-package btcdnotify
-
-import (
- "bytes"
- "io/ioutil"
- "os"
- "testing"
-
- "github.com/pkt-cash/pktd/chaincfg/chainhash"
- "github.com/pkt-cash/pktd/chaincfg/globalcfg"
- "github.com/pkt-cash/pktd/integration/rpctest"
- "github.com/pkt-cash/pktd/lnd/chainntnfs"
- "github.com/pkt-cash/pktd/lnd/channeldb"
-)
-
-var (
- testScript = []byte{
- // OP_HASH160
- 0xA9,
- // OP_DATA_20
- 0x14,
- // <20-byte hash>
- 0xec, 0x6f, 0x7a, 0x5a, 0xa8, 0xf2, 0xb1, 0x0c, 0xa5, 0x15,
- 0x04, 0x52, 0x3a, 0x60, 0xd4, 0x03, 0x06, 0xf6, 0x96, 0xcd,
- // OP_EQUAL
- 0x87,
- }
-)
-
-func initHintCache(t *testing.T) *chainntnfs.HeightHintCache {
- t.Helper()
-
- tempDir, errr := ioutil.TempDir("", "kek")
- if errr != nil {
- t.Fatalf("unable to create temp dir: %v", errr)
- }
- db, err := channeldb.Open(tempDir)
- if err != nil {
- t.Fatalf("unable to create db: %v", err)
- }
- testCfg := chainntnfs.CacheConfig{
- QueryDisable: false,
- }
- hintCache, err := chainntnfs.NewHeightHintCache(testCfg, db)
- if err != nil {
- t.Fatalf("unable to create hint cache: %v", err)
- }
-
- return hintCache
-}
-
-// setUpNotifier is a helper function to start a new notifier backed by a btcd
-// driver.
-func setUpNotifier(t *testing.T, h *rpctest.Harness) *BtcdNotifier {
- hintCache := initHintCache(t)
-
- rpcCfg := h.RPCConfig()
- notifier, err := New(&rpcCfg, chainntnfs.NetParams, hintCache, hintCache)
- if err != nil {
- t.Fatalf("unable to create notifier: %v", err)
- }
- if err := notifier.Start(); err != nil {
- t.Fatalf("unable to start notifier: %v", err)
- }
-
- return notifier
-}
-
-// TestHistoricalConfDetailsTxIndex ensures that we correctly retrieve
-// historical confirmation details using the backend node's txindex.
-// TODO(cjd): DISABLED TEST - needs investigation
-func _TestHistoricalConfDetailsTxIndex(t *testing.T) {
- t.Parallel()
-
- harness, tearDown := chainntnfs.NewMiner(
- t, []string{"--txindex"}, true, 25,
- )
- defer tearDown()
-
- notifier := setUpNotifier(t, harness)
- defer notifier.Stop()
-
- // A transaction unknown to the node should not be found within the
- // txindex even if it is enabled, so we should not proceed with any
- // fallback methods.
- var unknownHash chainhash.Hash
- copy(unknownHash[:], bytes.Repeat([]byte{0x10}, 32))
- unknownConfReq, err := chainntnfs.NewConfRequest(&unknownHash, testScript)
- if err != nil {
- t.Fatalf("unable to create conf request: %v", err)
- }
- _, txStatus, err := notifier.historicalConfDetails(unknownConfReq, 0, 0)
- if err != nil {
- t.Fatalf("unable to retrieve historical conf details: %v", err)
- }
-
- switch txStatus {
- case chainntnfs.TxNotFoundIndex:
- case chainntnfs.TxNotFoundManually:
- t.Fatal("should not have proceeded with fallback method, but did")
- default:
- t.Fatal("should not have found non-existent transaction, but did")
- }
-
- // Now, we'll create a test transaction and attempt to retrieve its
- // confirmation details.
- txid, pkScript, err := chainntnfs.GetTestTxidAndScript(harness)
- if err != nil {
- t.Fatalf("unable to create tx: %v", err)
- }
- if err := chainntnfs.WaitForMempoolTx(harness, txid); err != nil {
- t.Fatalf("unable to find tx in the mempool: %v", err)
- }
- confReq, err := chainntnfs.NewConfRequest(txid, pkScript)
- if err != nil {
- t.Fatalf("unable to create conf request: %v", err)
- }
-
- // The transaction should be found in the mempool at this point.
- _, txStatus, err = notifier.historicalConfDetails(confReq, 0, 0)
- if err != nil {
- t.Fatalf("unable to retrieve historical conf details: %v", err)
- }
-
- // Since it has yet to be included in a block, it should have been found
- // within the mempool.
- switch txStatus {
- case chainntnfs.TxFoundMempool:
- default:
- t.Fatalf("should have found the transaction within the "+
- "mempool, but did not: %v", txStatus)
- }
-
- // We'll now confirm this transaction and re-attempt to retrieve its
- // confirmation details.
- if _, err := harness.Node.Generate(1); err != nil {
- t.Fatalf("unable to generate block: %v", err)
- }
-
- _, txStatus, err = notifier.historicalConfDetails(confReq, 0, 0)
- if err != nil {
- t.Fatalf("unable to retrieve historical conf details: %v", err)
- }
-
- // Since the backend node's txindex is enabled and the transaction has
- // confirmed, we should be able to retrieve it using the txindex.
- switch txStatus {
- case chainntnfs.TxFoundIndex:
- default:
- t.Fatal("should have found the transaction within the " +
- "txindex, but did not")
- }
-}
-
-// TestHistoricalConfDetailsNoTxIndex ensures that we correctly retrieve
-// historical confirmation details using the set of fallback methods when the
-// backend node's txindex is disabled.
-// TODO(cjd): DISABLED TEST - needs investigation
-func _TestHistoricalConfDetailsNoTxIndex(t *testing.T) {
- t.Parallel()
-
- harness, tearDown := chainntnfs.NewMiner(t, nil, true, 25)
- defer tearDown()
-
- notifier := setUpNotifier(t, harness)
- defer notifier.Stop()
-
- // Since the node has its txindex disabled, we fall back to scanning the
- // chain manually. A transaction unknown to the network should not be
- // found.
- var unknownHash chainhash.Hash
- copy(unknownHash[:], bytes.Repeat([]byte{0x10}, 32))
- unknownConfReq, err := chainntnfs.NewConfRequest(&unknownHash, testScript)
- if err != nil {
- t.Fatalf("unable to create conf request: %v", err)
- }
- _, txStatus, err := notifier.historicalConfDetails(unknownConfReq, 0, 0)
- if err != nil {
- t.Fatalf("unable to retrieve historical conf details: %v", err)
- }
-
- switch txStatus {
- case chainntnfs.TxNotFoundManually:
- case chainntnfs.TxNotFoundIndex:
- t.Fatal("should have proceeded with fallback method, but did not")
- default:
- t.Fatal("should not have found non-existent transaction, but did")
- }
-
- // Now, we'll create a test transaction and attempt to retrieve its
- // confirmation details. We'll note its broadcast height to use as the
- // height hint when manually scanning the chain.
- _, currentHeight, err := harness.Node.GetBestBlock()
- if err != nil {
- t.Fatalf("unable to retrieve current height: %v", err)
- }
-
- txid, pkScript, err := chainntnfs.GetTestTxidAndScript(harness)
- if err != nil {
- t.Fatalf("unable to create tx: %v", err)
- }
- if err := chainntnfs.WaitForMempoolTx(harness, txid); err != nil {
- t.Fatalf("unable to find tx in the mempool: %v", err)
- }
- confReq, err := chainntnfs.NewConfRequest(txid, pkScript)
- if err != nil {
- t.Fatalf("unable to create conf request: %v", err)
- }
-
- _, txStatus, err = notifier.historicalConfDetails(confReq, 0, 0)
- if err != nil {
- t.Fatalf("unable to retrieve historical conf details: %v", err)
- }
-
- // Since it has yet to be included in a block, it should have been found
- // within the mempool.
- if txStatus != chainntnfs.TxFoundMempool {
- t.Fatal("should have found the transaction within the " +
- "mempool, but did not")
- }
-
- // We'll now confirm this transaction and re-attempt to retrieve its
- // confirmation details.
- if _, err := harness.Node.Generate(1); err != nil {
- t.Fatalf("unable to generate block: %v", err)
- }
-
- _, txStatus, err = notifier.historicalConfDetails(
- confReq, uint32(currentHeight), uint32(currentHeight)+1,
- )
- if err != nil {
- t.Fatalf("unable to retrieve historical conf details: %v", err)
- }
-
- // Since the backend node's txindex is disabled and the transaction has
- // confirmed, we should be able to find it by falling back to scanning
- // the chain manually.
- if txStatus != chainntnfs.TxFoundManually {
- t.Fatal("should have found the transaction by manually " +
- "scanning the chain, but did not")
- }
-}
-
-func TestMain(m *testing.M) {
- globalcfg.SelectConfig(globalcfg.BitcoinDefaults())
- os.Exit(m.Run())
-}
diff --git a/lnd/chainntnfs/btcdnotify/driver.go b/lnd/chainntnfs/btcdnotify/driver.go
deleted file mode 100644
index adfecfbf..00000000
--- a/lnd/chainntnfs/btcdnotify/driver.go
+++ /dev/null
@@ -1,60 +0,0 @@
-package btcdnotify
-
-import (
- "fmt"
-
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/chaincfg"
- "github.com/pkt-cash/pktd/lnd/chainntnfs"
- "github.com/pkt-cash/pktd/rpcclient"
-)
-
-// createNewNotifier creates a new instance of the ChainNotifier interface
-// implemented by BtcdNotifier.
-func createNewNotifier(args ...interface{}) (chainntnfs.ChainNotifier, er.R) {
- if len(args) != 4 {
- return nil, er.Errorf("incorrect number of arguments to "+
- ".New(...), expected 4, instead passed %v", len(args))
- }
-
- config, ok := args[0].(*rpcclient.ConnConfig)
- if !ok {
- return nil, er.New("first argument to btcdnotify.New " +
- "is incorrect, expected a *rpcclient.ConnConfig")
- }
-
- chainParams, ok := args[1].(*chaincfg.Params)
- if !ok {
- return nil, er.New("second argument to btcdnotify.New " +
- "is incorrect, expected a *chaincfg.Params")
- }
-
- spendHintCache, ok := args[2].(chainntnfs.SpendHintCache)
- if !ok {
- return nil, er.New("third argument to btcdnotify.New " +
- "is incorrect, expected a chainntnfs.SpendHintCache")
- }
-
- confirmHintCache, ok := args[3].(chainntnfs.ConfirmHintCache)
- if !ok {
- return nil, er.New("fourth argument to btcdnotify.New " +
- "is incorrect, expected a chainntnfs.ConfirmHintCache")
- }
-
- return New(config, chainParams, spendHintCache, confirmHintCache)
-}
-
-// init registers a driver for the BtcdNotifier concrete implementation of the
-// chainntnfs.ChainNotifier interface.
-func init() {
- // Register the driver.
- notifier := &chainntnfs.NotifierDriver{
- NotifierType: notifierType,
- New: createNewNotifier,
- }
-
- if err := chainntnfs.RegisterNotifier(notifier); err != nil {
- panic(fmt.Sprintf("failed to register notifier driver '%s': %v",
- notifierType, err))
- }
-}
diff --git a/lnd/chainntnfs/height_hint_cache.go b/lnd/chainntnfs/height_hint_cache.go
deleted file mode 100644
index 3089a90e..00000000
--- a/lnd/chainntnfs/height_hint_cache.go
+++ /dev/null
@@ -1,325 +0,0 @@
-package chainntnfs
-
-import (
- "bytes"
-
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/lnd/channeldb"
- "github.com/pkt-cash/pktd/lnd/channeldb/kvdb"
- "github.com/pkt-cash/pktd/pktlog/log"
-)
-
-var (
- // spendHintBucket is the name of the bucket which houses the height
- // hint for outpoints. Each height hint represents the earliest height
- // at which its corresponding outpoint could have been spent within.
- spendHintBucket = []byte("spend-hints")
-
- // confirmHintBucket is the name of the bucket which houses the height
- // hints for transactions. Each height hint represents the earliest
- // height at which its corresponding transaction could have been
- // confirmed within.
- confirmHintBucket = []byte("confirm-hints")
-
- Err = er.NewErrorType("lnd.chainntnfs")
-
- // ErrCorruptedHeightHintCache indicates that the on-disk bucketing
- // structure has altered since the height hint cache instance was
- // initialized.
- ErrCorruptedHeightHintCache = Err.CodeWithDetail("ErrCorruptedHeightHintCache",
- "height hint cache has been corrupted")
-
- // ErrSpendHintNotFound is an error returned when a spend hint for an
- // outpoint was not found.
- ErrSpendHintNotFound = Err.CodeWithDetail("ErrSpendHintNotFound",
- "spend hint not found")
-
- // ErrConfirmHintNotFound is an error returned when a confirm hint for a
- // transaction was not found.
- ErrConfirmHintNotFound = Err.CodeWithDetail("ErrConfirmHintNotFound",
- "confirm hint not found")
-)
-
-// CacheConfig contains the HeightHintCache configuration
-type CacheConfig struct {
- // QueryDisable prevents reliance on the Height Hint Cache. This is
- // necessary to recover from an edge case when the height recorded in
- // the cache is higher than the actual height of a spend, causing a
- // channel to become "stuck" in a pending close state.
- QueryDisable bool
-}
-
-// SpendHintCache is an interface whose duty is to cache spend hints for
-// outpoints. A spend hint is defined as the earliest height in the chain at
-// which an outpoint could have been spent within.
-type SpendHintCache interface {
- // CommitSpendHint commits a spend hint for the outpoints to the cache.
- CommitSpendHint(height uint32, spendRequests ...SpendRequest) er.R
-
- // QuerySpendHint returns the latest spend hint for an outpoint.
- // ErrSpendHintNotFound is returned if a spend hint does not exist
- // within the cache for the outpoint.
- QuerySpendHint(spendRequest SpendRequest) (uint32, er.R)
-
- // PurgeSpendHint removes the spend hint for the outpoints from the
- // cache.
- PurgeSpendHint(spendRequests ...SpendRequest) er.R
-}
-
-// ConfirmHintCache is an interface whose duty is to cache confirm hints for
-// transactions. A confirm hint is defined as the earliest height in the chain
-// at which a transaction could have been included in a block.
-type ConfirmHintCache interface {
- // CommitConfirmHint commits a confirm hint for the transactions to the
- // cache.
- CommitConfirmHint(height uint32, confRequests ...ConfRequest) er.R
-
- // QueryConfirmHint returns the latest confirm hint for a transaction
- // hash. ErrConfirmHintNotFound is returned if a confirm hint does not
- // exist within the cache for the transaction hash.
- QueryConfirmHint(confRequest ConfRequest) (uint32, er.R)
-
- // PurgeConfirmHint removes the confirm hint for the transactions from
- // the cache.
- PurgeConfirmHint(confRequests ...ConfRequest) er.R
-}
-
-// HeightHintCache is an implementation of the SpendHintCache and
-// ConfirmHintCache interfaces backed by a channeldb DB instance where the hints
-// will be stored.
-type HeightHintCache struct {
- cfg CacheConfig
- db *channeldb.DB
-}
-
-// Compile-time checks to ensure HeightHintCache satisfies the SpendHintCache
-// and ConfirmHintCache interfaces.
-var _ SpendHintCache = (*HeightHintCache)(nil)
-var _ ConfirmHintCache = (*HeightHintCache)(nil)
-
-// NewHeightHintCache returns a new height hint cache backed by a database.
-func NewHeightHintCache(cfg CacheConfig, db *channeldb.DB) (*HeightHintCache, er.R) {
- cache := &HeightHintCache{cfg, db}
- if err := cache.initBuckets(); err != nil {
- return nil, err
- }
-
- return cache, nil
-}
-
-// initBuckets ensures that the primary buckets used by the circuit are
-// initialized so that we can assume their existence after startup.
-func (c *HeightHintCache) initBuckets() er.R {
- return kvdb.Batch(c.db.Backend, func(tx kvdb.RwTx) er.R {
- _, err := tx.CreateTopLevelBucket(spendHintBucket)
- if err != nil {
- return err
- }
-
- _, err = tx.CreateTopLevelBucket(confirmHintBucket)
- return err
- })
-}
-
-// CommitSpendHint commits a spend hint for the outpoints to the cache.
-func (c *HeightHintCache) CommitSpendHint(height uint32,
- spendRequests ...SpendRequest) er.R {
-
- if len(spendRequests) == 0 {
- return nil
- }
-
- log.Tracef("Updating spend hint to height %d for %v", height,
- spendRequests)
-
- return kvdb.Batch(c.db.Backend, func(tx kvdb.RwTx) er.R {
- spendHints := tx.ReadWriteBucket(spendHintBucket)
- if spendHints == nil {
- return ErrCorruptedHeightHintCache.Default()
- }
-
- var hint bytes.Buffer
- if err := channeldb.WriteElement(&hint, height); err != nil {
- return err
- }
-
- for _, spendRequest := range spendRequests {
- spendHintKey, err := spendRequest.SpendHintKey()
- if err != nil {
- return err
- }
- err = spendHints.Put(spendHintKey, hint.Bytes())
- if err != nil {
- return err
- }
- }
-
- return nil
- })
-}
-
-// QuerySpendHint returns the latest spend hint for an outpoint.
-// ErrSpendHintNotFound is returned if a spend hint does not exist within the
-// cache for the outpoint.
-func (c *HeightHintCache) QuerySpendHint(spendRequest SpendRequest) (uint32, er.R) {
- var hint uint32
- if c.cfg.QueryDisable {
- log.Debugf("Ignoring spend height hint for %v (height hint cache "+
- "query disabled)", spendRequest)
- return 0, nil
- }
- err := kvdb.View(c.db, func(tx kvdb.RTx) er.R {
- spendHints := tx.ReadBucket(spendHintBucket)
- if spendHints == nil {
- return ErrCorruptedHeightHintCache.Default()
- }
-
- spendHintKey, err := spendRequest.SpendHintKey()
- if err != nil {
- return err
- }
- spendHint := spendHints.Get(spendHintKey)
- if spendHint == nil {
- return ErrSpendHintNotFound.Default()
- }
-
- return channeldb.ReadElement(bytes.NewReader(spendHint), &hint)
- }, func() {
- hint = 0
- })
- if err != nil {
- return 0, err
- }
-
- return hint, nil
-}
-
-// PurgeSpendHint removes the spend hint for the outpoints from the cache.
-func (c *HeightHintCache) PurgeSpendHint(spendRequests ...SpendRequest) er.R {
- if len(spendRequests) == 0 {
- return nil
- }
-
- log.Tracef("Removing spend hints for %v", spendRequests)
-
- return kvdb.Batch(c.db.Backend, func(tx kvdb.RwTx) er.R {
- spendHints := tx.ReadWriteBucket(spendHintBucket)
- if spendHints == nil {
- return ErrCorruptedHeightHintCache.Default()
- }
-
- for _, spendRequest := range spendRequests {
- spendHintKey, err := spendRequest.SpendHintKey()
- if err != nil {
- return err
- }
- if err := spendHints.Delete(spendHintKey); err != nil {
- return err
- }
- }
-
- return nil
- })
-}
-
-// CommitConfirmHint commits a confirm hint for the transactions to the cache.
-func (c *HeightHintCache) CommitConfirmHint(height uint32,
- confRequests ...ConfRequest) er.R {
-
- if len(confRequests) == 0 {
- return nil
- }
-
- log.Tracef("Updating confirm hints to height %d for %v", height,
- confRequests)
-
- return kvdb.Batch(c.db.Backend, func(tx kvdb.RwTx) er.R {
- confirmHints := tx.ReadWriteBucket(confirmHintBucket)
- if confirmHints == nil {
- return ErrCorruptedHeightHintCache.Default()
- }
-
- var hint bytes.Buffer
- if err := channeldb.WriteElement(&hint, height); err != nil {
- return err
- }
-
- for _, confRequest := range confRequests {
- confHintKey, err := confRequest.ConfHintKey()
- if err != nil {
- return err
- }
- err = confirmHints.Put(confHintKey, hint.Bytes())
- if err != nil {
- return err
- }
- }
-
- return nil
- })
-}
-
-// QueryConfirmHint returns the latest confirm hint for a transaction hash.
-// ErrConfirmHintNotFound is returned if a confirm hint does not exist within
-// the cache for the transaction hash.
-func (c *HeightHintCache) QueryConfirmHint(confRequest ConfRequest) (uint32, er.R) {
- var hint uint32
- if c.cfg.QueryDisable {
- log.Debugf("Ignoring confirmation height hint for %v (height hint "+
- "cache query disabled)", confRequest)
- return 0, nil
- }
- err := kvdb.View(c.db, func(tx kvdb.RTx) er.R {
- confirmHints := tx.ReadBucket(confirmHintBucket)
- if confirmHints == nil {
- return ErrCorruptedHeightHintCache.Default()
- }
-
- confHintKey, err := confRequest.ConfHintKey()
- if err != nil {
- return err
- }
- confirmHint := confirmHints.Get(confHintKey)
- if confirmHint == nil {
- return ErrConfirmHintNotFound.Default()
- }
-
- return channeldb.ReadElement(bytes.NewReader(confirmHint), &hint)
- }, func() {
- hint = 0
- })
- if err != nil {
- return 0, err
- }
-
- return hint, nil
-}
-
-// PurgeConfirmHint removes the confirm hint for the transactions from the
-// cache.
-func (c *HeightHintCache) PurgeConfirmHint(confRequests ...ConfRequest) er.R {
- if len(confRequests) == 0 {
- return nil
- }
-
- log.Tracef("Removing confirm hints for %v", confRequests)
-
- return kvdb.Batch(c.db.Backend, func(tx kvdb.RwTx) er.R {
- confirmHints := tx.ReadWriteBucket(confirmHintBucket)
- if confirmHints == nil {
- return ErrCorruptedHeightHintCache.Default()
- }
-
- for _, confRequest := range confRequests {
- confHintKey, err := confRequest.ConfHintKey()
- if err != nil {
- return err
- }
- if err := confirmHints.Delete(confHintKey); err != nil {
- return err
- }
- }
-
- return nil
- })
-}
diff --git a/lnd/chainntnfs/height_hint_cache_test.go b/lnd/chainntnfs/height_hint_cache_test.go
deleted file mode 100644
index eafdc47e..00000000
--- a/lnd/chainntnfs/height_hint_cache_test.go
+++ /dev/null
@@ -1,203 +0,0 @@
-package chainntnfs
-
-import (
- "bytes"
- "io/ioutil"
- "testing"
-
- "github.com/pkt-cash/pktd/chaincfg/chainhash"
- "github.com/pkt-cash/pktd/lnd/channeldb"
- "github.com/pkt-cash/pktd/wire"
- "github.com/stretchr/testify/require"
-)
-
-func initHintCache(t *testing.T) *HeightHintCache {
- t.Helper()
-
- defaultCfg := CacheConfig{
- QueryDisable: false,
- }
-
- return initHintCacheWithConfig(t, defaultCfg)
-}
-
-func initHintCacheWithConfig(t *testing.T, cfg CacheConfig) *HeightHintCache {
- t.Helper()
-
- tempDir, errr := ioutil.TempDir("", "kek")
- if errr != nil {
- t.Fatalf("unable to create temp dir: %v", errr)
- }
- db, err := channeldb.Open(tempDir)
- if err != nil {
- t.Fatalf("unable to create db: %v", err)
- }
- hintCache, err := NewHeightHintCache(cfg, db)
- if err != nil {
- t.Fatalf("unable to create hint cache: %v", err)
- }
-
- return hintCache
-}
-
-// TestHeightHintCacheConfirms ensures that the height hint cache properly
-// caches confirm hints for transactions.
-func TestHeightHintCacheConfirms(t *testing.T) {
- t.Parallel()
-
- hintCache := initHintCache(t)
-
- // Querying for a transaction hash not found within the cache should
- // return an error indication so.
- var unknownHash chainhash.Hash
- copy(unknownHash[:], bytes.Repeat([]byte{0x01}, 32))
- unknownConfRequest := ConfRequest{TxID: unknownHash}
- _, err := hintCache.QueryConfirmHint(unknownConfRequest)
- if !ErrConfirmHintNotFound.Is(err) {
- t.Fatalf("expected ErrConfirmHintNotFound, got: %v", err)
- }
-
- // Now, we'll create some transaction hashes and commit them to the
- // cache with the same confirm hint.
- const height = 100
- const numHashes = 5
- confRequests := make([]ConfRequest, numHashes)
- for i := 0; i < numHashes; i++ {
- var txHash chainhash.Hash
- copy(txHash[:], bytes.Repeat([]byte{byte(i + 1)}, 32))
- confRequests[i] = ConfRequest{TxID: txHash}
- }
-
- err = hintCache.CommitConfirmHint(height, confRequests...)
- if err != nil {
- t.Fatalf("unable to add entries to cache: %v", err)
- }
-
- // With the hashes committed, we'll now query the cache to ensure that
- // we're able to properly retrieve the confirm hints.
- for _, confRequest := range confRequests {
- confirmHint, err := hintCache.QueryConfirmHint(confRequest)
- if err != nil {
- t.Fatalf("unable to query for hint of %v: %v", confRequest, err)
- }
- if confirmHint != height {
- t.Fatalf("expected confirm hint %d, got %d", height,
- confirmHint)
- }
- }
-
- // We'll also attempt to purge all of them in a single database
- // transaction.
- if err := hintCache.PurgeConfirmHint(confRequests...); err != nil {
- t.Fatalf("unable to remove confirm hints: %v", err)
- }
-
- // Finally, we'll attempt to query for each hash. We should expect not
- // to find a hint for any of them.
- for _, confRequest := range confRequests {
- _, err := hintCache.QueryConfirmHint(confRequest)
- if !ErrConfirmHintNotFound.Is(err) {
- t.Fatalf("expected ErrConfirmHintNotFound, got :%v", err)
- }
- }
-}
-
-// TestHeightHintCacheSpends ensures that the height hint cache properly caches
-// spend hints for outpoints.
-func TestHeightHintCacheSpends(t *testing.T) {
- t.Parallel()
-
- hintCache := initHintCache(t)
-
- // Querying for an outpoint not found within the cache should return an
- // error indication so.
- unknownOutPoint := wire.OutPoint{Index: 1}
- unknownSpendRequest := SpendRequest{OutPoint: unknownOutPoint}
- _, err := hintCache.QuerySpendHint(unknownSpendRequest)
- if !ErrSpendHintNotFound.Is(err) {
- t.Fatalf("expected ErrSpendHintNotFound, got: %v", err)
- }
-
- // Now, we'll create some outpoints and commit them to the cache with
- // the same spend hint.
- const height = 100
- const numOutpoints = 5
- spendRequests := make([]SpendRequest, numOutpoints)
- for i := uint32(0); i < numOutpoints; i++ {
- spendRequests[i] = SpendRequest{
- OutPoint: wire.OutPoint{Index: i + 1},
- }
- }
-
- err = hintCache.CommitSpendHint(height, spendRequests...)
- if err != nil {
- t.Fatalf("unable to add entries to cache: %v", err)
- }
-
- // With the outpoints committed, we'll now query the cache to ensure
- // that we're able to properly retrieve the confirm hints.
- for _, spendRequest := range spendRequests {
- spendHint, err := hintCache.QuerySpendHint(spendRequest)
- if err != nil {
- t.Fatalf("unable to query for hint: %v", err)
- }
- if spendHint != height {
- t.Fatalf("expected spend hint %d, got %d", height,
- spendHint)
- }
- }
-
- // We'll also attempt to purge all of them in a single database
- // transaction.
- if err := hintCache.PurgeSpendHint(spendRequests...); err != nil {
- t.Fatalf("unable to remove spend hint: %v", err)
- }
-
- // Finally, we'll attempt to query for each outpoint. We should expect
- // not to find a hint for any of them.
- for _, spendRequest := range spendRequests {
- _, err = hintCache.QuerySpendHint(spendRequest)
- if !ErrSpendHintNotFound.Is(err) {
- t.Fatalf("expected ErrSpendHintNotFound, got: %v", err)
- }
- }
-}
-
-// TestQueryDisable asserts querying for confirmation or spend hints always
-// return height zero when QueryDisabled is set to true in the CacheConfig.
-func TestQueryDisable(t *testing.T) {
- cfg := CacheConfig{
- QueryDisable: true,
- }
-
- hintCache := initHintCacheWithConfig(t, cfg)
-
- // Insert a new confirmation hint with a non-zero height.
- const confHeight = 100
- confRequest := ConfRequest{
- TxID: chainhash.Hash{0x01, 0x02, 0x03},
- }
- err := hintCache.CommitConfirmHint(confHeight, confRequest)
- require.Nil(t, err)
-
- // Query for the confirmation hint, which should return zero.
- cachedConfHeight, err := hintCache.QueryConfirmHint(confRequest)
- require.Nil(t, err)
- require.Equal(t, uint32(0), cachedConfHeight)
-
- // Insert a new spend hint with a non-zero height.
- const spendHeight = 200
- spendRequest := SpendRequest{
- OutPoint: wire.OutPoint{
- Hash: chainhash.Hash{0x4, 0x05, 0x06},
- Index: 42,
- },
- }
- err = hintCache.CommitSpendHint(spendHeight, spendRequest)
- require.Nil(t, err)
-
- // Query for the spend hint, which should return zero.
- cachedSpendHeight, err := hintCache.QuerySpendHint(spendRequest)
- require.Nil(t, err)
- require.Equal(t, uint32(0), cachedSpendHeight)
-}
diff --git a/lnd/chainntnfs/interface.go b/lnd/chainntnfs/interface.go
deleted file mode 100644
index c60f6e38..00000000
--- a/lnd/chainntnfs/interface.go
+++ /dev/null
@@ -1,704 +0,0 @@
-package chainntnfs
-
-import (
- "bytes"
- "fmt"
- "strings"
- "sync"
-
- "github.com/pkt-cash/pktd/btcjson"
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/btcutil/util"
- "github.com/pkt-cash/pktd/chaincfg/chainhash"
- "github.com/pkt-cash/pktd/pktlog/log"
- "github.com/pkt-cash/pktd/wire"
-)
-
-var (
- // ErrChainNotifierShuttingDown is used when we are trying to
- // measure a spend notification when notifier is already stopped.
- ErrChainNotifierShuttingDown = Err.CodeWithDetail("ErrChainNotifierShuttingDown", "chain notifier shutting down")
-)
-
-// TxConfStatus denotes the status of a transaction's lookup.
-type TxConfStatus uint8
-
-const (
- // TxFoundMempool denotes that the transaction was found within the
- // backend node's mempool.
- TxFoundMempool TxConfStatus = iota
-
- // TxFoundIndex denotes that the transaction was found within the
- // backend node's txindex.
- TxFoundIndex
-
- // TxNotFoundIndex denotes that the transaction was not found within the
- // backend node's txindex.
- TxNotFoundIndex
-
- // TxFoundManually denotes that the transaction was found within the
- // chain by scanning for it manually.
- TxFoundManually
-
- // TxNotFoundManually denotes that the transaction was not found within
- // the chain by scanning for it manually.
- TxNotFoundManually
-)
-
-// String returns the string representation of the TxConfStatus.
-func (t TxConfStatus) String() string {
- switch t {
- case TxFoundMempool:
- return "TxFoundMempool"
-
- case TxFoundIndex:
- return "TxFoundIndex"
-
- case TxNotFoundIndex:
- return "TxNotFoundIndex"
-
- case TxFoundManually:
- return "TxFoundManually"
-
- case TxNotFoundManually:
- return "TxNotFoundManually"
-
- default:
- return "unknown"
- }
-}
-
-// ChainNotifier represents a trusted source to receive notifications concerning
-// targeted events on the Bitcoin blockchain. The interface specification is
-// intentionally general in order to support a wide array of chain notification
-// implementations such as: btcd's websockets notifications, Bitcoin Core's
-// ZeroMQ notifications, various Bitcoin API services, Electrum servers, etc.
-//
-// Concrete implementations of ChainNotifier should be able to support multiple
-// concurrent client requests, as well as multiple concurrent notification events.
-type ChainNotifier interface {
- // RegisterConfirmationsNtfn registers an intent to be notified once
- // txid reaches numConfs confirmations. We also pass in the pkScript as
- // the default light client instead needs to match on scripts created in
- // the block. If a nil txid is passed in, then not only should we match
- // on the script, but we should also dispatch once the transaction
- // containing the script reaches numConfs confirmations. This can be
- // useful in instances where we only know the script in advance, but not
- // the transaction containing it.
- //
- // The returned ConfirmationEvent should properly notify the client once
- // the specified number of confirmations has been reached for the txid,
- // as well as if the original tx gets re-org'd out of the mainchain. The
- // heightHint parameter is provided as a convenience to light clients.
- // It heightHint denotes the earliest height in the blockchain in which
- // the target txid _could_ have been included in the chain. This can be
- // used to bound the search space when checking to see if a notification
- // can immediately be dispatched due to historical data.
- //
- // NOTE: Dispatching notifications to multiple clients subscribed to
- // the same (txid, numConfs) tuple MUST be supported.
- RegisterConfirmationsNtfn(txid *chainhash.Hash, pkScript []byte,
- numConfs, heightHint uint32) (*ConfirmationEvent, er.R)
-
- // RegisterSpendNtfn registers an intent to be notified once the target
- // outpoint is successfully spent within a transaction. The script that
- // the outpoint creates must also be specified. This allows this
- // interface to be implemented by BIP 158-like filtering. If a nil
- // outpoint is passed in, then not only should we match on the script,
- // but we should also dispatch once a transaction spends the output
- // containing said script. This can be useful in instances where we only
- // know the script in advance, but not the outpoint itself.
- //
- // The returned SpendEvent will receive a send on the 'Spend'
- // transaction once a transaction spending the input is detected on the
- // blockchain. The heightHint parameter is provided as a convenience to
- // light clients. It denotes the earliest height in the blockchain in
- // which the target output could have been spent.
- //
- // NOTE: The notification should only be triggered when the spending
- // transaction receives a single confirmation.
- //
- // NOTE: Dispatching notifications to multiple clients subscribed to a
- // spend of the same outpoint MUST be supported.
- RegisterSpendNtfn(outpoint *wire.OutPoint, pkScript []byte,
- heightHint uint32) (*SpendEvent, er.R)
-
- // RegisterBlockEpochNtfn registers an intent to be notified of each
- // new block connected to the tip of the main chain. The returned
- // BlockEpochEvent struct contains a channel which will be sent upon
- // for each new block discovered.
- //
- // Clients have the option of passing in their best known block.
- // If they specify a block, the ChainNotifier checks whether the client
- // is behind on blocks. If they are, the ChainNotifier sends a backlog
- // of block notifications for the missed blocks. If they do not provide
- // one, then a notification will be dispatched immediately for the
- // current tip of the chain upon a successful registration.
- RegisterBlockEpochNtfn(*BlockEpoch) (*BlockEpochEvent, er.R)
-
- // Start the ChainNotifier. Once started, the implementation should be
- // ready, and able to receive notification registrations from clients.
- Start() er.R
-
- // Started returns true if this instance has been started, and false otherwise.
- Started() bool
-
- // Stops the concrete ChainNotifier. Once stopped, the ChainNotifier
- // should disallow any future requests from potential clients.
- // Additionally, all pending client notifications will be canceled
- // by closing the related channels on the *Event's.
- Stop() er.R
-}
-
-// TxConfirmation carries some additional block-level details of the exact
-// block that specified transactions was confirmed within.
-type TxConfirmation struct {
- // BlockHash is the hash of the block that confirmed the original
- // transition.
- BlockHash *chainhash.Hash
-
- // BlockHeight is the height of the block in which the transaction was
- // confirmed within.
- BlockHeight uint32
-
- // TxIndex is the index within the block of the ultimate confirmed
- // transaction.
- TxIndex uint32
-
- // Tx is the transaction for which the notification was requested for.
- Tx *wire.MsgTx
-}
-
-// ConfirmationEvent encapsulates a confirmation notification. With this struct,
-// callers can be notified of: the instance the target txid reaches the targeted
-// number of confirmations, how many confirmations are left for the target txid
-// to be fully confirmed at every new block height, and also in the event that
-// the original txid becomes disconnected from the blockchain as a result of a
-// re-org.
-//
-// Once the txid reaches the specified number of confirmations, the 'Confirmed'
-// channel will be sent upon fulfilling the notification.
-//
-// If the event that the original transaction becomes re-org'd out of the main
-// chain, the 'NegativeConf' will be sent upon with a value representing the
-// depth of the re-org.
-//
-// NOTE: If the caller wishes to cancel their registered spend notification,
-// the Cancel closure MUST be called.
-type ConfirmationEvent struct {
- // Confirmed is a channel that will be sent upon once the transaction
- // has been fully confirmed. The struct sent will contain all the
- // details of the channel's confirmation.
- //
- // NOTE: This channel must be buffered.
- Confirmed chan *TxConfirmation
-
- // Updates is a channel that will sent upon, at every incremental
- // confirmation, how many confirmations are left to declare the
- // transaction as fully confirmed.
- //
- // NOTE: This channel must be buffered with the number of required
- // confirmations.
- Updates chan uint32
-
- // NegativeConf is a channel that will be sent upon if the transaction
- // confirms, but is later reorged out of the chain. The integer sent
- // through the channel represents the reorg depth.
- //
- // NOTE: This channel must be buffered.
- NegativeConf chan int32
-
- // Done is a channel that gets sent upon once the confirmation request
- // is no longer under the risk of being reorged out of the chain.
- //
- // NOTE: This channel must be buffered.
- Done chan struct{}
-
- // Cancel is a closure that should be executed by the caller in the case
- // that they wish to prematurely abandon their registered confirmation
- // notification.
- Cancel func()
-}
-
-// NewConfirmationEvent constructs a new ConfirmationEvent with newly opened
-// channels.
-func NewConfirmationEvent(numConfs uint32, cancel func()) *ConfirmationEvent {
- return &ConfirmationEvent{
- Confirmed: make(chan *TxConfirmation, 1),
- Updates: make(chan uint32, numConfs),
- NegativeConf: make(chan int32, 1),
- Done: make(chan struct{}, 1),
- Cancel: cancel,
- }
-}
-
-// SpendDetail contains details pertaining to a spent output. This struct itself
-// is the spentness notification. It includes the original outpoint which triggered
-// the notification, the hash of the transaction spending the output, the
-// spending transaction itself, and finally the input index which spent the
-// target output.
-type SpendDetail struct {
- SpentOutPoint *wire.OutPoint
- SpenderTxHash *chainhash.Hash
- SpendingTx *wire.MsgTx
- SpenderInputIndex uint32
- SpendingHeight int32
-}
-
-// String returns a string representation of SpendDetail.
-func (s *SpendDetail) String() string {
- return fmt.Sprintf("%v[%d] spending %v at height=%v", s.SpenderTxHash,
- s.SpenderInputIndex, s.SpentOutPoint, s.SpendingHeight)
-}
-
-// SpendEvent encapsulates a spentness notification. Its only field 'Spend' will
-// be sent upon once the target output passed into RegisterSpendNtfn has been
-// spent on the blockchain.
-//
-// NOTE: If the caller wishes to cancel their registered spend notification,
-// the Cancel closure MUST be called.
-type SpendEvent struct {
- // Spend is a receive only channel which will be sent upon once the
- // target outpoint has been spent.
- //
- // NOTE: This channel must be buffered.
- Spend chan *SpendDetail
-
- // Reorg is a channel that will be sent upon once we detect the spending
- // transaction of the outpoint in question has been reorged out of the
- // chain.
- //
- // NOTE: This channel must be buffered.
- Reorg chan struct{}
-
- // Done is a channel that gets sent upon once the confirmation request
- // is no longer under the risk of being reorged out of the chain.
- //
- // NOTE: This channel must be buffered.
- Done chan struct{}
-
- // Cancel is a closure that should be executed by the caller in the case
- // that they wish to prematurely abandon their registered spend
- // notification.
- Cancel func()
-}
-
-// NewSpendEvent constructs a new SpendEvent with newly opened channels.
-func NewSpendEvent(cancel func()) *SpendEvent {
- return &SpendEvent{
- Spend: make(chan *SpendDetail, 1),
- Reorg: make(chan struct{}, 1),
- Done: make(chan struct{}, 1),
- Cancel: cancel,
- }
-}
-
-// BlockEpoch represents metadata concerning each new block connected to the
-// main chain.
-type BlockEpoch struct {
- // Hash is the block hash of the latest block to be added to the tip of
- // the main chain.
- Hash *chainhash.Hash
-
- // Height is the height of the latest block to be added to the tip of
- // the main chain.
- Height int32
-}
-
-// BlockEpochEvent encapsulates an on-going stream of block epoch
-// notifications. Its only field 'Epochs' will be sent upon for each new block
-// connected to the main-chain.
-//
-// NOTE: If the caller wishes to cancel their registered block epoch
-// notification, the Cancel closure MUST be called.
-type BlockEpochEvent struct {
- // Epochs is a receive only channel that will be sent upon each time a
- // new block is connected to the end of the main chain.
- //
- // NOTE: This channel must be buffered.
- Epochs <-chan *BlockEpoch
-
- // Cancel is a closure that should be executed by the caller in the case
- // that they wish to abandon their registered block epochs notification.
- Cancel func()
-}
-
-// NotifierDriver represents a "driver" for a particular interface. A driver is
-// identified by a globally unique string identifier along with a 'New()'
-// method which is responsible for initializing a particular ChainNotifier
-// concrete implementation.
-type NotifierDriver struct {
- // NotifierType is a string which uniquely identifies the ChainNotifier
- // that this driver, drives.
- NotifierType string
-
- // New creates a new instance of a concrete ChainNotifier
- // implementation given a variadic set up arguments. The function takes
- // a variadic number of interface parameters in order to provide
- // initialization flexibility, thereby accommodating several potential
- // ChainNotifier implementations.
- New func(args ...interface{}) (ChainNotifier, er.R)
-}
-
-var (
- notifiers = make(map[string]*NotifierDriver)
- registerMtx sync.Mutex
-)
-
-// RegisteredNotifiers returns a slice of all currently registered notifiers.
-//
-// NOTE: This function is safe for concurrent access.
-func RegisteredNotifiers() []*NotifierDriver {
- registerMtx.Lock()
- defer registerMtx.Unlock()
-
- drivers := make([]*NotifierDriver, 0, len(notifiers))
- for _, driver := range notifiers {
- drivers = append(drivers, driver)
- }
-
- return drivers
-}
-
-// RegisterNotifier registers a NotifierDriver which is capable of driving a
-// concrete ChainNotifier interface. In the case that this driver has already
-// been registered, an error is returned.
-//
-// NOTE: This function is safe for concurrent access.
-func RegisterNotifier(driver *NotifierDriver) er.R {
- registerMtx.Lock()
- defer registerMtx.Unlock()
-
- if _, ok := notifiers[driver.NotifierType]; ok {
- return er.Errorf("notifier already registered")
- }
-
- notifiers[driver.NotifierType] = driver
-
- return nil
-}
-
-// SupportedNotifiers returns a slice of strings that represent the database
-// drivers that have been registered and are therefore supported.
-//
-// NOTE: This function is safe for concurrent access.
-func SupportedNotifiers() []string {
- registerMtx.Lock()
- defer registerMtx.Unlock()
-
- supportedNotifiers := make([]string, 0, len(notifiers))
- for driverName := range notifiers {
- supportedNotifiers = append(supportedNotifiers, driverName)
- }
-
- return supportedNotifiers
-}
-
-// ChainConn enables notifiers to pass in their chain backend to interface
-// functions that require it.
-type ChainConn interface {
- // GetBlockHeader returns the block header for a hash.
- GetBlockHeader(blockHash *chainhash.Hash) (*wire.BlockHeader, er.R)
-
- // GetBlockHeaderVerbose returns the verbose block header for a hash.
- GetBlockHeaderVerbose(blockHash *chainhash.Hash) (
- *btcjson.GetBlockHeaderVerboseResult, er.R)
-
- // GetBlockHash returns the hash from a block height.
- GetBlockHash(blockHeight int64) (*chainhash.Hash, er.R)
-}
-
-// GetCommonBlockAncestorHeight takes in:
-// (1) the hash of a block that has been reorged out of the main chain
-// (2) the hash of the block of the same height from the main chain
-// It returns the height of the nearest common ancestor between the two hashes,
-// or an error
-func GetCommonBlockAncestorHeight(chainConn ChainConn, reorgHash,
- chainHash chainhash.Hash) (int32, er.R) {
-
- for reorgHash != chainHash {
- reorgHeader, err := chainConn.GetBlockHeader(&reorgHash)
- if err != nil {
- return 0, er.Errorf("unable to get header for hash=%v: %v",
- reorgHash, err)
- }
- chainHeader, err := chainConn.GetBlockHeader(&chainHash)
- if err != nil {
- return 0, er.Errorf("unable to get header for hash=%v: %v",
- chainHash, err)
- }
- reorgHash = reorgHeader.PrevBlock
- chainHash = chainHeader.PrevBlock
- }
-
- verboseHeader, err := chainConn.GetBlockHeaderVerbose(&chainHash)
- if err != nil {
- return 0, er.Errorf("unable to get verbose header for hash=%v: %v",
- chainHash, err)
- }
-
- return verboseHeader.Height, nil
-}
-
-// GetClientMissedBlocks uses a client's best block to determine what blocks
-// it missed being notified about, and returns them in a slice. Its
-// backendStoresReorgs parameter tells it whether or not the notifier's
-// chainConn stores information about blocks that have been reorged out of the
-// chain, which allows GetClientMissedBlocks to find out whether the client's
-// best block has been reorged out of the chain, rewind to the common ancestor
-// and return blocks starting right after the common ancestor.
-func GetClientMissedBlocks(chainConn ChainConn, clientBestBlock *BlockEpoch,
- notifierBestHeight int32, backendStoresReorgs bool) ([]BlockEpoch, er.R) {
-
- startingHeight := clientBestBlock.Height
- if backendStoresReorgs {
- // If a reorg causes the client's best hash to be incorrect,
- // retrieve the closest common ancestor and dispatch
- // notifications from there.
- hashAtBestHeight, err := chainConn.GetBlockHash(
- int64(clientBestBlock.Height))
- if err != nil {
- return nil, er.Errorf("unable to find blockhash for "+
- "height=%d: %v", clientBestBlock.Height, err)
- }
-
- startingHeight, err = GetCommonBlockAncestorHeight(
- chainConn, *clientBestBlock.Hash, *hashAtBestHeight,
- )
- if err != nil {
- return nil, er.Errorf("unable to find common ancestor: "+
- "%v", err)
- }
- }
-
- // We want to start dispatching historical notifications from the block
- // right after the client's best block, to avoid a redundant notification.
- missedBlocks, err := getMissedBlocks(
- chainConn, startingHeight+1, notifierBestHeight+1,
- )
- if err != nil {
- return nil, er.Errorf("unable to get missed blocks: %v", err)
- }
-
- return missedBlocks, nil
-}
-
-// RewindChain handles internal state updates for the notifier's TxNotifier. It
-// has no effect if given a height greater than or equal to our current best
-// known height. It returns the new best block for the notifier.
-func RewindChain(chainConn ChainConn, txNotifier *TxNotifier,
- currBestBlock BlockEpoch, targetHeight int32) (BlockEpoch, er.R) {
-
- newBestBlock := BlockEpoch{
- Height: currBestBlock.Height,
- Hash: currBestBlock.Hash,
- }
-
- for height := currBestBlock.Height; height > targetHeight; height-- {
- hash, err := chainConn.GetBlockHash(int64(height - 1))
- if err != nil {
- return newBestBlock, er.Errorf("unable to "+
- "find blockhash for disconnected height=%d: %v",
- height, err)
- }
-
- log.Infof("Block disconnected from main chain: "+
- "height=%v, sha=%v", height, newBestBlock.Hash)
-
- err = txNotifier.DisconnectTip(uint32(height))
- if err != nil {
- return newBestBlock, er.Errorf("unable to "+
- " disconnect tip for height=%d: %v",
- height, err)
- }
- newBestBlock.Height = height - 1
- newBestBlock.Hash = hash
- }
- return newBestBlock, nil
-}
-
-// HandleMissedBlocks is called when the chain backend for a notifier misses a
-// series of blocks, handling a reorg if necessary. Its backendStoresReorgs
-// parameter tells it whether or not the notifier's chainConn stores
-// information about blocks that have been reorged out of the chain, which allows
-// HandleMissedBlocks to check whether the notifier's best block has been
-// reorged out, and rewind the chain accordingly. It returns the best block for
-// the notifier and a slice of the missed blocks. The new best block needs to be
-// returned in case a chain rewind occurs and partially completes before
-// erroring. In the case where there is no rewind, the notifier's
-// current best block is returned.
-func HandleMissedBlocks(chainConn ChainConn, txNotifier *TxNotifier,
- currBestBlock BlockEpoch, newHeight int32,
- backendStoresReorgs bool) (BlockEpoch, []BlockEpoch, er.R) {
-
- startingHeight := currBestBlock.Height
-
- if backendStoresReorgs {
- // If a reorg causes our best hash to be incorrect, rewind the
- // chain so our best block is set to the closest common
- // ancestor, then dispatch notifications from there.
- hashAtBestHeight, err :=
- chainConn.GetBlockHash(int64(currBestBlock.Height))
- if err != nil {
- return currBestBlock, nil, er.Errorf("unable to find "+
- "blockhash for height=%d: %v",
- currBestBlock.Height, err)
- }
-
- startingHeight, err = GetCommonBlockAncestorHeight(
- chainConn, *currBestBlock.Hash, *hashAtBestHeight,
- )
- if err != nil {
- return currBestBlock, nil, er.Errorf("unable to find "+
- "common ancestor: %v", err)
- }
-
- currBestBlock, err = RewindChain(chainConn, txNotifier,
- currBestBlock, startingHeight)
- if err != nil {
- return currBestBlock, nil, er.Errorf("unable to "+
- "rewind chain: %v", err)
- }
- }
-
- // We want to start dispatching historical notifications from the block
- // right after our best block, to avoid a redundant notification.
- missedBlocks, err := getMissedBlocks(chainConn, startingHeight+1, newHeight)
- if err != nil {
- return currBestBlock, nil, er.Errorf("unable to get missed "+
- "blocks: %v", err)
- }
-
- return currBestBlock, missedBlocks, nil
-}
-
-// getMissedBlocks returns a slice of blocks: [startingHeight, endingHeight)
-// fetched from the chain.
-func getMissedBlocks(chainConn ChainConn, startingHeight,
- endingHeight int32) ([]BlockEpoch, er.R) {
-
- numMissedBlocks := endingHeight - startingHeight
- if numMissedBlocks < 0 {
- return nil, er.Errorf("starting height %d is greater than "+
- "ending height %d", startingHeight, endingHeight)
- }
-
- missedBlocks := make([]BlockEpoch, 0, numMissedBlocks)
- for height := startingHeight; height < endingHeight; height++ {
- hash, err := chainConn.GetBlockHash(int64(height))
- if err != nil {
- return nil, er.Errorf("unable to find blockhash for "+
- "height=%d: %v", height, err)
- }
- missedBlocks = append(missedBlocks,
- BlockEpoch{Hash: hash, Height: height})
- }
-
- return missedBlocks, nil
-}
-
-// TxIndexConn abstracts an RPC backend with txindex enabled.
-type TxIndexConn interface {
- // GetRawTransactionVerbose returns the transaction identified by the
- // passed chain hash, and returns additional information such as the
- // block that the transaction confirmed.
- GetRawTransactionVerbose(*chainhash.Hash) (*btcjson.TxRawResult, er.R)
-
- // GetBlockVerbose returns the block identified by the chain hash along
- // with additional information such as the block's height in the chain.
- GetBlockVerbose(*chainhash.Hash) (*btcjson.GetBlockVerboseResult, er.R)
-}
-
-// ConfDetailsFromTxIndex looks up whether a transaction is already included in
-// a block in the active chain by using the backend node's transaction index.
-// If the transaction is found its TxConfStatus is returned. If it was found in
-// the mempool this will be TxFoundMempool, if it is found in a block this will
-// be TxFoundIndex. Otherwise TxNotFoundIndex is returned. If the tx is found
-// in a block its confirmation details are also returned.
-func ConfDetailsFromTxIndex(chainConn TxIndexConn, r ConfRequest,
- txNotFoundErr string) (*TxConfirmation, TxConfStatus, er.R) {
-
- // If the transaction has some or all of its confirmations required,
- // then we may be able to dispatch it immediately.
- rawTxRes, err := chainConn.GetRawTransactionVerbose(&r.TxID)
- if err != nil {
- // If the transaction lookup was successful, but it wasn't found
- // within the index itself, then we can exit early. We'll also
- // need to look at the error message returned as the error code
- // is used for multiple errors.
- if btcjson.ErrRPCNoTxInfo.Is(err) &&
- strings.Contains(err.Message(), txNotFoundErr) {
- return nil, TxNotFoundIndex, nil
- }
-
- return nil, TxNotFoundIndex,
- er.Errorf("unable to query for txid %v: %v",
- r.TxID, err)
- }
-
- // Deserialize the hex-encoded transaction to include it in the
- // confirmation details.
- rawTx, err := util.DecodeHex(rawTxRes.Hex)
- if err != nil {
- return nil, TxNotFoundIndex,
- er.Errorf("unable to deserialize tx %v: %v",
- r.TxID, err)
- }
- var tx wire.MsgTx
- if err := tx.Deserialize(bytes.NewReader(rawTx)); err != nil {
- return nil, TxNotFoundIndex,
- er.Errorf("unable to deserialize tx %v: %v",
- r.TxID, err)
- }
-
- // Ensure the transaction matches our confirmation request in terms of
- // txid and pkscript.
- if !r.MatchesTx(&tx) {
- return nil, TxNotFoundIndex,
- er.Errorf("unable to locate tx %v", r.TxID)
- }
-
- // Make sure we actually retrieved a transaction that is included in a
- // block. If not, the transaction must be unconfirmed (in the mempool),
- // and we'll return TxFoundMempool together with a nil TxConfirmation.
- if rawTxRes.BlockHash == "" {
- return nil, TxFoundMempool, nil
- }
-
- // As we need to fully populate the returned TxConfirmation struct,
- // grab the block in which the transaction was confirmed so we can
- // locate its exact index within the block.
- blockHash, err := chainhash.NewHashFromStr(rawTxRes.BlockHash)
- if err != nil {
- return nil, TxNotFoundIndex,
- er.Errorf("unable to get block hash %v for "+
- "historical dispatch: %v", rawTxRes.BlockHash, err)
- }
- block, err := chainConn.GetBlockVerbose(blockHash)
- if err != nil {
- return nil, TxNotFoundIndex,
- er.Errorf("unable to get block with hash %v for "+
- "historical dispatch: %v", blockHash, err)
- }
-
- // If the block was obtained, locate the transaction's index within the
- // block so we can give the subscriber full confirmation details.
- txidStr := r.TxID.String()
- for txIndex, txHash := range block.Tx {
- if txHash != txidStr {
- continue
- }
-
- return &TxConfirmation{
- Tx: &tx,
- BlockHash: blockHash,
- BlockHeight: uint32(block.Height),
- TxIndex: uint32(txIndex),
- }, TxFoundIndex, nil
- }
-
- // We return an error because we should have found the transaction
- // within the block, but didn't.
- return nil, TxNotFoundIndex, er.Errorf("unable to locate "+
- "tx %v in block %v", r.TxID, blockHash)
-}
diff --git a/lnd/chainntnfs/interface_dev.go b/lnd/chainntnfs/interface_dev.go
deleted file mode 100644
index c020614f..00000000
--- a/lnd/chainntnfs/interface_dev.go
+++ /dev/null
@@ -1,18 +0,0 @@
-// +build dev
-
-package chainntnfs
-
-import (
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/chaincfg/chainhash"
-)
-
-// TestChainNotifier enables the use of methods that are only present during
-// testing for ChainNotifiers.
-type TestChainNotifier interface {
- ChainNotifier
-
- // UnsafeStart enables notifiers to start up with a specific best block.
- // Used for testing.
- UnsafeStart(int32, *chainhash.Hash, int32, func() er.R) er.R
-}
diff --git a/lnd/chainntnfs/interface_test.go b/lnd/chainntnfs/interface_test.go
deleted file mode 100644
index cae7136e..00000000
--- a/lnd/chainntnfs/interface_test.go
+++ /dev/null
@@ -1,2025 +0,0 @@
-// +build dev
-
-package chainntnfs_test
-
-import (
- "bytes"
- "fmt"
- "io/ioutil"
- "log"
- "os"
- "sync"
- "testing"
- "time"
-
- "github.com/pkt-cash/pktd/btcutil"
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/chaincfg/chainhash"
- "github.com/pkt-cash/pktd/chaincfg/globalcfg"
- "github.com/pkt-cash/pktd/integration/rpctest"
- "github.com/pkt-cash/pktd/lnd/chainntnfs"
- "github.com/pkt-cash/pktd/lnd/chainntnfs/btcdnotify"
- "github.com/pkt-cash/pktd/lnd/chainntnfs/neutrinonotify"
- "github.com/pkt-cash/pktd/lnd/channeldb"
- "github.com/pkt-cash/pktd/neutrino"
- _ "github.com/pkt-cash/pktd/pktwallet/walletdb/bdb" // Required to auto-register the boltdb walletdb implementation.
- "github.com/pkt-cash/pktd/rpcclient"
- "github.com/pkt-cash/pktd/wire"
-)
-
-func testSingleConfirmationNotification(miner *rpctest.Harness,
- notifier chainntnfs.TestChainNotifier, scriptDispatch bool, t *testing.T) {
-
- // We'd like to test the case of being notified once a txid reaches
- // a *single* confirmation.
- //
- // So first, let's send some coins to "ourself", obtaining a txid.
- // We're spending from a coinbase output here, so we use the dedicated
- // function.
- txid, pkScript, err := chainntnfs.GetTestTxidAndScript(miner)
- if err != nil {
- t.Fatalf("unable to create test tx: %v", err)
- }
- if err := chainntnfs.WaitForMempoolTx(miner, txid); err != nil {
- t.Fatalf("tx not relayed to miner: %v", err)
- }
-
- _, currentHeight, err := miner.Node.GetBestBlock()
- if err != nil {
- t.Fatalf("unable to get current height: %v", err)
- }
-
- // Now that we have a txid, register a confirmation notification with
- // the chainntfn source.
- numConfs := uint32(1)
- var confIntent *chainntnfs.ConfirmationEvent
- if scriptDispatch {
- confIntent, err = notifier.RegisterConfirmationsNtfn(
- nil, pkScript, numConfs, uint32(currentHeight),
- )
- } else {
- confIntent, err = notifier.RegisterConfirmationsNtfn(
- txid, pkScript, numConfs, uint32(currentHeight),
- )
- }
- if err != nil {
- t.Fatalf("unable to register ntfn: %v", err)
- }
-
- // Now generate a single block, the transaction should be included which
- // should trigger a notification event.
- blockHash, err := miner.Node.Generate(1)
- if err != nil {
- t.Fatalf("unable to generate single block: %v", err)
- }
-
- select {
- case confInfo := <-confIntent.Confirmed:
- if !confInfo.BlockHash.IsEqual(blockHash[0]) {
- t.Fatalf("mismatched block hashes: expected %v, got %v",
- blockHash[0], confInfo.BlockHash)
- }
-
- // Finally, we'll verify that the tx index returned is the exact same
- // as the tx index of the transaction within the block itself.
- msgBlock, err := miner.Node.GetBlock(blockHash[0])
- if err != nil {
- t.Fatalf("unable to fetch block: %v", err)
- }
-
- block := btcutil.NewBlock(msgBlock)
- specifiedTxHash, err := block.TxHash(int(confInfo.TxIndex))
- if err != nil {
- t.Fatalf("unable to index into block: %v", err)
- }
-
- if !specifiedTxHash.IsEqual(txid) {
- t.Fatalf("mismatched tx indexes: expected %v, got %v",
- txid, specifiedTxHash)
- }
- case <-time.After(20 * time.Second):
- t.Fatalf("confirmation notification never received")
- }
-}
-
-func testMultiConfirmationNotification(miner *rpctest.Harness,
- notifier chainntnfs.TestChainNotifier, scriptDispatch bool, t *testing.T) {
-
- // We'd like to test the case of being notified once a txid reaches
- // N confirmations, where N > 1.
- //
- // Again, we'll begin by creating a fresh transaction, so we can obtain
- // a fresh txid.
- txid, pkScript, err := chainntnfs.GetTestTxidAndScript(miner)
- if err != nil {
- t.Fatalf("unable to create test addr: %v", err)
- }
- if err := chainntnfs.WaitForMempoolTx(miner, txid); err != nil {
- t.Fatalf("tx not relayed to miner: %v", err)
- }
-
- _, currentHeight, err := miner.Node.GetBestBlock()
- if err != nil {
- t.Fatalf("unable to get current height: %v", err)
- }
-
- numConfs := uint32(6)
- var confIntent *chainntnfs.ConfirmationEvent
- if scriptDispatch {
- confIntent, err = notifier.RegisterConfirmationsNtfn(
- nil, pkScript, numConfs, uint32(currentHeight),
- )
- } else {
- confIntent, err = notifier.RegisterConfirmationsNtfn(
- txid, pkScript, numConfs, uint32(currentHeight),
- )
- }
- if err != nil {
- t.Fatalf("unable to register ntfn: %v", err)
- }
-
- // Now generate a six blocks. The transaction should be included in the
- // first block, which will be built upon by the other 5 blocks.
- if _, err := miner.Node.Generate(6); err != nil {
- t.Fatalf("unable to generate single block: %v", err)
- }
-
- // TODO(roasbeef): reduce all timeouts after neutrino sync tightended
- // up
-
- select {
- case <-confIntent.Confirmed:
- break
- case <-time.After(20 * time.Second):
- t.Fatalf("confirmation notification never received")
- }
-}
-
-func testBatchConfirmationNotification(miner *rpctest.Harness,
- notifier chainntnfs.TestChainNotifier, scriptDispatch bool, t *testing.T) {
-
- // We'd like to test a case of serving notifications to multiple
- // clients, each requesting to be notified once a txid receives
- // various numbers of confirmations.
- confSpread := [6]uint32{1, 2, 3, 6, 20, 22}
- confIntents := make([]*chainntnfs.ConfirmationEvent, len(confSpread))
-
- _, currentHeight, err := miner.Node.GetBestBlock()
- if err != nil {
- t.Fatalf("unable to get current height: %v", err)
- }
-
- // Create a new txid spending miner coins for each confirmation entry
- // in confSpread, we collect each conf intent into a slice so we can
- // verify they're each notified at the proper number of confirmations
- // below.
- for i, numConfs := range confSpread {
- txid, pkScript, err := chainntnfs.GetTestTxidAndScript(miner)
- if err != nil {
- t.Fatalf("unable to create test addr: %v", err)
- }
- var confIntent *chainntnfs.ConfirmationEvent
- if scriptDispatch {
- confIntent, err = notifier.RegisterConfirmationsNtfn(
- nil, pkScript, numConfs, uint32(currentHeight),
- )
- } else {
- confIntent, err = notifier.RegisterConfirmationsNtfn(
- txid, pkScript, numConfs, uint32(currentHeight),
- )
- }
- if err != nil {
- t.Fatalf("unable to register ntfn: %v", err)
- }
- confIntents[i] = confIntent
- if err := chainntnfs.WaitForMempoolTx(miner, txid); err != nil {
- t.Fatalf("tx not relayed to miner: %v", err)
- }
-
- }
-
- initialConfHeight := uint32(currentHeight + 1)
-
- // Now, for each confirmation intent, generate the delta number of blocks
- // needed to trigger the confirmation notification. A goroutine is
- // spawned in order to verify the proper notification is triggered.
- for i, numConfs := range confSpread {
- var blocksToGen uint32
-
- // If this is the last instance, manually index to generate the
- // proper block delta in order to avoid a panic.
- if i == len(confSpread)-1 {
- blocksToGen = confSpread[len(confSpread)-1] - confSpread[len(confSpread)-2]
- } else {
- blocksToGen = confSpread[i+1] - confSpread[i]
- }
-
- // Generate the number of blocks necessary to trigger this
- // current confirmation notification.
- if _, err := miner.Node.Generate(blocksToGen); err != nil {
- t.Fatalf("unable to generate single block: %v", err)
- }
-
- select {
- case conf := <-confIntents[i].Confirmed:
- // All of the notifications above were originally
- // confirmed in the same block. The returned
- // notification should list the initial confirmation
- // height rather than the height they were _fully_
- // confirmed.
- if conf.BlockHeight != initialConfHeight {
- t.Fatalf("notification has incorrect initial "+
- "conf height: expected %v, got %v",
- initialConfHeight, conf.BlockHeight)
- }
- continue
- case <-time.After(20 * time.Second):
- t.Fatalf("confirmation notification never received: %v", numConfs)
- }
- }
-}
-
-func checkNotificationFields(ntfn *chainntnfs.SpendDetail,
- outpoint *wire.OutPoint, spenderSha *chainhash.Hash,
- height int32, t *testing.T) {
-
- t.Helper()
-
- if *ntfn.SpentOutPoint != *outpoint {
- t.Fatalf("ntfn includes wrong output, reports "+
- "%v instead of %v",
- ntfn.SpentOutPoint, outpoint)
- }
- if !bytes.Equal(ntfn.SpenderTxHash[:], spenderSha[:]) {
- t.Fatalf("ntfn includes wrong spender tx sha, "+
- "reports %v instead of %v",
- ntfn.SpenderTxHash[:], spenderSha[:])
- }
- if ntfn.SpenderInputIndex != 0 {
- t.Fatalf("ntfn includes wrong spending input "+
- "index, reports %v, should be %v",
- ntfn.SpenderInputIndex, 0)
- }
- if ntfn.SpendingHeight != height {
- t.Fatalf("ntfn has wrong spending height: "+
- "expected %v, got %v", height,
- ntfn.SpendingHeight)
- }
-}
-
-func testSpendNotification(miner *rpctest.Harness,
- notifier chainntnfs.TestChainNotifier, scriptDispatch bool, t *testing.T) {
-
- // We'd like to test the spend notifications for all ChainNotifier
- // concrete implementations.
- //
- // To do so, we first create a new output to our test target address.
- outpoint, output, privKey := chainntnfs.CreateSpendableOutput(t, miner)
-
- _, currentHeight, err := miner.Node.GetBestBlock()
- if err != nil {
- t.Fatalf("unable to get current height: %v", err)
- }
-
- // Now that we have an output index and the pkScript, register for a
- // spentness notification for the newly created output with multiple
- // clients in order to ensure the implementation can support
- // multi-client spend notifications.
- const numClients = 5
- spendClients := make([]*chainntnfs.SpendEvent, numClients)
- for i := 0; i < numClients; i++ {
- var spentIntent *chainntnfs.SpendEvent
- if scriptDispatch {
- spentIntent, err = notifier.RegisterSpendNtfn(
- nil, output.PkScript, uint32(currentHeight),
- )
- } else {
- spentIntent, err = notifier.RegisterSpendNtfn(
- outpoint, output.PkScript, uint32(currentHeight),
- )
- }
- if err != nil {
- t.Fatalf("unable to register for spend ntfn: %v", err)
- }
-
- spendClients[i] = spentIntent
- }
-
- // Next, create a new transaction spending that output.
- spendingTx := chainntnfs.CreateSpendTx(t, outpoint, output, privKey)
-
- // Broadcast our spending transaction.
- spenderSha, err := miner.Node.SendRawTransaction(spendingTx, true)
- if err != nil {
- t.Fatalf("unable to broadcast tx: %v", err)
- }
-
- if err := chainntnfs.WaitForMempoolTx(miner, spenderSha); err != nil {
- t.Fatalf("tx not relayed to miner: %v", err)
- }
-
- // Make sure notifications are not yet sent. We launch a go routine for
- // all the spend clients, such that we can wait for them all in
- // parallel.
- mempoolSpendTimeout := 2 * chainntnfs.TrickleInterval
- mempoolSpends := make(chan *chainntnfs.SpendDetail, numClients)
- for _, c := range spendClients {
- go func(client *chainntnfs.SpendEvent) {
- select {
- case s := <-client.Spend:
- mempoolSpends <- s
- case <-time.After(mempoolSpendTimeout):
- }
- }(c)
- }
-
- select {
- case <-mempoolSpends:
- t.Fatalf("did not expect to get notification before " +
- "block was mined")
- case <-time.After(mempoolSpendTimeout):
- }
-
- // Make sure registering a client after the tx is in the mempool still
- // doesn't trigger a notification.
- var spentIntent *chainntnfs.SpendEvent
- if scriptDispatch {
- spentIntent, err = notifier.RegisterSpendNtfn(
- nil, output.PkScript, uint32(currentHeight),
- )
- } else {
- spentIntent, err = notifier.RegisterSpendNtfn(
- outpoint, output.PkScript, uint32(currentHeight),
- )
- }
- if err != nil {
- t.Fatalf("unable to register for spend ntfn: %v", err)
- }
-
- select {
- case <-spentIntent.Spend:
- t.Fatalf("did not expect to get notification before " +
- "block was mined")
- case <-time.After(mempoolSpendTimeout):
- }
- spendClients = append(spendClients, spentIntent)
-
- // Now we mine a single block, which should include our spend. The
- // notification should also be sent off.
- if _, err := miner.Node.Generate(1); err != nil {
- t.Fatalf("unable to generate single block: %v", err)
- }
-
- _, currentHeight, err = miner.Node.GetBestBlock()
- if err != nil {
- t.Fatalf("unable to get current height: %v", err)
- }
-
- for _, c := range spendClients {
- select {
- case ntfn := <-c.Spend:
- // We've received the spend nftn. So now verify all the
- // fields have been set properly.
- checkNotificationFields(ntfn, outpoint, spenderSha,
- currentHeight, t)
- case <-time.After(30 * time.Second):
- t.Fatalf("spend ntfn never received")
- }
- }
-}
-
-func testBlockEpochNotification(miner *rpctest.Harness,
- notifier chainntnfs.TestChainNotifier, t *testing.T) {
-
- // We'd like to test the case of multiple registered clients receiving
- // block epoch notifications.
-
- const numBlocks = 10
- const numNtfns = numBlocks + 1
- const numClients = 5
- var wg sync.WaitGroup
-
- // Create numClients clients which will listen for block notifications. We
- // expect each client to receive 11 notifications, one for the current
- // tip of the chain, and one for each of the ten blocks we generate
- // below. So we'll use a WaitGroup to synchronize the test.
- for i := 0; i < numClients; i++ {
- epochClient, err := notifier.RegisterBlockEpochNtfn(nil)
- if err != nil {
- t.Fatalf("unable to register for epoch notification")
- }
-
- wg.Add(numNtfns)
- go func() {
- for i := 0; i < numNtfns; i++ {
- <-epochClient.Epochs
- wg.Done()
- }
- }()
- }
-
- epochsSent := make(chan struct{})
- go func() {
- wg.Wait()
- close(epochsSent)
- }()
-
- // Now generate 10 blocks, the clients above should each receive 10
- // notifications, thereby unblocking the goroutine above.
- if _, err := miner.Node.Generate(numBlocks); err != nil {
- t.Fatalf("unable to generate blocks: %v", err)
- }
-
- select {
- case <-epochsSent:
- case <-time.After(30 * time.Second):
- t.Fatalf("all notifications not sent")
- }
-}
-
-func testMultiClientConfirmationNotification(miner *rpctest.Harness,
- notifier chainntnfs.TestChainNotifier, scriptDispatch bool, t *testing.T) {
-
- // We'd like to test the case of a multiple clients registered to
- // receive a confirmation notification for the same transaction.
- txid, pkScript, err := chainntnfs.GetTestTxidAndScript(miner)
- if err != nil {
- t.Fatalf("unable to create test tx: %v", err)
- }
- if err := chainntnfs.WaitForMempoolTx(miner, txid); err != nil {
- t.Fatalf("tx not relayed to miner: %v", err)
- }
-
- var wg sync.WaitGroup
- const (
- numConfsClients = 5
- numConfs = 1
- )
-
- _, currentHeight, err := miner.Node.GetBestBlock()
- if err != nil {
- t.Fatalf("unable to get current height: %v", err)
- }
-
- // Register for a conf notification for the above generated txid with
- // numConfsClients distinct clients.
- for i := 0; i < numConfsClients; i++ {
- var confClient *chainntnfs.ConfirmationEvent
- if scriptDispatch {
- confClient, err = notifier.RegisterConfirmationsNtfn(
- nil, pkScript, numConfs, uint32(currentHeight),
- )
- } else {
- confClient, err = notifier.RegisterConfirmationsNtfn(
- txid, pkScript, numConfs, uint32(currentHeight),
- )
- }
- if err != nil {
- t.Fatalf("unable to register for confirmation: %v", err)
- }
-
- wg.Add(1)
- go func() {
- <-confClient.Confirmed
- wg.Done()
- }()
- }
-
- confsSent := make(chan struct{})
- go func() {
- wg.Wait()
- close(confsSent)
- }()
-
- // Finally, generate a single block which should trigger the unblocking
- // of all numConfsClients blocked on the channel read above.
- if _, err := miner.Node.Generate(1); err != nil {
- t.Fatalf("unable to generate block: %v", err)
- }
-
- select {
- case <-confsSent:
- case <-time.After(30 * time.Second):
- t.Fatalf("all confirmation notifications not sent")
- }
-}
-
-// Tests the case in which a confirmation notification is requested for a
-// transaction that has already been included in a block. In this case, the
-// confirmation notification should be dispatched immediately.
-func testTxConfirmedBeforeNtfnRegistration(miner *rpctest.Harness,
- notifier chainntnfs.TestChainNotifier, scriptDispatch bool, t *testing.T) {
-
- // First, let's send some coins to "ourself", obtaining a txid. We're
- // spending from a coinbase output here, so we use the dedicated
- // function.
- txid3, pkScript3, err := chainntnfs.GetTestTxidAndScript(miner)
- if err != nil {
- t.Fatalf("unable to create test tx: %v", err)
- }
- if err := chainntnfs.WaitForMempoolTx(miner, txid3); err != nil {
- t.Fatalf("tx not relayed to miner: %v", err)
- }
-
- // Generate another block containing tx 3, but we won't register conf
- // notifications for this tx until much later. The notifier must check
- // older blocks when the confirmation event is registered below to ensure
- // that the TXID hasn't already been included in the chain, otherwise the
- // notification will never be sent.
- _, err = miner.Node.Generate(1)
- if err != nil {
- t.Fatalf("unable to generate block: %v", err)
- }
-
- txid1, pkScript1, err := chainntnfs.GetTestTxidAndScript(miner)
- if err != nil {
- t.Fatalf("unable to create test tx: %v", err)
- }
- if err := chainntnfs.WaitForMempoolTx(miner, txid1); err != nil {
- t.Fatalf("tx not relayed to miner: %v", err)
- }
-
- txid2, pkScript2, err := chainntnfs.GetTestTxidAndScript(miner)
- if err != nil {
- t.Fatalf("unable to create test tx: %v", err)
- }
- if err := chainntnfs.WaitForMempoolTx(miner, txid2); err != nil {
- t.Fatalf("tx not relayed to miner: %v", err)
- }
-
- _, currentHeight, err := miner.Node.GetBestBlock()
- if err != nil {
- t.Fatalf("unable to get current height: %v", err)
- }
-
- // Now generate another block containing txs 1 & 2.
- blockHash, err := miner.Node.Generate(1)
- if err != nil {
- t.Fatalf("unable to generate block: %v", err)
- }
-
- // Register a confirmation notification with the chainntfn source for tx2,
- // which is included in the last block. The height hint is the height before
- // the block is included. This notification should fire immediately since
- // only 1 confirmation is required.
- var ntfn1 *chainntnfs.ConfirmationEvent
- if scriptDispatch {
- ntfn1, err = notifier.RegisterConfirmationsNtfn(
- nil, pkScript1, 1, uint32(currentHeight),
- )
- } else {
- ntfn1, err = notifier.RegisterConfirmationsNtfn(
- txid1, pkScript1, 1, uint32(currentHeight),
- )
- }
- if err != nil {
- t.Fatalf("unable to register ntfn: %v", err)
- }
-
- select {
- case confInfo := <-ntfn1.Confirmed:
- // Finally, we'll verify that the tx index returned is the exact same
- // as the tx index of the transaction within the block itself.
- msgBlock, err := miner.Node.GetBlock(blockHash[0])
- if err != nil {
- t.Fatalf("unable to fetch block: %v", err)
- }
- block := btcutil.NewBlock(msgBlock)
- specifiedTxHash, err := block.TxHash(int(confInfo.TxIndex))
- if err != nil {
- t.Fatalf("unable to index into block: %v", err)
- }
- if !specifiedTxHash.IsEqual(txid1) {
- t.Fatalf("mismatched tx indexes: expected %v, got %v",
- txid1, specifiedTxHash)
- }
-
- // We'll also ensure that the block height has been set
- // properly.
- if confInfo.BlockHeight != uint32(currentHeight+1) {
- t.Fatalf("incorrect block height: expected %v, got %v",
- confInfo.BlockHeight, currentHeight)
- }
- break
- case <-time.After(20 * time.Second):
- t.Fatalf("confirmation notification never received")
- }
-
- // Register a confirmation notification for tx2, requiring 3 confirmations.
- // This transaction is only partially confirmed, so the notification should
- // not fire yet.
- var ntfn2 *chainntnfs.ConfirmationEvent
- if scriptDispatch {
- ntfn2, err = notifier.RegisterConfirmationsNtfn(
- nil, pkScript2, 3, uint32(currentHeight),
- )
- } else {
- ntfn2, err = notifier.RegisterConfirmationsNtfn(
- txid2, pkScript2, 3, uint32(currentHeight),
- )
- }
- if err != nil {
- t.Fatalf("unable to register ntfn: %v", err)
- }
-
- // Fully confirm tx3.
- _, err = miner.Node.Generate(2)
- if err != nil {
- t.Fatalf("unable to generate block: %v", err)
- }
-
- select {
- case <-ntfn2.Confirmed:
- case <-time.After(10 * time.Second):
- t.Fatalf("confirmation notification never received")
- }
-
- select {
- case <-ntfn1.Confirmed:
- t.Fatalf("received multiple confirmations for tx")
- case <-time.After(1 * time.Second):
- }
-
- // Finally register a confirmation notification for tx3, requiring 1
- // confirmation. Ensure that conf notifications do not refire on txs
- // 1 or 2.
- var ntfn3 *chainntnfs.ConfirmationEvent
- if scriptDispatch {
- ntfn3, err = notifier.RegisterConfirmationsNtfn(
- nil, pkScript3, 1, uint32(currentHeight-1),
- )
- } else {
- ntfn3, err = notifier.RegisterConfirmationsNtfn(
- txid3, pkScript3, 1, uint32(currentHeight-1),
- )
- }
- if err != nil {
- t.Fatalf("unable to register ntfn: %v", err)
- }
-
- // We'll also register for a confirmation notification with the pkscript
- // of a different transaction. This notification shouldn't fire since we
- // match on both txid and pkscript.
- var ntfn4 *chainntnfs.ConfirmationEvent
- ntfn4, err = notifier.RegisterConfirmationsNtfn(
- txid3, pkScript2, 1, uint32(currentHeight-1),
- )
- if err != nil {
- t.Fatalf("unable to register ntfn: %v", err)
- }
-
- select {
- case <-ntfn3.Confirmed:
- case <-time.After(10 * time.Second):
- t.Fatalf("confirmation notification never received")
- }
-
- select {
- case <-ntfn4.Confirmed:
- t.Fatalf("confirmation notification received")
- case <-time.After(5 * time.Second):
- }
-
- time.Sleep(1 * time.Second)
-
- select {
- case <-ntfn1.Confirmed:
- t.Fatalf("received multiple confirmations for tx")
- default:
- }
-
- select {
- case <-ntfn2.Confirmed:
- t.Fatalf("received multiple confirmations for tx")
- default:
- }
-}
-
-// Test the case of a notification consumer having forget or being delayed in
-// checking for a confirmation. This should not cause the notifier to stop
-// working
-func testLazyNtfnConsumer(miner *rpctest.Harness,
- notifier chainntnfs.TestChainNotifier, scriptDispatch bool, t *testing.T) {
-
- // Create a transaction to be notified about. We'll register for
- // notifications on this transaction but won't be prompt in checking them
- txid, pkScript, err := chainntnfs.GetTestTxidAndScript(miner)
- if err != nil {
- t.Fatalf("unable to create test tx: %v", err)
- }
- if err := chainntnfs.WaitForMempoolTx(miner, txid); err != nil {
- t.Fatalf("tx not relayed to miner: %v", err)
- }
-
- _, currentHeight, err := miner.Node.GetBestBlock()
- if err != nil {
- t.Fatalf("unable to get current height: %v", err)
- }
-
- numConfs := uint32(3)
-
- // Add a block right before registering, this makes race conditions
- // between the historical dispatcher and the normal dispatcher more obvious
- if _, err := miner.Node.Generate(1); err != nil {
- t.Fatalf("unable to generate blocks: %v", err)
- }
-
- var firstConfIntent *chainntnfs.ConfirmationEvent
- if scriptDispatch {
- firstConfIntent, err = notifier.RegisterConfirmationsNtfn(
- nil, pkScript, numConfs, uint32(currentHeight),
- )
- } else {
- firstConfIntent, err = notifier.RegisterConfirmationsNtfn(
- txid, pkScript, numConfs, uint32(currentHeight),
- )
- }
- if err != nil {
- t.Fatalf("unable to register ntfn: %v", err)
- }
-
- // Generate another 2 blocks, this should dispatch the confirm notification
- if _, err := miner.Node.Generate(2); err != nil {
- t.Fatalf("unable to generate blocks: %v", err)
- }
-
- // Now make another transaction, just because we haven't checked to see
- // if the first transaction has confirmed doesn't mean that we shouldn't
- // be able to see if this transaction confirms first
- txid, pkScript, err = chainntnfs.GetTestTxidAndScript(miner)
- if err != nil {
- t.Fatalf("unable to create test tx: %v", err)
- }
- if err := chainntnfs.WaitForMempoolTx(miner, txid); err != nil {
- t.Fatalf("tx not relayed to miner: %v", err)
- }
-
- _, currentHeight, err = miner.Node.GetBestBlock()
- if err != nil {
- t.Fatalf("unable to get current height: %v", err)
- }
-
- numConfs = 1
- var secondConfIntent *chainntnfs.ConfirmationEvent
- if scriptDispatch {
- secondConfIntent, err = notifier.RegisterConfirmationsNtfn(
- nil, pkScript, numConfs, uint32(currentHeight),
- )
- } else {
- secondConfIntent, err = notifier.RegisterConfirmationsNtfn(
- txid, pkScript, numConfs, uint32(currentHeight),
- )
- }
- if err != nil {
- t.Fatalf("unable to register ntfn: %v", err)
- }
-
- if _, err := miner.Node.Generate(1); err != nil {
- t.Fatalf("unable to generate blocks: %v", err)
- }
-
- select {
- case <-secondConfIntent.Confirmed:
- // Successfully receive the second notification
- break
- case <-time.After(30 * time.Second):
- t.Fatalf("Second confirmation notification never received")
- }
-
- // Make sure the first tx confirmed successfully
- select {
- case <-firstConfIntent.Confirmed:
- break
- case <-time.After(30 * time.Second):
- t.Fatalf("First confirmation notification never received")
- }
-}
-
-// Tests the case in which a spend notification is requested for a spend that
-// has already been included in a block. In this case, the spend notification
-// should be dispatched immediately.
-func testSpendBeforeNtfnRegistration(miner *rpctest.Harness,
- notifier chainntnfs.TestChainNotifier, scriptDispatch bool, t *testing.T) {
-
- // We'd like to test the spend notifications for all ChainNotifier
- // concrete implementations.
- //
- // To do so, we first create a new output to our test target address.
- outpoint, output, privKey := chainntnfs.CreateSpendableOutput(t, miner)
-
- _, heightHint, err := miner.Node.GetBestBlock()
- if err != nil {
- t.Fatalf("unable to get current height: %v", err)
- }
-
- // We'll then spend this output and broadcast the spend transaction.
- spendingTx := chainntnfs.CreateSpendTx(t, outpoint, output, privKey)
- spenderSha, err := miner.Node.SendRawTransaction(spendingTx, true)
- if err != nil {
- t.Fatalf("unable to broadcast tx: %v", err)
- }
- if err := chainntnfs.WaitForMempoolTx(miner, spenderSha); err != nil {
- t.Fatalf("tx not relayed to miner: %v", err)
- }
-
- // We create an epoch client we can use to make sure the notifier is
- // caught up to the mining node's chain.
- epochClient, err := notifier.RegisterBlockEpochNtfn(nil)
- if err != nil {
- t.Fatalf("unable to register for block epoch: %v", err)
- }
-
- // Now we mine an additional block, which should include our spend.
- if _, err := miner.Node.Generate(1); err != nil {
- t.Fatalf("unable to generate single block: %v", err)
- }
- _, spendHeight, err := miner.Node.GetBestBlock()
- if err != nil {
- t.Fatalf("unable to get current height: %v", err)
- }
-
- // checkSpends registers two clients to be notified of a spend that has
- // already happened. The notifier should dispatch a spend notification
- // immediately.
- checkSpends := func() {
- t.Helper()
-
- const numClients = 2
- spendClients := make([]*chainntnfs.SpendEvent, numClients)
- for i := 0; i < numClients; i++ {
- var spentIntent *chainntnfs.SpendEvent
- if scriptDispatch {
- spentIntent, err = notifier.RegisterSpendNtfn(
- nil, output.PkScript, uint32(heightHint),
- )
- } else {
- spentIntent, err = notifier.RegisterSpendNtfn(
- outpoint, output.PkScript,
- uint32(heightHint),
- )
- }
- if err != nil {
- t.Fatalf("unable to register for spend ntfn: %v",
- err)
- }
-
- spendClients[i] = spentIntent
- }
-
- for _, client := range spendClients {
- select {
- case ntfn := <-client.Spend:
- // We've received the spend nftn. So now verify
- // all the fields have been set properly.
- checkNotificationFields(
- ntfn, outpoint, spenderSha, spendHeight, t,
- )
- case <-time.After(30 * time.Second):
- t.Fatalf("spend ntfn never received")
- }
- }
- }
-
- // Wait for the notifier to have caught up to the mined block.
- select {
- case _, ok := <-epochClient.Epochs:
- if !ok {
- t.Fatalf("epoch channel was closed")
- }
- case <-time.After(15 * time.Second):
- t.Fatalf("did not receive block epoch")
- }
-
- // Check that the spend clients gets immediately notified for the spend
- // in the previous block.
- checkSpends()
-
- // Bury the spend even deeper, and do the same check.
- const numBlocks = 10
- if _, err := miner.Node.Generate(numBlocks); err != nil {
- t.Fatalf("unable to generate single block: %v", err)
- }
-
- // Wait for the notifier to have caught up with the new blocks.
- for i := 0; i < numBlocks; i++ {
- select {
- case _, ok := <-epochClient.Epochs:
- if !ok {
- t.Fatalf("epoch channel was closed")
- }
- case <-time.After(15 * time.Second):
- t.Fatalf("did not receive block epoch")
- }
- }
-
- // The clients should still be notified immediately.
- checkSpends()
-}
-
-func testCancelSpendNtfn(node *rpctest.Harness,
- notifier chainntnfs.TestChainNotifier, scriptDispatch bool, t *testing.T) {
-
- // We'd like to test that once a spend notification is registered, it
- // can be canceled before the notification is dispatched.
-
- // First, we'll start by creating a new output that we can spend
- // ourselves.
- outpoint, output, privKey := chainntnfs.CreateSpendableOutput(t, node)
-
- _, currentHeight, err := node.Node.GetBestBlock()
- if err != nil {
- t.Fatalf("unable to get current height: %v", err)
- }
-
- // Create two clients that each registered to the spend notification.
- // We'll cancel the notification for the first client and leave the
- // notification for the second client enabled.
- const numClients = 2
- spendClients := make([]*chainntnfs.SpendEvent, numClients)
- for i := 0; i < numClients; i++ {
- var spentIntent *chainntnfs.SpendEvent
- if scriptDispatch {
- spentIntent, err = notifier.RegisterSpendNtfn(
- nil, output.PkScript, uint32(currentHeight),
- )
- } else {
- spentIntent, err = notifier.RegisterSpendNtfn(
- outpoint, output.PkScript, uint32(currentHeight),
- )
- }
- if err != nil {
- t.Fatalf("unable to register for spend ntfn: %v", err)
- }
-
- spendClients[i] = spentIntent
- }
-
- // Next, create a new transaction spending that output.
- spendingTx := chainntnfs.CreateSpendTx(t, outpoint, output, privKey)
-
- // Before we broadcast the spending transaction, we'll cancel the
- // notification of the first client.
- spendClients[1].Cancel()
-
- // Broadcast our spending transaction.
- spenderSha, err := node.Node.SendRawTransaction(spendingTx, true)
- if err != nil {
- t.Fatalf("unable to broadcast tx: %v", err)
- }
-
- if err := chainntnfs.WaitForMempoolTx(node, spenderSha); err != nil {
- t.Fatalf("tx not relayed to miner: %v", err)
- }
-
- // Now we mine a single block, which should include our spend. The
- // notification should also be sent off.
- if _, err := node.Node.Generate(1); err != nil {
- t.Fatalf("unable to generate single block: %v", err)
- }
-
- // The spend notification for the first client should have been
- // dispatched.
- select {
- case ntfn := <-spendClients[0].Spend:
- // We've received the spend nftn. So now verify all the
- // fields have been set properly.
- if *ntfn.SpentOutPoint != *outpoint {
- t.Fatalf("ntfn includes wrong output, reports "+
- "%v instead of %v",
- ntfn.SpentOutPoint, outpoint)
- }
- if !bytes.Equal(ntfn.SpenderTxHash[:], spenderSha[:]) {
- t.Fatalf("ntfn includes wrong spender tx sha, "+
- "reports %v instead of %v",
- ntfn.SpenderTxHash[:], spenderSha[:])
- }
- if ntfn.SpenderInputIndex != 0 {
- t.Fatalf("ntfn includes wrong spending input "+
- "index, reports %v, should be %v",
- ntfn.SpenderInputIndex, 0)
- }
- case <-time.After(20 * time.Second):
- t.Fatalf("spend ntfn never received")
- }
-
- // However, the spend notification of the second client should NOT have
- // been dispatched.
- select {
- case _, ok := <-spendClients[1].Spend:
- if ok {
- t.Fatalf("spend ntfn should have been canceled")
- }
- case <-time.After(20 * time.Second):
- t.Fatalf("spend ntfn never canceled")
- }
-}
-
-func testCancelEpochNtfn(node *rpctest.Harness,
- notifier chainntnfs.TestChainNotifier, t *testing.T) {
-
- // We'd like to ensure that once a client cancels their block epoch
- // notifications, no further notifications are sent over the channel
- // if/when new blocks come in.
- const numClients = 2
-
- epochClients := make([]*chainntnfs.BlockEpochEvent, numClients)
- for i := 0; i < numClients; i++ {
- epochClient, err := notifier.RegisterBlockEpochNtfn(nil)
- if err != nil {
- t.Fatalf("unable to register for epoch notification")
- }
- epochClients[i] = epochClient
- }
-
- // Now before we mine any blocks, cancel the notification for the first
- // epoch client.
- epochClients[0].Cancel()
-
- // Now mine a single block, this should trigger the logic to dispatch
- // epoch notifications.
- if _, err := node.Node.Generate(1); err != nil {
- t.Fatalf("unable to generate blocks: %v", err)
- }
-
- // The epoch notification for the first client shouldn't have been
- // dispatched.
- select {
- case _, ok := <-epochClients[0].Epochs:
- if ok {
- t.Fatalf("epoch notification should have been canceled")
- }
- case <-time.After(2 * time.Second):
- t.Fatalf("epoch notification not sent")
- }
-
- // However, the epoch notification for the second client should have
- // been dispatched as normal.
- select {
- case _, ok := <-epochClients[1].Epochs:
- if !ok {
- t.Fatalf("epoch was canceled")
- }
- case <-time.After(20 * time.Second):
- t.Fatalf("epoch notification not sent")
- }
-}
-
-func testReorgConf(miner *rpctest.Harness,
- notifier chainntnfs.TestChainNotifier, scriptDispatch bool, t *testing.T) {
-
- // Set up a new miner that we can use to cause a reorg.
- miner2, err := rpctest.New(chainntnfs.NetParams, nil, []string{"--txindex"})
- if err != nil {
- t.Fatalf("unable to create mining node: %v", err)
- }
- if err := miner2.SetUp(false, 0); err != nil {
- t.Fatalf("unable to set up mining node: %v", err)
- }
- defer miner2.TearDown()
-
- // We start by connecting the new miner to our original miner,
- // such that it will sync to our original chain.
- if err := rpctest.ConnectNode(miner, miner2); err != nil {
- t.Fatalf("unable to connect harnesses: %v", err)
- }
- nodeSlice := []*rpctest.Harness{miner, miner2}
- if err := rpctest.JoinNodes(nodeSlice, rpctest.Blocks); err != nil {
- t.Fatalf("unable to join node on blocks: %v", err)
- }
-
- // The two should be on the same blockheight.
- _, nodeHeight1, err := miner.Node.GetBestBlock()
- if err != nil {
- t.Fatalf("unable to get current blockheight %v", err)
- }
-
- _, nodeHeight2, err := miner2.Node.GetBestBlock()
- if err != nil {
- t.Fatalf("unable to get current blockheight %v", err)
- }
-
- if nodeHeight1 != nodeHeight2 {
- t.Fatalf("expected both miners to be on the same height: %v vs %v",
- nodeHeight1, nodeHeight2)
- }
-
- // We disconnect the two nodes, such that we can start mining on them
- // individually without the other one learning about the new blocks.
- err = miner.Node.AddNode(miner2.P2PAddress(), rpcclient.ANRemove)
- if err != nil {
- t.Fatalf("unable to remove node: %v", err)
- }
-
- txid, pkScript, err := chainntnfs.GetTestTxidAndScript(miner)
- if err != nil {
- t.Fatalf("unable to create test tx: %v", err)
- }
- if err := chainntnfs.WaitForMempoolTx(miner, txid); err != nil {
- t.Fatalf("tx not relayed to miner: %v", err)
- }
-
- _, currentHeight, err := miner.Node.GetBestBlock()
- if err != nil {
- t.Fatalf("unable to get current height: %v", err)
- }
-
- // Now that we have a txid, register a confirmation notification with
- // the chainntfn source.
- numConfs := uint32(2)
- var confIntent *chainntnfs.ConfirmationEvent
- if scriptDispatch {
- confIntent, err = notifier.RegisterConfirmationsNtfn(
- nil, pkScript, numConfs, uint32(currentHeight),
- )
- } else {
- confIntent, err = notifier.RegisterConfirmationsNtfn(
- txid, pkScript, numConfs, uint32(currentHeight),
- )
- }
- if err != nil {
- t.Fatalf("unable to register ntfn: %v", err)
- }
-
- // Now generate a single block, the transaction should be included.
- _, err = miner.Node.Generate(1)
- if err != nil {
- t.Fatalf("unable to generate single block: %v", err)
- }
-
- // Transaction only has one confirmation, and the notification is registered
- // with 2 confirmations, so we should not be notified yet.
- select {
- case <-confIntent.Confirmed:
- t.Fatal("tx was confirmed unexpectedly")
- case <-time.After(1 * time.Second):
- }
-
- // Reorganize transaction out of the chain by generating a longer fork
- // from the other miner. The transaction is not included in this fork.
- miner2.Node.Generate(2)
-
- // Reconnect nodes to reach consensus on the longest chain. miner2's chain
- // should win and become active on miner1.
- if err := rpctest.ConnectNode(miner, miner2); err != nil {
- t.Fatalf("unable to connect harnesses: %v", err)
- }
- nodeSlice = []*rpctest.Harness{miner, miner2}
- if err := rpctest.JoinNodes(nodeSlice, rpctest.Blocks); err != nil {
- t.Fatalf("unable to join node on blocks: %v", err)
- }
-
- _, nodeHeight1, err = miner.Node.GetBestBlock()
- if err != nil {
- t.Fatalf("unable to get current blockheight %v", err)
- }
-
- _, nodeHeight2, err = miner2.Node.GetBestBlock()
- if err != nil {
- t.Fatalf("unable to get current blockheight %v", err)
- }
-
- if nodeHeight1 != nodeHeight2 {
- t.Fatalf("expected both miners to be on the same height: %v vs %v",
- nodeHeight1, nodeHeight2)
- }
-
- // Even though there is one block above the height of the block that the
- // transaction was included in, it is not the active chain so the
- // notification should not be sent.
- select {
- case <-confIntent.Confirmed:
- t.Fatal("tx was confirmed unexpectedly")
- case <-time.After(1 * time.Second):
- }
-
- // Now confirm the transaction on the longest chain and verify that we
- // receive the notification.
- tx, err := miner.Node.GetRawTransaction(txid)
- if err != nil {
- t.Fatalf("unable to get raw tx: %v", err)
- }
-
- txid, err = miner2.Node.SendRawTransaction(tx.MsgTx(), false)
- if err != nil {
- t.Fatalf("unable to get send tx: %v", err)
- }
- if err := chainntnfs.WaitForMempoolTx(miner, txid); err != nil {
- t.Fatalf("tx not relayed to miner: %v", err)
- }
-
- _, err = miner.Node.Generate(3)
- if err != nil {
- t.Fatalf("unable to generate single block: %v", err)
- }
-
- select {
- case <-confIntent.Confirmed:
- case <-time.After(20 * time.Second):
- t.Fatalf("confirmation notification never received")
- }
-}
-
-// testReorgSpend ensures that the different ChainNotifier implementations
-// correctly handle outpoints whose spending transaction has been reorged out of
-// the chain.
-func testReorgSpend(miner *rpctest.Harness,
- notifier chainntnfs.TestChainNotifier, scriptDispatch bool, t *testing.T) {
-
- // We'll start by creating an output and registering a spend
- // notification for it.
- outpoint, output, privKey := chainntnfs.CreateSpendableOutput(t, miner)
- _, heightHint, err := miner.Node.GetBestBlock()
- if err != nil {
- t.Fatalf("unable to retrieve current height: %v", err)
- }
-
- var spendIntent *chainntnfs.SpendEvent
- if scriptDispatch {
- spendIntent, err = notifier.RegisterSpendNtfn(
- nil, output.PkScript, uint32(heightHint),
- )
- } else {
- spendIntent, err = notifier.RegisterSpendNtfn(
- outpoint, output.PkScript, uint32(heightHint),
- )
- }
- if err != nil {
- t.Fatalf("unable to register for spend: %v", err)
- }
-
- // Set up a new miner that we can use to cause a reorg.
- miner2, err := rpctest.New(chainntnfs.NetParams, nil, []string{"--txindex"})
- if err != nil {
- t.Fatalf("unable to create mining node: %v", err)
- }
- if err := miner2.SetUp(false, 0); err != nil {
- t.Fatalf("unable to set up mining node: %v", err)
- }
- defer miner2.TearDown()
-
- // We start by connecting the new miner to our original miner, in order
- // to have a consistent view of the chain from both miners. They should
- // be on the same block height.
- if err := rpctest.ConnectNode(miner, miner2); err != nil {
- t.Fatalf("unable to connect miners: %v", err)
- }
- nodeSlice := []*rpctest.Harness{miner, miner2}
- if err := rpctest.JoinNodes(nodeSlice, rpctest.Blocks); err != nil {
- t.Fatalf("unable to sync miners: %v", err)
- }
- _, minerHeight1, err := miner.Node.GetBestBlock()
- if err != nil {
- t.Fatalf("unable to get miner1's current height: %v", err)
- }
- _, minerHeight2, err := miner2.Node.GetBestBlock()
- if err != nil {
- t.Fatalf("unable to get miner2's current height: %v", err)
- }
- if minerHeight1 != minerHeight2 {
- t.Fatalf("expected both miners to be on the same height: "+
- "%v vs %v", minerHeight1, minerHeight2)
- }
-
- // We disconnect the two nodes, such that we can start mining on them
- // individually without the other one learning about the new blocks.
- err = miner.Node.AddNode(miner2.P2PAddress(), rpcclient.ANRemove)
- if err != nil {
- t.Fatalf("unable to disconnect miners: %v", err)
- }
-
- // Craft the spending transaction for the outpoint created above and
- // confirm it under the chain of the original miner.
- spendTx := chainntnfs.CreateSpendTx(t, outpoint, output, privKey)
- spendTxHash, err := miner.Node.SendRawTransaction(spendTx, true)
- if err != nil {
- t.Fatalf("unable to broadcast spend tx: %v", err)
- }
- if err := chainntnfs.WaitForMempoolTx(miner, spendTxHash); err != nil {
- t.Fatalf("spend tx not relayed to miner: %v", err)
- }
- const numBlocks = 1
- if _, err := miner.Node.Generate(numBlocks); err != nil {
- t.Fatalf("unable to generate blocks: %v", err)
- }
- _, spendHeight, err := miner.Node.GetBestBlock()
- if err != nil {
- t.Fatalf("unable to get spend height: %v", err)
- }
-
- // We should see a spend notification dispatched with the correct spend
- // details.
- select {
- case spendDetails := <-spendIntent.Spend:
- checkNotificationFields(
- spendDetails, outpoint, spendTxHash, spendHeight, t,
- )
- case <-time.After(5 * time.Second):
- t.Fatal("expected spend notification to be dispatched")
- }
-
- // Now, with the other miner, we'll generate one more block than the
- // other miner and connect them to cause a reorg.
- if _, err := miner2.Node.Generate(numBlocks + 1); err != nil {
- t.Fatalf("unable to generate blocks: %v", err)
- }
- if err := rpctest.ConnectNode(miner, miner2); err != nil {
- t.Fatalf("unable to connect miners: %v", err)
- }
- nodeSlice = []*rpctest.Harness{miner2, miner}
- if err := rpctest.JoinNodes(nodeSlice, rpctest.Blocks); err != nil {
- t.Fatalf("unable to sync miners: %v", err)
- }
- _, minerHeight1, err = miner.Node.GetBestBlock()
- if err != nil {
- t.Fatalf("unable to get miner1's current height: %v", err)
- }
- _, minerHeight2, err = miner2.Node.GetBestBlock()
- if err != nil {
- t.Fatalf("unable to get miner2's current height: %v", err)
- }
- if minerHeight1 != minerHeight2 {
- t.Fatalf("expected both miners to be on the same height: "+
- "%v vs %v", minerHeight1, minerHeight2)
- }
-
- // We should receive a reorg notification.
- select {
- case _, ok := <-spendIntent.Reorg:
- if !ok {
- t.Fatal("unexpected reorg channel closed")
- }
- case <-time.After(5 * time.Second):
- t.Fatal("expected to receive reorg notification")
- }
-
- // Now that both miners are on the same chain, we'll confirm the
- // spending transaction of the outpoint and receive a notification for
- // it.
- if _, err = miner2.Node.SendRawTransaction(spendTx, true); err != nil {
- t.Fatalf("unable to broadcast spend tx: %v", err)
- }
- if err := chainntnfs.WaitForMempoolTx(miner, spendTxHash); err != nil {
- t.Fatalf("tx not relayed to miner: %v", err)
- }
- if _, err := miner.Node.Generate(numBlocks); err != nil {
- t.Fatalf("unable to generate single block: %v", err)
- }
- _, spendHeight, err = miner.Node.GetBestBlock()
- if err != nil {
- t.Fatalf("unable to retrieve current height: %v", err)
- }
-
- select {
- case spendDetails := <-spendIntent.Spend:
- checkNotificationFields(
- spendDetails, outpoint, spendTxHash, spendHeight, t,
- )
- case <-time.After(5 * time.Second):
- t.Fatal("expected spend notification to be dispatched")
- }
-}
-
-// testCatchUpClientOnMissedBlocks tests the case of multiple registered client
-// receiving historical block epoch notifications due to their best known block
-// being out of date.
-func testCatchUpClientOnMissedBlocks(miner *rpctest.Harness,
- notifier chainntnfs.TestChainNotifier, t *testing.T) {
-
- const numBlocks = 10
- const numClients = 5
- var wg sync.WaitGroup
-
- outdatedHash, outdatedHeight, err := miner.Node.GetBestBlock()
- if err != nil {
- t.Fatalf("unable to retrieve current height: %v", err)
- }
-
- // This function is used by UnsafeStart to ensure all notifications
- // are fully drained before clients register for notifications.
- generateBlocks := func() er.R {
- _, err = miner.Node.Generate(numBlocks)
- return err
- }
-
- // We want to ensure that when a client registers for block notifications,
- // the notifier's best block is at the tip of the chain. If it isn't, the
- // client may not receive all historical notifications.
- bestHeight := outdatedHeight + numBlocks
- err = notifier.UnsafeStart(bestHeight, nil, bestHeight, generateBlocks)
- if err != nil {
- t.Fatalf("unable to unsafe start the notifier: %v", err)
- }
- defer notifier.Stop()
-
- // Create numClients clients whose best known block is 10 blocks behind
- // the tip of the chain. We expect each client to receive numBlocks
- // notifications, 1 for each block they're behind.
- clients := make([]*chainntnfs.BlockEpochEvent, 0, numClients)
- outdatedBlock := &chainntnfs.BlockEpoch{
- Height: outdatedHeight, Hash: outdatedHash,
- }
- for i := 0; i < numClients; i++ {
- epochClient, err := notifier.RegisterBlockEpochNtfn(outdatedBlock)
- if err != nil {
- t.Fatalf("unable to register for epoch notification: %v", err)
- }
- clients = append(clients, epochClient)
- }
- for expectedHeight := outdatedHeight + 1; expectedHeight <=
- bestHeight; expectedHeight++ {
-
- for _, epochClient := range clients {
- select {
- case block := <-epochClient.Epochs:
- if block.Height != expectedHeight {
- t.Fatalf("received block of height: %d, "+
- "expected: %d", block.Height,
- expectedHeight)
- }
- case <-time.After(20 * time.Second):
- t.Fatalf("did not receive historical notification "+
- "for height %d", expectedHeight)
- }
-
- }
- }
-
- // Finally, ensure that an extra block notification wasn't received.
- anyExtras := make(chan struct{}, len(clients))
- for _, epochClient := range clients {
- wg.Add(1)
- go func(epochClient *chainntnfs.BlockEpochEvent) {
- defer wg.Done()
- select {
- case <-epochClient.Epochs:
- anyExtras <- struct{}{}
- case <-time.After(5 * time.Second):
- }
- }(epochClient)
- }
-
- wg.Wait()
- close(anyExtras)
-
- var extraCount int
- for range anyExtras {
- extraCount++
- }
-
- if extraCount > 0 {
- t.Fatalf("received %d unexpected block notification", extraCount)
- }
-}
-
-// testCatchUpOnMissedBlocks the case of multiple registered clients receiving
-// historical block epoch notifications due to the notifier's best known block
-// being out of date.
-func testCatchUpOnMissedBlocks(miner *rpctest.Harness,
- notifier chainntnfs.TestChainNotifier, t *testing.T) {
-
- const numBlocks = 10
- const numClients = 5
- var wg sync.WaitGroup
-
- _, bestHeight, err := miner.Node.GetBestBlock()
- if err != nil {
- t.Fatalf("unable to get current blockheight %v", err)
- }
-
- // This function is used by UnsafeStart to ensure all notifications
- // are fully drained before clients register for notifications.
- generateBlocks := func() er.R {
- _, err = miner.Node.Generate(numBlocks)
- return err
- }
-
- // Next, start the notifier with outdated best block information.
- err = notifier.UnsafeStart(
- bestHeight, nil, bestHeight+numBlocks, generateBlocks,
- )
- if err != nil {
- t.Fatalf("unable to unsafe start the notifier: %v", err)
- }
- defer notifier.Stop()
-
- // Create numClients clients who will listen for block notifications.
- clients := make([]*chainntnfs.BlockEpochEvent, 0, numClients)
- for i := 0; i < numClients; i++ {
- epochClient, err := notifier.RegisterBlockEpochNtfn(nil)
- if err != nil {
- t.Fatalf("unable to register for epoch notification: %v", err)
- }
-
- // Drain the notification dispatched upon registration as we're
- // not interested in it.
- select {
- case <-epochClient.Epochs:
- case <-time.After(5 * time.Second):
- t.Fatal("expected to receive epoch for current block " +
- "upon registration")
- }
-
- clients = append(clients, epochClient)
- }
-
- // Generate a single block to trigger the backlog of historical
- // notifications for the previously mined blocks.
- if _, err := miner.Node.Generate(1); err != nil {
- t.Fatalf("unable to generate blocks: %v", err)
- }
-
- // We expect each client to receive numBlocks + 1 notifications, 1 for
- // each block that the notifier has missed out on.
- for expectedHeight := bestHeight + 1; expectedHeight <=
- bestHeight+numBlocks+1; expectedHeight++ {
-
- for _, epochClient := range clients {
- select {
- case block := <-epochClient.Epochs:
- if block.Height != expectedHeight {
- t.Fatalf("received block of height: %d, "+
- "expected: %d", block.Height,
- expectedHeight)
- }
- case <-time.After(20 * time.Second):
- t.Fatalf("did not receive historical notification "+
- "for height %d", expectedHeight)
- }
- }
- }
-
- // Finally, ensure that an extra block notification wasn't received.
- anyExtras := make(chan struct{}, len(clients))
- for _, epochClient := range clients {
- wg.Add(1)
- go func(epochClient *chainntnfs.BlockEpochEvent) {
- defer wg.Done()
- select {
- case <-epochClient.Epochs:
- anyExtras <- struct{}{}
- case <-time.After(5 * time.Second):
- }
- }(epochClient)
- }
-
- wg.Wait()
- close(anyExtras)
-
- var extraCount int
- for range anyExtras {
- extraCount++
- }
-
- if extraCount > 0 {
- t.Fatalf("received %d unexpected block notification", extraCount)
- }
-}
-
-// testCatchUpOnMissedBlocks tests that a client will still receive all valid
-// block notifications in the case where a notifier's best block has been reorged
-// out of the chain.
-func testCatchUpOnMissedBlocksWithReorg(miner1 *rpctest.Harness,
- notifier chainntnfs.TestChainNotifier, t *testing.T) {
-
- // If this is the neutrino notifier, then we'll skip this test for now
- // as we're missing functionality required to ensure the test passes
- // reliably.
- if _, ok := notifier.(*neutrinonotify.NeutrinoNotifier); ok {
- t.Skip("skipping re-org test for neutrino")
- }
-
- const numBlocks = 10
- const numClients = 5
- var wg sync.WaitGroup
-
- // Set up a new miner that we can use to cause a reorg.
- miner2, err := rpctest.New(chainntnfs.NetParams, nil, []string{"--txindex"})
- if err != nil {
- t.Fatalf("unable to create mining node: %v", err)
- }
- if err := miner2.SetUp(false, 0); err != nil {
- t.Fatalf("unable to set up mining node: %v", err)
- }
- defer miner2.TearDown()
-
- // We start by connecting the new miner to our original miner,
- // such that it will sync to our original chain.
- if err := rpctest.ConnectNode(miner1, miner2); err != nil {
- t.Fatalf("unable to connect harnesses: %v", err)
- }
- nodeSlice := []*rpctest.Harness{miner1, miner2}
- if err := rpctest.JoinNodes(nodeSlice, rpctest.Blocks); err != nil {
- t.Fatalf("unable to join node on blocks: %v", err)
- }
-
- // The two should be on the same blockheight.
- _, nodeHeight1, err := miner1.Node.GetBestBlock()
- if err != nil {
- t.Fatalf("unable to get current blockheight %v", err)
- }
-
- _, nodeHeight2, err := miner2.Node.GetBestBlock()
- if err != nil {
- t.Fatalf("unable to get current blockheight %v", err)
- }
-
- if nodeHeight1 != nodeHeight2 {
- t.Fatalf("expected both miners to be on the same height: %v vs %v",
- nodeHeight1, nodeHeight2)
- }
-
- // We disconnect the two nodes, such that we can start mining on them
- // individually without the other one learning about the new blocks.
- err = miner1.Node.AddNode(miner2.P2PAddress(), rpcclient.ANRemove)
- if err != nil {
- t.Fatalf("unable to remove node: %v", err)
- }
-
- // Now mine on each chain separately
- blocks, err := miner1.Node.Generate(numBlocks)
- if err != nil {
- t.Fatalf("unable to generate single block: %v", err)
- }
-
- // We generate an extra block on miner 2's chain to ensure it is the
- // longer chain.
- _, err = miner2.Node.Generate(numBlocks + 1)
- if err != nil {
- t.Fatalf("unable to generate single block: %v", err)
- }
-
- // Sync the two chains to ensure they will sync to miner2's chain.
- if err := rpctest.ConnectNode(miner1, miner2); err != nil {
- t.Fatalf("unable to connect harnesses: %v", err)
- }
- nodeSlice = []*rpctest.Harness{miner1, miner2}
- if err := rpctest.JoinNodes(nodeSlice, rpctest.Blocks); err != nil {
- t.Fatalf("unable to join node on blocks: %v", err)
- }
-
- // The two should be on the same block hash.
- timeout := time.After(10 * time.Second)
- for {
- nodeHash1, _, err := miner1.Node.GetBestBlock()
- if err != nil {
- t.Fatalf("unable to get current block hash: %v", err)
- }
-
- nodeHash2, _, err := miner2.Node.GetBestBlock()
- if err != nil {
- t.Fatalf("unable to get current block hash: %v", err)
- }
-
- if *nodeHash1 == *nodeHash2 {
- break
- }
- select {
- case <-timeout:
- t.Fatalf("Unable to sync two chains")
- case <-time.After(50 * time.Millisecond):
- continue
- }
- }
-
- // Next, start the notifier with outdated best block information.
- // We set the notifier's best block to be the last block mined on the
- // shorter chain, to test that the notifier correctly rewinds to
- // the common ancestor between the two chains.
- syncHeight := nodeHeight1 + numBlocks + 1
- err = notifier.UnsafeStart(
- nodeHeight1+numBlocks, blocks[numBlocks-1], syncHeight, nil,
- )
- if err != nil {
- t.Fatalf("Unable to unsafe start the notifier: %v", err)
- }
- defer notifier.Stop()
-
- // Create numClients clients who will listen for block notifications.
- clients := make([]*chainntnfs.BlockEpochEvent, 0, numClients)
- for i := 0; i < numClients; i++ {
- epochClient, err := notifier.RegisterBlockEpochNtfn(nil)
- if err != nil {
- t.Fatalf("unable to register for epoch notification: %v", err)
- }
-
- // Drain the notification dispatched upon registration as we're
- // not interested in it.
- select {
- case <-epochClient.Epochs:
- case <-time.After(5 * time.Second):
- t.Fatal("expected to receive epoch for current block " +
- "upon registration")
- }
-
- clients = append(clients, epochClient)
- }
-
- // Generate a single block, which should trigger the notifier to rewind
- // to the common ancestor and dispatch notifications from there.
- _, err = miner2.Node.Generate(1)
- if err != nil {
- t.Fatalf("unable to generate single block: %v", err)
- }
-
- // If the chain backend to the notifier stores information about reorged
- // blocks, the notifier is able to rewind the chain to the common
- // ancestor between the chain tip and its outdated best known block.
- // In this case, the client is expected to receive numBlocks + 2
- // notifications, 1 for each block the notifier has missed out on from
- // the longer chain.
- //
- // If the chain backend does not store information about reorged blocks,
- // the notifier has no way of knowing where to rewind to and therefore
- // the client is only expected to receive notifications for blocks
- // whose height is greater than the notifier's best known height: 2
- // notifications, in this case.
- var startingHeight int32
- switch notifier.(type) {
- case *neutrinonotify.NeutrinoNotifier:
- startingHeight = nodeHeight1 + numBlocks + 1
- default:
- startingHeight = nodeHeight1 + 1
- }
-
- for expectedHeight := startingHeight; expectedHeight <=
- nodeHeight1+numBlocks+2; expectedHeight++ {
-
- for _, epochClient := range clients {
- select {
- case block := <-epochClient.Epochs:
- if block.Height != expectedHeight {
- t.Fatalf("received block of height: %d, "+
- "expected: %d", block.Height,
- expectedHeight)
- }
- case <-time.After(20 * time.Second):
- t.Fatalf("did not receive historical notification "+
- "for height %d", expectedHeight)
- }
- }
- }
-
- // Finally, ensure that an extra block notification wasn't received.
- anyExtras := make(chan struct{}, len(clients))
- for _, epochClient := range clients {
- wg.Add(1)
- go func(epochClient *chainntnfs.BlockEpochEvent) {
- defer wg.Done()
- select {
- case <-epochClient.Epochs:
- anyExtras <- struct{}{}
- case <-time.After(5 * time.Second):
- }
- }(epochClient)
- }
-
- wg.Wait()
- close(anyExtras)
-
- var extraCount int
- for range anyExtras {
- extraCount++
- }
-
- if extraCount > 0 {
- t.Fatalf("received %d unexpected block notification", extraCount)
- }
-}
-
-type txNtfnTestCase struct {
- name string
- test func(node *rpctest.Harness, notifier chainntnfs.TestChainNotifier,
- scriptDispatch bool, t *testing.T)
-}
-
-type blockNtfnTestCase struct {
- name string
- test func(node *rpctest.Harness, notifier chainntnfs.TestChainNotifier,
- t *testing.T)
-}
-
-type blockCatchupTestCase struct {
- name string
- test func(node *rpctest.Harness, notifier chainntnfs.TestChainNotifier,
- t *testing.T)
-}
-
-var txNtfnTests = []txNtfnTestCase{
- {
- name: "single conf ntfn",
- test: testSingleConfirmationNotification,
- },
- {
- name: "multi conf ntfn",
- test: testMultiConfirmationNotification,
- },
- {
- name: "batch conf ntfn",
- test: testBatchConfirmationNotification,
- },
- {
- name: "multi client conf",
- test: testMultiClientConfirmationNotification,
- },
- {
- name: "lazy ntfn consumer",
- test: testLazyNtfnConsumer,
- },
- {
- name: "historical conf dispatch",
- test: testTxConfirmedBeforeNtfnRegistration,
- },
- {
- name: "reorg conf",
- test: testReorgConf,
- },
- {
- name: "spend ntfn",
- test: testSpendNotification,
- },
- {
- name: "historical spend dispatch",
- test: testSpendBeforeNtfnRegistration,
- },
- {
- name: "reorg spend",
- test: testReorgSpend,
- },
- {
- name: "cancel spend ntfn",
- test: testCancelSpendNtfn,
- },
-}
-
-var blockNtfnTests = []blockNtfnTestCase{
- {
- name: "block epoch",
- test: testBlockEpochNotification,
- },
- {
- name: "cancel epoch ntfn",
- test: testCancelEpochNtfn,
- },
-}
-
-var blockCatchupTests = []blockCatchupTestCase{
- {
- name: "catch up client on historical block epoch ntfns",
- test: testCatchUpClientOnMissedBlocks,
- },
- {
- name: "test catch up on missed blocks",
- test: testCatchUpOnMissedBlocks,
- },
- {
- name: "test catch up on missed blocks w/ reorged best block",
- test: testCatchUpOnMissedBlocksWithReorg,
- },
-}
-
-// TestInterfaces tests all registered interfaces with a unified set of tests
-// which exercise each of the required methods found within the ChainNotifier
-// interface.
-//
-// NOTE: In the future, when additional implementations of the ChainNotifier
-// interface have been implemented, in order to ensure the new concrete
-// implementation is automatically tested, two steps must be undertaken. First,
-// one needs add a "non-captured" (_) import from the new sub-package. This
-// import should trigger an init() method within the package which registers
-// the interface. Second, an additional case in the switch within the main loop
-// below needs to be added which properly initializes the interface.
-// TODO(cjd): DISABLED TEST - our neutrino not working with sha256 chains yet
-func _TestInterfaces(t *testing.T) {
- // Initialize the harness around a btcd node which will serve as our
- // dedicated miner to generate blocks, cause re-orgs, etc. We'll set up
- // this node with a chain length of 125, so we have plenty of BTC to
- // play around with.
- miner, tearDown := chainntnfs.NewMiner(t, nil, true, 25)
- defer tearDown()
-
- rpcConfig := miner.RPCConfig()
- p2pAddr := miner.P2PAddress()
-
- log.Printf("Running %v ChainNotifier interface tests",
- 2*len(txNtfnTests)+len(blockNtfnTests)+len(blockCatchupTests))
-
- for _, notifierDriver := range chainntnfs.RegisteredNotifiers() {
- // Initialize a height hint cache for each notifier.
- tempDir, errr := ioutil.TempDir("", "channeldb")
- if errr != nil {
- t.Fatalf("unable to create temp dir: %v", errr)
- }
- db, err := channeldb.Open(tempDir)
- if err != nil {
- t.Fatalf("unable to create db: %v", err)
- }
- testCfg := chainntnfs.CacheConfig{
- QueryDisable: false,
- }
- hintCache, err := chainntnfs.NewHeightHintCache(testCfg, db)
- if err != nil {
- t.Fatalf("unable to create height hint cache: %v", err)
- }
-
- var (
- cleanUp func()
- newNotifier func() (chainntnfs.TestChainNotifier, er.R)
- notifierType = notifierDriver.NotifierType
- )
-
- switch notifierType {
- case "btcd":
- newNotifier = func() (chainntnfs.TestChainNotifier, er.R) {
- return btcdnotify.New(
- &rpcConfig, chainntnfs.NetParams,
- hintCache, hintCache,
- )
- }
-
- case "neutrino":
- var spvNode *neutrino.ChainService
- spvNode, cleanUp = chainntnfs.NewNeutrinoBackend(
- t, p2pAddr,
- )
- newNotifier = func() (chainntnfs.TestChainNotifier, er.R) {
- return neutrinonotify.New(
- spvNode, hintCache, hintCache,
- ), nil
- }
- }
-
- log.Printf("Running ChainNotifier interface tests for: %v",
- notifierType)
-
- notifier, err := newNotifier()
- if err != nil {
- t.Fatalf("unable to create %v notifier: %v",
- notifierType, err)
- }
- if err := notifier.Start(); err != nil {
- t.Fatalf("unable to start notifier %v: %v",
- notifierType, err)
- }
-
- for _, txNtfnTest := range txNtfnTests {
- for _, scriptDispatch := range []bool{false, true} {
- testName := fmt.Sprintf("%v %v", notifierType,
- txNtfnTest.name)
- if scriptDispatch {
- testName += " with script dispatch"
- }
- success := t.Run(testName, func(t *testing.T) {
- txNtfnTest.test(
- miner, notifier, scriptDispatch,
- t,
- )
- })
- if !success {
- break
- }
- }
- }
-
- for _, blockNtfnTest := range blockNtfnTests {
- testName := fmt.Sprintf("%v %v", notifierType,
- blockNtfnTest.name)
- success := t.Run(testName, func(t *testing.T) {
- blockNtfnTest.test(miner, notifier, t)
- })
- if !success {
- break
- }
- }
-
- notifier.Stop()
-
- // Run catchup tests separately since they require restarting
- // the notifier every time.
- for _, blockCatchupTest := range blockCatchupTests {
- notifier, err = newNotifier()
- if err != nil {
- t.Fatalf("unable to create %v notifier: %v",
- notifierType, err)
- }
-
- testName := fmt.Sprintf("%v %v", notifierType,
- blockCatchupTest.name)
-
- success := t.Run(testName, func(t *testing.T) {
- blockCatchupTest.test(miner, notifier, t)
- })
- if !success {
- break
- }
- }
-
- if cleanUp != nil {
- cleanUp()
- }
- }
-}
-
-func TestMain(m *testing.M) {
- globalcfg.SelectConfig(globalcfg.BitcoinDefaults())
- os.Exit(m.Run())
-}
diff --git a/lnd/chainntnfs/neutrinonotify/driver.go b/lnd/chainntnfs/neutrinonotify/driver.go
deleted file mode 100644
index ae8dafad..00000000
--- a/lnd/chainntnfs/neutrinonotify/driver.go
+++ /dev/null
@@ -1,53 +0,0 @@
-package neutrinonotify
-
-import (
- "fmt"
-
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/lnd/chainntnfs"
- "github.com/pkt-cash/pktd/neutrino"
-)
-
-// createNewNotifier creates a new instance of the ChainNotifier interface
-// implemented by NeutrinoNotifier.
-func createNewNotifier(args ...interface{}) (chainntnfs.ChainNotifier, er.R) {
- if len(args) != 3 {
- return nil, er.Errorf("incorrect number of arguments to "+
- ".New(...), expected 3, instead passed %v", len(args))
- }
-
- config, ok := args[0].(*neutrino.ChainService)
- if !ok {
- return nil, er.New("first argument to neutrinonotify.New " +
- "is incorrect, expected a *neutrino.ChainService")
- }
-
- spendHintCache, ok := args[1].(chainntnfs.SpendHintCache)
- if !ok {
- return nil, er.New("second argument to neutrinonotify.New " +
- "is incorrect, expected a chainntfs.SpendHintCache")
- }
-
- confirmHintCache, ok := args[2].(chainntnfs.ConfirmHintCache)
- if !ok {
- return nil, er.New("third argument to neutrinonotify.New " +
- "is incorrect, expected a chainntfs.ConfirmHintCache")
- }
-
- return New(config, spendHintCache, confirmHintCache), nil
-}
-
-// init registers a driver for the NeutrinoNotify concrete implementation of
-// the chainntnfs.ChainNotifier interface.
-func init() {
- // Register the driver.
- notifier := &chainntnfs.NotifierDriver{
- NotifierType: notifierType,
- New: createNewNotifier,
- }
-
- if err := chainntnfs.RegisterNotifier(notifier); err != nil {
- panic(fmt.Sprintf("failed to register notifier driver '%s': %v",
- notifierType, err))
- }
-}
diff --git a/lnd/chainntnfs/neutrinonotify/neutrino.go b/lnd/chainntnfs/neutrinonotify/neutrino.go
deleted file mode 100644
index 3ece4e8e..00000000
--- a/lnd/chainntnfs/neutrinonotify/neutrino.go
+++ /dev/null
@@ -1,1038 +0,0 @@
-package neutrinonotify
-
-import (
- "strings"
- "sync"
- "sync/atomic"
- "time"
-
- "github.com/pkt-cash/pktd/btcjson"
- "github.com/pkt-cash/pktd/btcutil"
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/btcutil/gcs/builder"
- "github.com/pkt-cash/pktd/chaincfg/chainhash"
- "github.com/pkt-cash/pktd/lnd/chainntnfs"
- "github.com/pkt-cash/pktd/lnd/queue"
- "github.com/pkt-cash/pktd/neutrino"
- "github.com/pkt-cash/pktd/pktlog/log"
- "github.com/pkt-cash/pktd/pktwallet/waddrmgr"
- "github.com/pkt-cash/pktd/rpcclient"
- "github.com/pkt-cash/pktd/txscript"
- "github.com/pkt-cash/pktd/wire"
-)
-
-const (
- // notifierType uniquely identifies this concrete implementation of the
- // ChainNotifier interface.
- notifierType = "neutrino"
-)
-
-// NeutrinoNotifier is a version of ChainNotifier that's backed by the neutrino
-// Bitcoin light client. Unlike other implementations, this implementation
-// speaks directly to the p2p network. As a result, this implementation of the
-// ChainNotifier interface is much more light weight that other implementation
-// which rely of receiving notification over an RPC interface backed by a
-// running full node.
-//
-// TODO(roasbeef): heavily consolidate with NeutrinoNotifier code
-// * maybe combine into single package?
-type NeutrinoNotifier struct {
- epochClientCounter uint64 // To be used atomically.
-
- start sync.Once
- active int32 // To be used atomically.
- stopped int32 // To be used atomically.
-
- bestBlockMtx sync.RWMutex
- bestBlock chainntnfs.BlockEpoch
-
- p2pNode *neutrino.ChainService
- chainView *neutrino.Rescan
-
- chainConn *NeutrinoChainConn
-
- notificationCancels chan interface{}
- notificationRegistry chan interface{}
-
- txNotifier *chainntnfs.TxNotifier
-
- blockEpochClients map[uint64]*blockEpochRegistration
-
- rescanErr <-chan er.R
-
- chainUpdates *queue.ConcurrentQueue
- txUpdates *queue.ConcurrentQueue
-
- // spendHintCache is a cache used to query and update the latest height
- // hints for an outpoint. Each height hint represents the earliest
- // height at which the outpoint could have been spent within the chain.
- spendHintCache chainntnfs.SpendHintCache
-
- // confirmHintCache is a cache used to query the latest height hints for
- // a transaction. Each height hint represents the earliest height at
- // which the transaction could have confirmed within the chain.
- confirmHintCache chainntnfs.ConfirmHintCache
-
- wg sync.WaitGroup
- quit chan struct{}
-}
-
-// Ensure NeutrinoNotifier implements the ChainNotifier interface at compile time.
-var _ chainntnfs.ChainNotifier = (*NeutrinoNotifier)(nil)
-
-// New creates a new instance of the NeutrinoNotifier concrete implementation
-// of the ChainNotifier interface.
-//
-// NOTE: The passed neutrino node should already be running and active before
-// being passed into this function.
-func New(node *neutrino.ChainService, spendHintCache chainntnfs.SpendHintCache,
- confirmHintCache chainntnfs.ConfirmHintCache) *NeutrinoNotifier {
-
- return &NeutrinoNotifier{
- notificationCancels: make(chan interface{}),
- notificationRegistry: make(chan interface{}),
-
- blockEpochClients: make(map[uint64]*blockEpochRegistration),
-
- p2pNode: node,
- chainConn: &NeutrinoChainConn{node},
-
- rescanErr: make(chan er.R),
-
- chainUpdates: queue.NewConcurrentQueue(10),
- txUpdates: queue.NewConcurrentQueue(10),
-
- spendHintCache: spendHintCache,
- confirmHintCache: confirmHintCache,
-
- quit: make(chan struct{}),
- }
-}
-
-// Start contacts the running neutrino light client and kicks off an initial
-// empty rescan.
-func (n *NeutrinoNotifier) Start() er.R {
- var startErr er.R
- n.start.Do(func() {
- startErr = n.startNotifier()
- })
- return startErr
-}
-
-// Stop shuts down the NeutrinoNotifier.
-func (n *NeutrinoNotifier) Stop() er.R {
- // Already shutting down?
- if atomic.AddInt32(&n.stopped, 1) != 1 {
- return nil
- }
-
- close(n.quit)
- n.wg.Wait()
-
- n.chainUpdates.Stop()
- n.txUpdates.Stop()
-
- // Notify all pending clients of our shutdown by closing the related
- // notification channels.
- for _, epochClient := range n.blockEpochClients {
- close(epochClient.cancelChan)
- epochClient.wg.Wait()
-
- close(epochClient.epochChan)
- }
- n.txNotifier.TearDown()
-
- return nil
-}
-
-// Started returns true if this instance has been started, and false otherwise.
-func (n *NeutrinoNotifier) Started() bool {
- return atomic.LoadInt32(&n.active) != 0
-}
-
-func (n *NeutrinoNotifier) startNotifier() er.R {
- // Start our concurrent queues before starting the rescan, to ensure
- // onFilteredBlockConnected and onRelavantTx callbacks won't be
- // blocked.
- n.chainUpdates.Start()
- n.txUpdates.Start()
-
- // First, we'll obtain the latest block height of the p2p node. We'll
- // start the auto-rescan from this point. Once a caller actually wishes
- // to register a chain view, the rescan state will be rewound
- // accordingly.
- startingPoint, err := n.p2pNode.BestBlock()
- if err != nil {
- n.txUpdates.Stop()
- n.chainUpdates.Stop()
- return err
- }
- n.bestBlock.Hash = &startingPoint.Hash
- n.bestBlock.Height = startingPoint.Height
-
- n.txNotifier = chainntnfs.NewTxNotifier(
- uint32(n.bestBlock.Height), chainntnfs.ReorgSafetyLimit,
- n.confirmHintCache, n.spendHintCache,
- )
-
- // Next, we'll create our set of rescan options. Currently it's
- // required that a user MUST set an addr/outpoint/txid when creating a
- // rescan. To get around this, we'll add a "zero" outpoint, that won't
- // actually be matched.
- var zeroInput neutrino.InputWithScript
- rescanOptions := []neutrino.RescanOption{
- neutrino.StartBlock(startingPoint),
- neutrino.QuitChan(n.quit),
- neutrino.NotificationHandlers(
- rpcclient.NotificationHandlers{
- OnFilteredBlockConnected: n.onFilteredBlockConnected,
- OnFilteredBlockDisconnected: n.onFilteredBlockDisconnected,
- OnRedeemingTx: n.onRelevantTx,
- },
- ),
- neutrino.WatchInputs(zeroInput),
- }
-
- // Finally, we'll create our rescan struct, start it, and launch all
- // the goroutines we need to operate this ChainNotifier instance.
- n.chainView = neutrino.NewRescan(
- &neutrino.RescanChainSource{
- ChainService: n.p2pNode,
- },
- rescanOptions...,
- )
- n.rescanErr = n.chainView.Start()
-
- n.wg.Add(1)
- go n.notificationDispatcher()
-
- // Set the active flag now that we've completed the full
- // startup.
- atomic.StoreInt32(&n.active, 1)
-
- return nil
-}
-
-// filteredBlock represents a new block which has been connected to the main
-// chain. The slice of transactions will only be populated if the block
-// includes a transaction that confirmed one of our watched txids, or spends
-// one of the outputs currently being watched.
-type filteredBlock struct {
- hash chainhash.Hash
- height uint32
- txns []*btcutil.Tx
-
- // connected is true if this update is a new block and false if it is a
- // disconnected block.
- connect bool
-}
-
-// rescanFilterUpdate represents a request that will be sent to the
-// notificaionRegistry in order to prevent race conditions between the filter
-// update and new block notifications.
-type rescanFilterUpdate struct {
- updateOptions []neutrino.UpdateOption
- errChan chan er.R
-}
-
-// onFilteredBlockConnected is a callback which is executed each a new block is
-// connected to the end of the main chain.
-func (n *NeutrinoNotifier) onFilteredBlockConnected(height int32,
- header *wire.BlockHeader, txns []*btcutil.Tx) {
-
- // Append this new chain update to the end of the queue of new chain
- // updates.
- select {
- case n.chainUpdates.ChanIn() <- &filteredBlock{
- hash: header.BlockHash(),
- height: uint32(height),
- txns: txns,
- connect: true,
- }:
- case <-n.quit:
- }
-}
-
-// onFilteredBlockDisconnected is a callback which is executed each time a new
-// block has been disconnected from the end of the mainchain due to a re-org.
-func (n *NeutrinoNotifier) onFilteredBlockDisconnected(height int32,
- header *wire.BlockHeader) {
-
- // Append this new chain update to the end of the queue of new chain
- // disconnects.
- select {
- case n.chainUpdates.ChanIn() <- &filteredBlock{
- hash: header.BlockHash(),
- height: uint32(height),
- connect: false,
- }:
- case <-n.quit:
- }
-}
-
-// relevantTx represents a relevant transaction to the notifier that fulfills
-// any outstanding spend requests.
-type relevantTx struct {
- tx *btcutil.Tx
- details *btcjson.BlockDetails
-}
-
-// onRelevantTx is a callback that proxies relevant transaction notifications
-// from the backend to the notifier's main event handler.
-func (n *NeutrinoNotifier) onRelevantTx(tx *btcutil.Tx, details *btcjson.BlockDetails) {
- select {
- case n.txUpdates.ChanIn() <- &relevantTx{tx, details}:
- case <-n.quit:
- }
-}
-
-// notificationDispatcher is the primary goroutine which handles client
-// notification registrations, as well as notification dispatches.
-func (n *NeutrinoNotifier) notificationDispatcher() {
- defer n.wg.Done()
-out:
- for {
- select {
- case cancelMsg := <-n.notificationCancels:
- switch msg := cancelMsg.(type) {
- case *epochCancel:
- log.Infof("Cancelling epoch "+
- "notification, epoch_id=%v", msg.epochID)
-
- // First, we'll lookup the original
- // registration in order to stop the active
- // queue goroutine.
- reg := n.blockEpochClients[msg.epochID]
- reg.epochQueue.Stop()
-
- // Next, close the cancel channel for this
- // specific client, and wait for the client to
- // exit.
- close(n.blockEpochClients[msg.epochID].cancelChan)
- n.blockEpochClients[msg.epochID].wg.Wait()
-
- // Once the client has exited, we can then
- // safely close the channel used to send epoch
- // notifications, in order to notify any
- // listeners that the intent has been
- // canceled.
- close(n.blockEpochClients[msg.epochID].epochChan)
- delete(n.blockEpochClients, msg.epochID)
- }
-
- case registerMsg := <-n.notificationRegistry:
- switch msg := registerMsg.(type) {
- case *chainntnfs.HistoricalConfDispatch:
- // We'll start a historical rescan chain of the
- // chain asynchronously to prevent blocking
- // potentially long rescans.
- n.wg.Add(1)
- go func() {
- defer n.wg.Done()
-
- confDetails, err := n.historicalConfDetails(
- msg.ConfRequest,
- msg.StartHeight, msg.EndHeight,
- )
- if err != nil {
- log.Error(err)
- return
- }
-
- // If the historical dispatch finished
- // without error, we will invoke
- // UpdateConfDetails even if none were
- // found. This allows the notifier to
- // begin safely updating the height hint
- // cache at tip, since any pending
- // rescans have now completed.
- err = n.txNotifier.UpdateConfDetails(
- msg.ConfRequest, confDetails,
- )
- if err != nil {
- log.Error(err)
- }
- }()
-
- case *blockEpochRegistration:
- log.Infof("New block epoch subscription")
-
- n.blockEpochClients[msg.epochID] = msg
-
- // If the client did not provide their best
- // known block, then we'll immediately dispatch
- // a notification for the current tip.
- if msg.bestBlock == nil {
- n.notifyBlockEpochClient(
- msg, n.bestBlock.Height,
- n.bestBlock.Hash,
- )
-
- msg.errorChan <- nil
- continue
- }
-
- // Otherwise, we'll attempt to deliver the
- // backlog of notifications from their best
- // known block.
- n.bestBlockMtx.Lock()
- bestHeight := n.bestBlock.Height
- n.bestBlockMtx.Unlock()
-
- missedBlocks, err := chainntnfs.GetClientMissedBlocks(
- n.chainConn, msg.bestBlock, bestHeight,
- false,
- )
- if err != nil {
- msg.errorChan <- err
- continue
- }
-
- for _, block := range missedBlocks {
- n.notifyBlockEpochClient(
- msg, block.Height, block.Hash,
- )
- }
-
- msg.errorChan <- nil
-
- case *rescanFilterUpdate:
- err := n.chainView.Update(msg.updateOptions...)
- if err != nil {
- log.Errorf("Unable to "+
- "update rescan filter: %v", err)
- }
- msg.errChan <- err
- }
-
- case item := <-n.chainUpdates.ChanOut():
- update := item.(*filteredBlock)
- if update.connect {
- n.bestBlockMtx.Lock()
- // Since neutrino has no way of knowing what
- // height to rewind to in the case of a reorged
- // best known height, there is no point in
- // checking that the previous hash matches the
- // the hash from our best known height the way
- // the other notifiers do when they receive
- // a new connected block. Therefore, we just
- // compare the heights.
- if update.height != uint32(n.bestBlock.Height+1) {
- // Handle the case where the notifier
- // missed some blocks from its chain
- // backend
- log.Infof("Missed blocks, " +
- "attempting to catch up")
-
- _, missedBlocks, err :=
- chainntnfs.HandleMissedBlocks(
- n.chainConn,
- n.txNotifier,
- n.bestBlock,
- int32(update.height),
- false,
- )
- if err != nil {
- log.Error(err)
- n.bestBlockMtx.Unlock()
- continue
- }
-
- for _, block := range missedBlocks {
- filteredBlock, err :=
- n.getFilteredBlock(block)
- if err != nil {
- log.Error(err)
- n.bestBlockMtx.Unlock()
- continue out
- }
- err = n.handleBlockConnected(filteredBlock)
- if err != nil {
- log.Error(err)
- n.bestBlockMtx.Unlock()
- continue out
- }
- }
-
- }
-
- err := n.handleBlockConnected(update)
- if err != nil {
- log.Error(err)
- }
-
- n.bestBlockMtx.Unlock()
- continue
- }
-
- n.bestBlockMtx.Lock()
- if update.height != uint32(n.bestBlock.Height) {
- log.Infof("Missed disconnected " +
- "blocks, attempting to catch up")
- }
- newBestBlock, err := chainntnfs.RewindChain(
- n.chainConn, n.txNotifier, n.bestBlock,
- int32(update.height-1),
- )
- if err != nil {
- log.Errorf("Unable to rewind chain "+
- "from height %d to height %d: %v",
- n.bestBlock.Height, update.height-1, err)
- }
-
- // Set the bestHeight here in case a chain rewind
- // partially completed.
- n.bestBlock = newBestBlock
- n.bestBlockMtx.Unlock()
-
- case txUpdate := <-n.txUpdates.ChanOut():
- // A new relevant transaction notification has been
- // received from the backend. We'll attempt to process
- // it to determine if it fulfills any outstanding
- // confirmation and/or spend requests and dispatch
- // notifications for them.
- update := txUpdate.(*relevantTx)
- err := n.txNotifier.ProcessRelevantSpendTx(
- update.tx, uint32(update.details.Height),
- )
- if err != nil {
- log.Errorf("Unable to process "+
- "transaction %v: %v", update.tx.Hash(),
- err)
- }
-
- case err := <-n.rescanErr:
- log.Errorf("Error during rescan: %v", err)
-
- case <-n.quit:
- return
-
- }
- }
-}
-
-// historicalConfDetails looks up whether a confirmation request (txid/output
-// script) has already been included in a block in the active chain and, if so,
-// returns details about said block.
-func (n *NeutrinoNotifier) historicalConfDetails(confRequest chainntnfs.ConfRequest,
- startHeight, endHeight uint32) (*chainntnfs.TxConfirmation, er.R) {
-
- // Starting from the height hint, we'll walk forwards in the chain to
- // see if this transaction/output script has already been confirmed.
- for scanHeight := endHeight; scanHeight >= startHeight && scanHeight > 0; scanHeight-- {
- // Ensure we haven't been requested to shut down before
- // processing the next height.
- select {
- case <-n.quit:
- return nil, chainntnfs.ErrChainNotifierShuttingDown.Default()
- default:
- }
-
- // First, we'll fetch the block header for this height so we
- // can compute the current block hash.
- blockHash, err := n.p2pNode.GetBlockHash(int64(scanHeight))
- if err != nil {
- return nil, er.Errorf("unable to get header for height=%v: %v",
- scanHeight, err)
- }
-
- // With the hash computed, we can now fetch the basic filter for this
- // height. Since the range of required items is known we avoid
- // roundtrips by requesting a batched response and save bandwidth by
- // limiting the max number of items per batch. Since neutrino populates
- // its underline filters cache with the batch response, the next call
- // will execute a network query only once per batch and not on every
- // iteration.
- regFilter, err := n.p2pNode.GetCFilter(
- *blockHash, wire.GCSFilterRegular,
- neutrino.NumRetries(5),
- neutrino.OptimisticReverseBatch(),
- // TODO(cjd): Maybe we want to implement MaxBatchSize in neutrino?
- //neutrino.MaxBatchSize(int64(scanHeight-startHeight+1)),
- )
- if err != nil {
- return nil, er.Errorf("unable to retrieve regular filter for "+
- "height=%v: %v", scanHeight, err)
- }
-
- // In the case that the filter exists, we'll attempt to see if
- // any element in it matches our target public key script.
- key := builder.DeriveKey(blockHash)
- match, err := regFilter.Match(key, confRequest.PkScript.Script())
- if err != nil {
- return nil, er.Errorf("unable to query filter: %v", err)
- }
-
- // If there's no match, then we can continue forward to the
- // next block.
- if !match {
- continue
- }
-
- // In the case that we do have a match, we'll fetch the block
- // from the network so we can find the positional data required
- // to send the proper response.
- block, err := n.p2pNode.GetBlock(*blockHash)
- if err != nil {
- return nil, er.Errorf("unable to get block from network: %v", err)
- }
-
- // For every transaction in the block, check which one matches
- // our request. If we find one that does, we can dispatch its
- // confirmation details.
- for i, tx := range block.Transactions() {
- if !confRequest.MatchesTx(tx.MsgTx()) {
- continue
- }
-
- return &chainntnfs.TxConfirmation{
- Tx: tx.MsgTx(),
- BlockHash: blockHash,
- BlockHeight: scanHeight,
- TxIndex: uint32(i),
- }, nil
- }
- }
-
- return nil, nil
-}
-
-// handleBlockConnected applies a chain update for a new block. Any watched
-// transactions included this block will processed to either send notifications
-// now or after numConfirmations confs.
-//
-// NOTE: This method must be called with the bestBlockMtx lock held.
-func (n *NeutrinoNotifier) handleBlockConnected(newBlock *filteredBlock) er.R {
- // We'll extend the txNotifier's height with the information of this new
- // block, which will handle all of the notification logic for us.
- err := n.txNotifier.ConnectTip(
- &newBlock.hash, newBlock.height, newBlock.txns,
- )
- if err != nil {
- return er.Errorf("unable to connect tip: %v", err)
- }
-
- log.Infof("New block: height=%v, sha=%v", newBlock.height,
- newBlock.hash)
-
- // Now that we've guaranteed the new block extends the txNotifier's
- // current tip, we'll proceed to dispatch notifications to all of our
- // registered clients whom have had notifications fulfilled. Before
- // doing so, we'll make sure update our in memory state in order to
- // satisfy any client requests based upon the new block.
- n.bestBlock.Hash = &newBlock.hash
- n.bestBlock.Height = int32(newBlock.height)
-
- n.notifyBlockEpochs(int32(newBlock.height), &newBlock.hash)
- return n.txNotifier.NotifyHeight(newBlock.height)
-}
-
-// getFilteredBlock is a utility to retrieve the full filtered block from a block epoch.
-func (n *NeutrinoNotifier) getFilteredBlock(epoch chainntnfs.BlockEpoch) (*filteredBlock, er.R) {
- rawBlock, err := n.p2pNode.GetBlock(*epoch.Hash)
- if err != nil {
- return nil, er.Errorf("unable to get block: %v", err)
- }
-
- txns := rawBlock.Transactions()
-
- block := &filteredBlock{
- hash: *epoch.Hash,
- height: uint32(epoch.Height),
- txns: txns,
- connect: true,
- }
- return block, nil
-}
-
-// notifyBlockEpochs notifies all registered block epoch clients of the newly
-// connected block to the main chain.
-func (n *NeutrinoNotifier) notifyBlockEpochs(newHeight int32, newSha *chainhash.Hash) {
- for _, client := range n.blockEpochClients {
- n.notifyBlockEpochClient(client, newHeight, newSha)
- }
-}
-
-// notifyBlockEpochClient sends a registered block epoch client a notification
-// about a specific block.
-func (n *NeutrinoNotifier) notifyBlockEpochClient(epochClient *blockEpochRegistration,
- height int32, sha *chainhash.Hash) {
-
- epoch := &chainntnfs.BlockEpoch{
- Height: height,
- Hash: sha,
- }
-
- select {
- case epochClient.epochQueue.ChanIn() <- epoch:
- case <-epochClient.cancelChan:
- case <-n.quit:
- }
-}
-
-// RegisterSpendNtfn registers an intent to be notified once the target
-// outpoint/output script has been spent by a transaction on-chain. When
-// intending to be notified of the spend of an output script, a nil outpoint
-// must be used. The heightHint should represent the earliest height in the
-// chain of the transaction that spent the outpoint/output script.
-//
-// Once a spend of has been detected, the details of the spending event will be
-// sent across the 'Spend' channel.
-func (n *NeutrinoNotifier) RegisterSpendNtfn(outpoint *wire.OutPoint,
- pkScript []byte, heightHint uint32) (*chainntnfs.SpendEvent, er.R) {
-
- // Register the conf notification with the TxNotifier. A non-nil value
- // for `dispatch` will be returned if we are required to perform a
- // manual scan for the confirmation. Otherwise the notifier will begin
- // watching at tip for the transaction to confirm.
- ntfn, err := n.txNotifier.RegisterSpend(outpoint, pkScript, heightHint)
- if err != nil {
- return nil, err
- }
-
- // To determine whether this outpoint has been spent on-chain, we'll
- // update our filter to watch for the transaction at tip and we'll also
- // dispatch a historical rescan to determine if it has been spent in the
- // past.
- //
- // We'll update our filter first to ensure we can immediately detect the
- // spend at tip.
- if outpoint == nil {
- outpoint = &chainntnfs.ZeroOutPoint
- }
- inputToWatch := neutrino.InputWithScript{
- OutPoint: *outpoint,
- PkScript: pkScript,
- }
- updateOptions := []neutrino.UpdateOption{
- neutrino.AddInputs(inputToWatch),
- neutrino.DisableDisconnectedNtfns(true),
- }
-
- // We'll use the txNotifier's tip as the starting point of our filter
- // update. In the case of an output script spend request, we'll check if
- // we should perform a historical rescan and start from there, as we
- // cannot do so with GetUtxo since it matches outpoints.
- rewindHeight := ntfn.Height
- if ntfn.HistoricalDispatch != nil && *outpoint == chainntnfs.ZeroOutPoint {
- rewindHeight = ntfn.HistoricalDispatch.StartHeight
- }
- updateOptions = append(updateOptions, neutrino.Rewind(rewindHeight))
-
- errChan := make(chan er.R, 1)
- select {
- case n.notificationRegistry <- &rescanFilterUpdate{
- updateOptions: updateOptions,
- errChan: errChan,
- }:
- case <-n.quit:
- return nil, chainntnfs.ErrChainNotifierShuttingDown.Default()
- }
-
- select {
- case err = <-errChan:
- case <-n.quit:
- return nil, chainntnfs.ErrChainNotifierShuttingDown.Default()
- }
- if err != nil {
- return nil, er.Errorf("unable to update filter: %v", err)
- }
-
- // If the txNotifier didn't return any details to perform a historical
- // scan of the chain, or if we already performed one like in the case of
- // output script spend requests, then we can return early as there's
- // nothing left for us to do.
- if ntfn.HistoricalDispatch == nil || *outpoint == chainntnfs.ZeroOutPoint {
- return ntfn.Event, nil
- }
-
- // With the filter updated, we'll dispatch our historical rescan to
- // ensure we detect the spend if it happened in the past.
- n.wg.Add(1)
- go func() {
- defer n.wg.Done()
-
- // We'll ensure that neutrino is caught up to the starting
- // height before we attempt to fetch the UTXO from the chain.
- // If we're behind, then we may miss a notification dispatch.
- for {
- n.bestBlockMtx.RLock()
- currentHeight := uint32(n.bestBlock.Height)
- n.bestBlockMtx.RUnlock()
-
- if currentHeight >= ntfn.HistoricalDispatch.StartHeight {
- break
- }
-
- select {
- case <-time.After(time.Millisecond * 200):
- case <-n.quit:
- return
- }
- }
-
- spendReport, err := n.p2pNode.GetUtxo(
- neutrino.WatchInputs(inputToWatch),
- neutrino.StartBlock(&waddrmgr.BlockStamp{
- Height: int32(ntfn.HistoricalDispatch.StartHeight),
- }),
- neutrino.EndBlock(&waddrmgr.BlockStamp{
- Height: int32(ntfn.HistoricalDispatch.EndHeight),
- }),
- neutrino.QuitChan(n.quit),
- )
- if err != nil && !strings.Contains(err.String(), "not found") {
- log.Errorf("Failed getting UTXO: %v", err)
- return
- }
-
- // If a spend report was returned, and the transaction is present, then
- // this means that the output is already spent.
- var spendDetails *chainntnfs.SpendDetail
- if spendReport != nil && spendReport.SpendingTx != nil {
- spendingTxHash := spendReport.SpendingTx.TxHash()
- spendDetails = &chainntnfs.SpendDetail{
- SpentOutPoint: outpoint,
- SpenderTxHash: &spendingTxHash,
- SpendingTx: spendReport.SpendingTx,
- SpenderInputIndex: spendReport.SpendingInputIndex,
- SpendingHeight: int32(spendReport.SpendingTxHeight),
- }
- }
-
- // Finally, no matter whether the rescan found a spend in the past or
- // not, we'll mark our historical rescan as complete to ensure the
- // outpoint's spend hint gets updated upon connected/disconnected
- // blocks.
- errr := n.txNotifier.UpdateSpendDetails(
- ntfn.HistoricalDispatch.SpendRequest, spendDetails,
- )
- if errr != nil {
- log.Errorf("Failed to update spend details: %v", errr)
- return
- }
- }()
-
- return ntfn.Event, nil
-}
-
-// RegisterConfirmationsNtfn registers an intent to be notified once the target
-// txid/output script has reached numConfs confirmations on-chain. When
-// intending to be notified of the confirmation of an output script, a nil txid
-// must be used. The heightHint should represent the earliest height at which
-// the txid/output script could have been included in the chain.
-//
-// Progress on the number of confirmations left can be read from the 'Updates'
-// channel. Once it has reached all of its confirmations, a notification will be
-// sent across the 'Confirmed' channel.
-func (n *NeutrinoNotifier) RegisterConfirmationsNtfn(txid *chainhash.Hash,
- pkScript []byte,
- numConfs, heightHint uint32) (*chainntnfs.ConfirmationEvent, er.R) {
-
- // Register the conf notification with the TxNotifier. A non-nil value
- // for `dispatch` will be returned if we are required to perform a
- // manual scan for the confirmation. Otherwise the notifier will begin
- // watching at tip for the transaction to confirm.
- ntfn, err := n.txNotifier.RegisterConf(
- txid, pkScript, numConfs, heightHint,
- )
- if err != nil {
- return nil, err
- }
-
- // To determine whether this transaction has confirmed on-chain, we'll
- // update our filter to watch for the transaction at tip and we'll also
- // dispatch a historical rescan to determine if it has confirmed in the
- // past.
- //
- // We'll update our filter first to ensure we can immediately detect the
- // confirmation at tip. To do so, we'll map the script into an address
- // type so we can instruct neutrino to match if the transaction
- // containing the script is found in a block.
- params := n.p2pNode.ChainParams()
- _, addrs, _, err := txscript.ExtractPkScriptAddrs(pkScript, ¶ms)
- if err != nil {
- return nil, er.Errorf("unable to extract script: %v", err)
- }
-
- // We'll send the filter update request to the notifier's main event
- // handler and wait for its response.
- errChan := make(chan er.R, 1)
- select {
- case n.notificationRegistry <- &rescanFilterUpdate{
- updateOptions: []neutrino.UpdateOption{
- neutrino.AddAddrs(addrs...),
- neutrino.Rewind(ntfn.Height),
- neutrino.DisableDisconnectedNtfns(true),
- },
- errChan: errChan,
- }:
- case <-n.quit:
- return nil, chainntnfs.ErrChainNotifierShuttingDown.Default()
- }
-
- select {
- case err = <-errChan:
- case <-n.quit:
- return nil, chainntnfs.ErrChainNotifierShuttingDown.Default()
- }
- if err != nil {
- return nil, er.Errorf("unable to update filter: %v", err)
- }
-
- // If a historical rescan was not requested by the txNotifier, then we
- // can return to the caller.
- if ntfn.HistoricalDispatch == nil {
- return ntfn.Event, nil
- }
-
- // Finally, with the filter updated, we can dispatch the historical
- // rescan to ensure we can detect if the event happened in the past.
- select {
- case n.notificationRegistry <- ntfn.HistoricalDispatch:
- case <-n.quit:
- return nil, chainntnfs.ErrChainNotifierShuttingDown.Default()
- }
-
- return ntfn.Event, nil
-}
-
-// blockEpochRegistration represents a client's intent to receive a
-// notification with each newly connected block.
-type blockEpochRegistration struct {
- epochID uint64
-
- epochChan chan *chainntnfs.BlockEpoch
-
- epochQueue *queue.ConcurrentQueue
-
- cancelChan chan struct{}
-
- bestBlock *chainntnfs.BlockEpoch
-
- errorChan chan er.R
-
- wg sync.WaitGroup
-}
-
-// epochCancel is a message sent to the NeutrinoNotifier when a client wishes
-// to cancel an outstanding epoch notification that has yet to be dispatched.
-type epochCancel struct {
- epochID uint64
-}
-
-// RegisterBlockEpochNtfn returns a BlockEpochEvent which subscribes the
-// caller to receive notifications, of each new block connected to the main
-// chain. Clients have the option of passing in their best known block, which
-// the notifier uses to check if they are behind on blocks and catch them up. If
-// they do not provide one, then a notification will be dispatched immediately
-// for the current tip of the chain upon a successful registration.
-func (n *NeutrinoNotifier) RegisterBlockEpochNtfn(
- bestBlock *chainntnfs.BlockEpoch) (*chainntnfs.BlockEpochEvent, er.R) {
-
- reg := &blockEpochRegistration{
- epochQueue: queue.NewConcurrentQueue(20),
- epochChan: make(chan *chainntnfs.BlockEpoch, 20),
- cancelChan: make(chan struct{}),
- epochID: atomic.AddUint64(&n.epochClientCounter, 1),
- bestBlock: bestBlock,
- errorChan: make(chan er.R, 1),
- }
- reg.epochQueue.Start()
-
- // Before we send the request to the main goroutine, we'll launch a new
- // goroutine to proxy items added to our queue to the client itself.
- // This ensures that all notifications are received *in order*.
- reg.wg.Add(1)
- go func() {
- defer reg.wg.Done()
-
- for {
- select {
- case ntfn := <-reg.epochQueue.ChanOut():
- blockNtfn := ntfn.(*chainntnfs.BlockEpoch)
- select {
- case reg.epochChan <- blockNtfn:
-
- case <-reg.cancelChan:
- return
-
- case <-n.quit:
- return
- }
-
- case <-reg.cancelChan:
- return
-
- case <-n.quit:
- return
- }
- }
- }()
-
- select {
- case <-n.quit:
- // As we're exiting before the registration could be sent,
- // we'll stop the queue now ourselves.
- reg.epochQueue.Stop()
-
- return nil, er.New("chainntnfs: system interrupt while " +
- "attempting to register for block epoch notification.")
- case n.notificationRegistry <- reg:
- return &chainntnfs.BlockEpochEvent{
- Epochs: reg.epochChan,
- Cancel: func() {
- cancel := &epochCancel{
- epochID: reg.epochID,
- }
-
- // Submit epoch cancellation to notification dispatcher.
- select {
- case n.notificationCancels <- cancel:
- // Cancellation is being handled, drain the epoch channel until it is
- // closed before yielding to caller.
- for {
- select {
- case _, ok := <-reg.epochChan:
- if !ok {
- return
- }
- case <-n.quit:
- return
- }
- }
- case <-n.quit:
- }
- },
- }, nil
- }
-}
-
-// NeutrinoChainConn is a wrapper around neutrino's chain backend in order
-// to satisfy the chainntnfs.ChainConn interface.
-type NeutrinoChainConn struct {
- p2pNode *neutrino.ChainService
-}
-
-// GetBlockHeader returns the block header for a hash.
-func (n *NeutrinoChainConn) GetBlockHeader(blockHash *chainhash.Hash) (*wire.BlockHeader, er.R) {
- return n.p2pNode.GetBlockHeader(blockHash)
-}
-
-// GetBlockHeaderVerbose returns a verbose block header result for a hash. This
-// result only contains the height with a nil hash.
-func (n *NeutrinoChainConn) GetBlockHeaderVerbose(blockHash *chainhash.Hash) (
- *btcjson.GetBlockHeaderVerboseResult, er.R) {
-
- height, err := n.p2pNode.GetBlockHeight(blockHash)
- if err != nil {
- return nil, err
- }
- // Since only the height is used from the result, leave the hash nil.
- return &btcjson.GetBlockHeaderVerboseResult{Height: int32(height)}, nil
-}
-
-// GetBlockHash returns the hash from a block height.
-func (n *NeutrinoChainConn) GetBlockHash(blockHeight int64) (*chainhash.Hash, er.R) {
- return n.p2pNode.GetBlockHash(blockHeight)
-}
diff --git a/lnd/chainntnfs/neutrinonotify/neutrino_dev.go b/lnd/chainntnfs/neutrinonotify/neutrino_dev.go
deleted file mode 100644
index fa987def..00000000
--- a/lnd/chainntnfs/neutrinonotify/neutrino_dev.go
+++ /dev/null
@@ -1,104 +0,0 @@
-// +build dev
-
-package neutrinonotify
-
-import (
- "time"
-
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/chaincfg/chainhash"
- "github.com/pkt-cash/pktd/lnd/chainntnfs"
- "github.com/pkt-cash/pktd/neutrino"
- "github.com/pkt-cash/pktd/rpcclient"
-)
-
-// UnsafeStart starts the notifier with a specified best height and optional
-// best hash. Its bestHeight, txNotifier and neutrino node are initialized with
-// bestHeight. The parameter generateBlocks is necessary for the bitcoind
-// notifier to ensure we drain all notifications up to syncHeight, since if they
-// are generated ahead of UnsafeStart the chainConn may start up with an
-// outdated best block and miss sending ntfns. Used for testing.
-func (n *NeutrinoNotifier) UnsafeStart(bestHeight int32,
- bestHash *chainhash.Hash, syncHeight int32,
- generateBlocks func() er.R) er.R {
-
- // We'll obtain the latest block height of the p2p node. We'll
- // start the auto-rescan from this point. Once a caller actually wishes
- // to register a chain view, the rescan state will be rewound
- // accordingly.
- startingPoint, err := n.p2pNode.BestBlock()
- if err != nil {
- return err
- }
-
- // Next, we'll create our set of rescan options. Currently it's
- // required that a user MUST set an addr/outpoint/txid when creating a
- // rescan. To get around this, we'll add a "zero" outpoint, that won't
- // actually be matched.
- var zeroInput neutrino.InputWithScript
- rescanOptions := []neutrino.RescanOption{
- neutrino.StartBlock(startingPoint),
- neutrino.QuitChan(n.quit),
- neutrino.NotificationHandlers(
- rpcclient.NotificationHandlers{
- OnFilteredBlockConnected: n.onFilteredBlockConnected,
- OnFilteredBlockDisconnected: n.onFilteredBlockDisconnected,
- },
- ),
- neutrino.WatchInputs(zeroInput),
- }
-
- n.txNotifier = chainntnfs.NewTxNotifier(
- uint32(bestHeight), chainntnfs.ReorgSafetyLimit,
- n.confirmHintCache, n.spendHintCache,
- )
-
- // Finally, we'll create our rescan struct, start it, and launch all
- // the goroutines we need to operate this ChainNotifier instance.
- n.chainView = neutrino.NewRescan(
- &neutrino.RescanChainSource{
- ChainService: n.p2pNode,
- },
- rescanOptions...,
- )
- n.rescanErr = n.chainView.Start()
-
- n.chainUpdates.Start()
- n.txUpdates.Start()
-
- if generateBlocks != nil {
- // Ensure no block notifications are pending when we start the
- // notification dispatcher goroutine.
-
- // First generate the blocks, then drain the notifications
- // for the generated blocks.
- if err := generateBlocks(); err != nil {
- return err
- }
-
- timeout := time.After(60 * time.Second)
- loop:
- for {
- select {
- case ntfn := <-n.chainUpdates.ChanOut():
- lastReceivedNtfn := ntfn.(*filteredBlock)
- if lastReceivedNtfn.height >= uint32(syncHeight) {
- break loop
- }
- case <-timeout:
- return er.Errorf("unable to catch up to height %d",
- syncHeight)
- }
- }
- }
-
- // Run notificationDispatcher after setting the notifier's best height
- // to avoid a race condition.
- n.bestBlock.Hash = bestHash
- n.bestBlock.Height = bestHeight
-
- n.wg.Add(1)
- go n.notificationDispatcher()
-
- return nil
-}
diff --git a/lnd/chainntnfs/test_utils.go b/lnd/chainntnfs/test_utils.go
deleted file mode 100644
index 33be12ad..00000000
--- a/lnd/chainntnfs/test_utils.go
+++ /dev/null
@@ -1,233 +0,0 @@
-// +build dev
-
-package chainntnfs
-
-import (
- "fmt"
- "io/ioutil"
- "os"
- "path/filepath"
- "testing"
- "time"
-
- "github.com/pkt-cash/pktd/btcec"
- "github.com/pkt-cash/pktd/btcjson"
- "github.com/pkt-cash/pktd/btcutil"
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/chaincfg"
- "github.com/pkt-cash/pktd/chaincfg/chainhash"
- "github.com/pkt-cash/pktd/integration/rpctest"
- "github.com/pkt-cash/pktd/neutrino"
- "github.com/pkt-cash/pktd/pktwallet/walletdb"
- "github.com/pkt-cash/pktd/txscript"
- "github.com/pkt-cash/pktd/txscript/params"
- "github.com/pkt-cash/pktd/wire"
-)
-
-var (
- // TrickleInterval is the interval at which the miner should trickle
- // transactions to its peers. We'll set it small to ensure the miner
- // propagates transactions quickly in the tests.
- TrickleInterval = 10 * time.Millisecond
-)
-
-var (
- NetParams = &chaincfg.RegressionNetParams
-)
-
-// randPubKeyHashScript generates a P2PKH script that pays to the public key of
-// a randomly-generated private key.
-func randPubKeyHashScript() ([]byte, *btcec.PrivateKey, er.R) {
- privKey, err := btcec.NewPrivateKey(btcec.S256())
- if err != nil {
- return nil, nil, err
- }
-
- pubKeyHash := btcutil.Hash160(privKey.PubKey().SerializeCompressed())
- addrScript, err := btcutil.NewAddressPubKeyHash(pubKeyHash, NetParams)
- if err != nil {
- return nil, nil, err
- }
-
- pkScript, err := txscript.PayToAddrScript(addrScript)
- if err != nil {
- return nil, nil, err
- }
-
- return pkScript, privKey, nil
-}
-
-// GetTestTxidAndScript generate a new test transaction and returns its txid and
-// the script of the output being generated.
-func GetTestTxidAndScript(h *rpctest.Harness) (*chainhash.Hash, []byte, er.R) {
- pkScript, _, err := randPubKeyHashScript()
- if err != nil {
- return nil, nil, er.Errorf("unable to generate pkScript: %v", err)
- }
- output := &wire.TxOut{Value: 2e8, PkScript: pkScript}
- txid, err := h.SendOutputs([]*wire.TxOut{output}, 10)
- if err != nil {
- return nil, nil, err
- }
-
- return txid, pkScript, nil
-}
-
-// WaitForMempoolTx waits for the txid to be seen in the miner's mempool.
-func WaitForMempoolTx(miner *rpctest.Harness, txid *chainhash.Hash) er.R {
- timeout := time.After(10 * time.Second)
- trickle := time.After(2 * TrickleInterval)
- for {
- // Check for the harness' knowledge of the txid.
- tx, err := miner.Node.GetRawTransaction(txid)
- if err != nil {
- if btcjson.ErrRPCNoTxInfo.Is(err) {
- continue
- }
- return err
- }
-
- if tx != nil && tx.Hash().IsEqual(txid) {
- break
- }
-
- select {
- case <-time.After(100 * time.Millisecond):
- case <-timeout:
- return er.New("timed out waiting for tx")
- }
- }
-
- // To ensure any transactions propagate from the miner to the peers
- // before returning, ensure we have waited for at least
- // 2*trickleInterval before returning.
- select {
- case <-trickle:
- case <-timeout:
- return er.New("timeout waiting for trickle interval. " +
- "Trickle interval to large?")
- }
-
- return nil
-}
-
-// CreateSpendableOutput creates and returns an output that can be spent later
-// on.
-func CreateSpendableOutput(t *testing.T,
- miner *rpctest.Harness) (*wire.OutPoint, *wire.TxOut, *btcec.PrivateKey) {
-
- t.Helper()
-
- // Create a transaction that only has one output, the one destined for
- // the recipient.
- pkScript, privKey, err := randPubKeyHashScript()
- if err != nil {
- t.Fatalf("unable to generate pkScript: %v", err)
- }
- output := &wire.TxOut{Value: 2e8, PkScript: pkScript}
- txid, err := miner.SendOutputsWithoutChange([]*wire.TxOut{output}, 10)
- if err != nil {
- t.Fatalf("unable to create tx: %v", err)
- }
-
- // Mine the transaction to mark the output as spendable.
- if err := WaitForMempoolTx(miner, txid); err != nil {
- t.Fatalf("tx not relayed to miner: %v", err)
- }
- if _, err := miner.Node.Generate(1); err != nil {
- t.Fatalf("unable to generate single block: %v", err)
- }
-
- return wire.NewOutPoint(txid, 0), output, privKey
-}
-
-// CreateSpendTx creates a transaction spending the specified output.
-func CreateSpendTx(t *testing.T, prevOutPoint *wire.OutPoint,
- prevOutput *wire.TxOut, privKey *btcec.PrivateKey) *wire.MsgTx {
-
- t.Helper()
-
- spendingTx := wire.NewMsgTx(1)
- spendingTx.AddTxIn(&wire.TxIn{PreviousOutPoint: *prevOutPoint})
- spendingTx.AddTxOut(&wire.TxOut{Value: 1e8, PkScript: prevOutput.PkScript})
-
- sigScript, err := txscript.SignatureScript(
- spendingTx, 0, prevOutput.PkScript, params.SigHashAll,
- privKey, true,
- )
- if err != nil {
- t.Fatalf("unable to sign tx: %v", err)
- }
- spendingTx.TxIn[0].SignatureScript = sigScript
-
- return spendingTx
-}
-
-// NewMiner spawns testing harness backed by a btcd node that can serve as a
-// miner.
-func NewMiner(t *testing.T, extraArgs []string, createChain bool,
- spendableOutputs uint32) (*rpctest.Harness, func()) {
-
- t.Helper()
-
- // Add the trickle interval argument to the extra args.
- trickle := fmt.Sprintf("--trickleinterval=%v", TrickleInterval)
- extraArgs = append(extraArgs, trickle, "--tls")
-
- node, err := rpctest.New(NetParams, nil, extraArgs)
- if err != nil {
- t.Fatalf("unable to create backend node: %v", err)
- }
- if err := node.SetUp(createChain, spendableOutputs); err != nil {
- node.TearDown()
- t.Fatalf("unable to set up backend node: %v", err)
- }
-
- return node, func() { node.TearDown() }
-}
-
-// NewNeutrinoBackend spawns a new neutrino node that connects to a miner at
-// the specified address.
-func NewNeutrinoBackend(t *testing.T, minerAddr string) (*neutrino.ChainService, func()) {
- t.Helper()
-
- spvDir, errr := ioutil.TempDir("", "neutrino")
- if errr != nil {
- t.Fatalf("unable to create temp dir: %v", errr)
- }
-
- dbName := filepath.Join(spvDir, "neutrino.db")
- spvDatabase, err := walletdb.Create("bdb", dbName, true)
- if err != nil {
- os.RemoveAll(spvDir)
- t.Fatalf("unable to create walletdb: %v", err)
- }
-
- // Create an instance of neutrino connected to the running btcd
- // instance.
- spvConfig := neutrino.Config{
- DataDir: spvDir,
- Database: spvDatabase,
- ChainParams: *NetParams,
- ConnectPeers: []string{minerAddr},
- }
- spvNode, err := neutrino.NewChainService(spvConfig)
- if err != nil {
- os.RemoveAll(spvDir)
- spvDatabase.Close()
- t.Fatalf("unable to create neutrino: %v", err)
- }
-
- // We'll also wait for the instance to sync up fully to the chain
- // generated by the btcd instance.
- spvNode.Start()
- for !spvNode.IsCurrent() {
- time.Sleep(time.Millisecond * 100)
- }
-
- return spvNode, func() {
- spvNode.Stop()
- spvDatabase.Close()
- os.RemoveAll(spvDir)
- }
-}
diff --git a/lnd/chainntnfs/txnotifier.go b/lnd/chainntnfs/txnotifier.go
deleted file mode 100644
index b339c9cb..00000000
--- a/lnd/chainntnfs/txnotifier.go
+++ /dev/null
@@ -1,1979 +0,0 @@
-package chainntnfs
-
-import (
- "bytes"
- "fmt"
- "sync"
- "sync/atomic"
-
- "github.com/pkt-cash/pktd/btcutil"
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/chaincfg/chainhash"
- "github.com/pkt-cash/pktd/lnd/channeldb"
- "github.com/pkt-cash/pktd/pktlog/log"
- "github.com/pkt-cash/pktd/txscript"
- "github.com/pkt-cash/pktd/wire"
-)
-
-const (
- // ReorgSafetyLimit is the chain depth beyond which it is assumed a
- // block will not be reorganized out of the chain. This is used to
- // determine when to prune old confirmation requests so that reorgs are
- // handled correctly. The average number of blocks in a day is a
- // reasonable value to use.
- ReorgSafetyLimit = 144
-
- // MaxNumConfs is the maximum number of confirmations that can be
- // requested on a transaction.
- MaxNumConfs = ReorgSafetyLimit
-)
-
-var (
- // ZeroHash is the value that should be used as the txid when
- // registering for the confirmation of a script on-chain. This allows
- // the notifier to match _and_ dispatch upon the inclusion of the script
- // on-chain, rather than the txid.
- ZeroHash chainhash.Hash
-
- // ZeroOutPoint is the value that should be used as the outpoint when
- // registering for the spend of a script on-chain. This allows the
- // notifier to match _and_ dispatch upon detecting the spend of the
- // script on-chain, rather than the outpoint.
- ZeroOutPoint wire.OutPoint
-)
-
-var (
- // ErrTxNotifierExiting is an error returned when attempting to interact
- // with the TxNotifier but it been shut down.
- ErrTxNotifierExiting = Err.CodeWithDetail("ErrTxNotifierExiting", "TxNotifier is exiting")
-
- // ErrNoScript is an error returned when a confirmation/spend
- // registration is attempted without providing an accompanying output
- // script.
- ErrNoScript = Err.CodeWithDetail("ErrNoScript", "an output script must be provided")
-
- // ErrNoHeightHint is an error returned when a confirmation/spend
- // registration is attempted without providing an accompanying height
- // hint.
- ErrNoHeightHint = Err.CodeWithDetail("ErrNoHeightHint",
- "a height hint greater than 0 must be provided")
-
- // ErrNumConfsOutOfRange is an error returned when a confirmation/spend
- // registration is attempted and the number of confirmations provided is
- // out of range.
- ErrNumConfsOutOfRange = Err.CodeWithDetail("ErrNumConfsOutOfRange",
- fmt.Sprintf("number of confirmations must be "+
- "between %d and %d", 1, MaxNumConfs))
-)
-
-// rescanState indicates the progression of a registration before the notifier
-// can begin dispatching confirmations at tip.
-type rescanState byte
-
-const (
- // rescanNotStarted is the initial state, denoting that a historical
- // dispatch may be required.
- rescanNotStarted rescanState = iota
-
- // rescanPending indicates that a dispatch has already been made, and we
- // are waiting for its completion. No other rescans should be dispatched
- // while in this state.
- rescanPending
-
- // rescanComplete signals either that a rescan was dispatched and has
- // completed, or that we began watching at tip immediately. In either
- // case, the notifier can only dispatch notifications from tip when in
- // this state.
- rescanComplete
-)
-
-// confNtfnSet holds all known, registered confirmation notifications for a
-// txid/output script. If duplicates notifications are requested, only one
-// historical dispatch will be spawned to ensure redundant scans are not
-// permitted. A single conf detail will be constructed and dispatched to all
-// interested
-// clients.
-type confNtfnSet struct {
- // ntfns keeps tracks of all the active client notification requests for
- // a transaction/output script
- ntfns map[uint64]*ConfNtfn
-
- // rescanStatus represents the current rescan state for the
- // transaction/output script.
- rescanStatus rescanState
-
- // details serves as a cache of the confirmation details of a
- // transaction that we'll use to determine if a transaction/output
- // script has already confirmed at the time of registration.
- // details is also used to make sure that in case of an address reuse
- // (funds sent to a previously confirmed script) no additional
- // notification is registered which would lead to an inconsistent state.
- details *TxConfirmation
-}
-
-// newConfNtfnSet constructs a fresh confNtfnSet for a group of clients
-// interested in a notification for a particular txid.
-func newConfNtfnSet() *confNtfnSet {
- return &confNtfnSet{
- ntfns: make(map[uint64]*ConfNtfn),
- rescanStatus: rescanNotStarted,
- }
-}
-
-// spendNtfnSet holds all known, registered spend notifications for a spend
-// request (outpoint/output script). If duplicate notifications are requested,
-// only one historical dispatch will be spawned to ensure redundant scans are
-// not permitted.
-type spendNtfnSet struct {
- // ntfns keeps tracks of all the active client notification requests for
- // an outpoint/output script.
- ntfns map[uint64]*SpendNtfn
-
- // rescanStatus represents the current rescan state for the spend
- // request (outpoint/output script).
- rescanStatus rescanState
-
- // details serves as a cache of the spend details for an outpoint/output
- // script that we'll use to determine if it has already been spent at
- // the time of registration.
- details *SpendDetail
-}
-
-// newSpendNtfnSet constructs a new spend notification set.
-func newSpendNtfnSet() *spendNtfnSet {
- return &spendNtfnSet{
- ntfns: make(map[uint64]*SpendNtfn),
- rescanStatus: rescanNotStarted,
- }
-}
-
-// ConfRequest encapsulates a request for a confirmation notification of either
-// a txid or output script.
-type ConfRequest struct {
- // TxID is the hash of the transaction for which confirmation
- // notifications are requested. If set to a zero hash, then a
- // confirmation notification will be dispatched upon inclusion of the
- // _script_, rather than the txid.
- TxID chainhash.Hash
-
- // PkScript is the public key script of an outpoint created in this
- // transaction.
- PkScript txscript.PkScript
-}
-
-// NewConfRequest creates a request for a confirmation notification of either a
-// txid or output script. A nil txid or an allocated ZeroHash can be used to
-// dispatch the confirmation notification on the script.
-func NewConfRequest(txid *chainhash.Hash, pkScript []byte) (ConfRequest, er.R) {
- var r ConfRequest
- outputScript, err := txscript.ParsePkScript(pkScript)
- if err != nil {
- return r, err
- }
-
- // We'll only set a txid for which we'll dispatch a confirmation
- // notification on this request if one was provided. Otherwise, we'll
- // default to dispatching on the confirmation of the script instead.
- if txid != nil {
- r.TxID = *txid
- }
- r.PkScript = outputScript
-
- return r, nil
-}
-
-// String returns the string representation of the ConfRequest.
-func (r ConfRequest) String() string {
- if r.TxID != ZeroHash {
- return fmt.Sprintf("txid=%v", r.TxID)
- }
- return fmt.Sprintf("script=%v", r.PkScript)
-}
-
-// ConfHintKey returns the key that will be used to index the confirmation
-// request's hint within the height hint cache.
-func (r ConfRequest) ConfHintKey() ([]byte, er.R) {
- if r.TxID == ZeroHash {
- return r.PkScript.Script(), nil
- }
-
- var txid bytes.Buffer
- if err := channeldb.WriteElement(&txid, r.TxID); err != nil {
- return nil, err
- }
-
- return txid.Bytes(), nil
-}
-
-// MatchesTx determines whether the given transaction satisfies the confirmation
-// request. If the confirmation request is for a script, then we'll check all of
-// the outputs of the transaction to determine if it matches. Otherwise, we'll
-// match on the txid.
-func (r ConfRequest) MatchesTx(tx *wire.MsgTx) bool {
- scriptMatches := func() bool {
- pkScript := r.PkScript.Script()
- for _, txOut := range tx.TxOut {
- if bytes.Equal(txOut.PkScript, pkScript) {
- return true
- }
- }
-
- return false
- }
-
- if r.TxID != ZeroHash {
- return r.TxID == tx.TxHash() && scriptMatches()
- }
-
- return scriptMatches()
-}
-
-// ConfNtfn represents a notifier client's request to receive a notification
-// once the target transaction/output script gets sufficient confirmations. The
-// client is asynchronously notified via the ConfirmationEvent channels.
-type ConfNtfn struct {
- // ConfID uniquely identifies the confirmation notification request for
- // the specified transaction/output script.
- ConfID uint64
-
- // ConfRequest represents either the txid or script we should detect
- // inclusion of within the chain.
- ConfRequest
-
- // NumConfirmations is the number of confirmations after which the
- // notification is to be sent.
- NumConfirmations uint32
-
- // Event contains references to the channels that the notifications are to
- // be sent over.
- Event *ConfirmationEvent
-
- // HeightHint is the minimum height in the chain that we expect to find
- // this txid.
- HeightHint uint32
-
- // dispatched is false if the confirmed notification has not been sent yet.
- dispatched bool
-}
-
-// HistoricalConfDispatch parameterizes a manual rescan for a particular
-// transaction/output script. The parameters include the start and end block
-// heights specifying the range of blocks to scan.
-type HistoricalConfDispatch struct {
- // ConfRequest represents either the txid or script we should detect
- // inclusion of within the chain.
- ConfRequest
-
- // StartHeight specifies the block height at which to begin the
- // historical rescan.
- StartHeight uint32
-
- // EndHeight specifies the last block height (inclusive) that the
- // historical scan should consider.
- EndHeight uint32
-}
-
-// ConfRegistration encompasses all of the information required for callers to
-// retrieve details about a confirmation event.
-type ConfRegistration struct {
- // Event contains references to the channels that the notifications are
- // to be sent over.
- Event *ConfirmationEvent
-
- // HistoricalDispatch, if non-nil, signals to the client who registered
- // the notification that they are responsible for attempting to manually
- // rescan blocks for the txid/output script between the start and end
- // heights.
- HistoricalDispatch *HistoricalConfDispatch
-
- // Height is the height of the TxNotifier at the time the confirmation
- // notification was registered. This can be used so that backends can
- // request to be notified of confirmations from this point forwards.
- Height uint32
-}
-
-// SpendRequest encapsulates a request for a spend notification of either an
-// outpoint or output script.
-type SpendRequest struct {
- // OutPoint is the outpoint for which a client has requested a spend
- // notification for. If set to a zero outpoint, then a spend
- // notification will be dispatched upon detecting the spend of the
- // _script_, rather than the outpoint.
- OutPoint wire.OutPoint
-
- // PkScript is the script of the outpoint. If a zero outpoint is set,
- // then this can be an arbitrary script.
- PkScript txscript.PkScript
-}
-
-// NewSpendRequest creates a request for a spend notification of either an
-// outpoint or output script. A nil outpoint or an allocated ZeroOutPoint can be
-// used to dispatch the confirmation notification on the script.
-func NewSpendRequest(op *wire.OutPoint, pkScript []byte) (SpendRequest, er.R) {
- var r SpendRequest
- outputScript, err := txscript.ParsePkScript(pkScript)
- if err != nil {
- return r, err
- }
-
- // We'll only set an outpoint for which we'll dispatch a spend
- // notification on this request if one was provided. Otherwise, we'll
- // default to dispatching on the spend of the script instead.
- if op != nil {
- r.OutPoint = *op
- }
- r.PkScript = outputScript
-
- return r, nil
-}
-
-// String returns the string representation of the SpendRequest.
-func (r SpendRequest) String() string {
- if r.OutPoint != ZeroOutPoint {
- return fmt.Sprintf("outpoint=%v, script=%v", r.OutPoint,
- r.PkScript)
- }
- return fmt.Sprintf("outpoint=, script=%v", r.PkScript)
-}
-
-// SpendHintKey returns the key that will be used to index the spend request's
-// hint within the height hint cache.
-func (r SpendRequest) SpendHintKey() ([]byte, er.R) {
- if r.OutPoint == ZeroOutPoint {
- return r.PkScript.Script(), nil
- }
-
- var outpoint bytes.Buffer
- err := channeldb.WriteElement(&outpoint, r.OutPoint)
- if err != nil {
- return nil, err
- }
-
- return outpoint.Bytes(), nil
-}
-
-// MatchesTx determines whether the given transaction satisfies the spend
-// request. If the spend request is for an outpoint, then we'll check all of
-// the outputs being spent by the inputs of the transaction to determine if it
-// matches. Otherwise, we'll need to match on the output script being spent, so
-// we'll recompute it for each input of the transaction to determine if it
-// matches.
-func (r SpendRequest) MatchesTx(tx *wire.MsgTx) (bool, uint32, er.R) {
- if r.OutPoint != ZeroOutPoint {
- for i, txIn := range tx.TxIn {
- if txIn.PreviousOutPoint == r.OutPoint {
- return true, uint32(i), nil
- }
- }
-
- return false, 0, nil
- }
-
- for i, txIn := range tx.TxIn {
- pkScript, err := txscript.ComputePkScript(
- txIn.SignatureScript, txIn.Witness,
- )
- if txscript.ErrUnsupportedScriptType.Is(err) {
- continue
- }
- if err != nil {
- return false, 0, err
- }
-
- if bytes.Equal(pkScript.Script(), r.PkScript.Script()) {
- return true, uint32(i), nil
- }
- }
-
- return false, 0, nil
-}
-
-// SpendNtfn represents a client's request to receive a notification once an
-// outpoint/output script has been spent on-chain. The client is asynchronously
-// notified via the SpendEvent channels.
-type SpendNtfn struct {
- // SpendID uniquely identies the spend notification request for the
- // specified outpoint/output script.
- SpendID uint64
-
- // SpendRequest represents either the outpoint or script we should
- // detect the spend of.
- SpendRequest
-
- // Event contains references to the channels that the notifications are
- // to be sent over.
- Event *SpendEvent
-
- // HeightHint is the earliest height in the chain that we expect to find
- // the spending transaction of the specified outpoint/output script.
- // This value will be overridden by the spend hint cache if it contains
- // an entry for it.
- HeightHint uint32
-
- // dispatched signals whether a spend notification has been disptached
- // to the client.
- dispatched bool
-}
-
-// HistoricalSpendDispatch parameterizes a manual rescan to determine the
-// spending details (if any) of an outpoint/output script. The parameters
-// include the start and end block heights specifying the range of blocks to
-// scan.
-type HistoricalSpendDispatch struct {
- // SpendRequest represents either the outpoint or script we should
- // detect the spend of.
- SpendRequest
-
- // StartHeight specified the block height at which to begin the
- // historical rescan.
- StartHeight uint32
-
- // EndHeight specifies the last block height (inclusive) that the
- // historical rescan should consider.
- EndHeight uint32
-}
-
-// SpendRegistration encompasses all of the information required for callers to
-// retrieve details about a spend event.
-type SpendRegistration struct {
- // Event contains references to the channels that the notifications are
- // to be sent over.
- Event *SpendEvent
-
- // HistoricalDispatch, if non-nil, signals to the client who registered
- // the notification that they are responsible for attempting to manually
- // rescan blocks for the txid/output script between the start and end
- // heights.
- HistoricalDispatch *HistoricalSpendDispatch
-
- // Height is the height of the TxNotifier at the time the spend
- // notification was registered. This can be used so that backends can
- // request to be notified of spends from this point forwards.
- Height uint32
-}
-
-// TxNotifier is a struct responsible for delivering transaction notifications
-// to subscribers. These notifications can be of two different types:
-// transaction/output script confirmations and/or outpoint/output script spends.
-// The TxNotifier will watch the blockchain as new blocks come in, in order to
-// satisfy its client requests.
-type TxNotifier struct {
- confClientCounter uint64 // To be used atomically.
- spendClientCounter uint64 // To be used atomically.
-
- // currentHeight is the height of the tracked blockchain. It is used to
- // determine the number of confirmations a tx has and ensure blocks are
- // connected and disconnected in order.
- currentHeight uint32
-
- // reorgSafetyLimit is the chain depth beyond which it is assumed a
- // block will not be reorganized out of the chain. This is used to
- // determine when to prune old notification requests so that reorgs are
- // handled correctly. The coinbase maturity period is a reasonable value
- // to use.
- reorgSafetyLimit uint32
-
- // reorgDepth is the depth of a chain organization that this system is
- // being informed of. This is incremented as long as a sequence of
- // blocks are disconnected without being interrupted by a new block.
- reorgDepth uint32
-
- // confNotifications is an index of confirmation notification requests
- // by transaction hash/output script.
- confNotifications map[ConfRequest]*confNtfnSet
-
- // confsByInitialHeight is an index of watched transactions/output
- // scripts by the height that they are included at in the chain. This
- // is tracked so that incorrect notifications are not sent if a
- // transaction/output script is reorged out of the chain and so that
- // negative confirmations can be recognized.
- confsByInitialHeight map[uint32]map[ConfRequest]struct{}
-
- // ntfnsByConfirmHeight is an index of notification requests by the
- // height at which the transaction/output script will have sufficient
- // confirmations.
- ntfnsByConfirmHeight map[uint32]map[*ConfNtfn]struct{}
-
- // spendNotifications is an index of all active notification requests
- // per outpoint/output script.
- spendNotifications map[SpendRequest]*spendNtfnSet
-
- // spendsByHeight is an index that keeps tracks of the spending height
- // of outpoints/output scripts we are currently tracking notifications
- // for. This is used in order to recover from spending transactions
- // being reorged out of the chain.
- spendsByHeight map[uint32]map[SpendRequest]struct{}
-
- // confirmHintCache is a cache used to maintain the latest height hints
- // for transactions/output scripts. Each height hint represents the
- // earliest height at which they scripts could have been confirmed
- // within the chain.
- confirmHintCache ConfirmHintCache
-
- // spendHintCache is a cache used to maintain the latest height hints
- // for outpoints/output scripts. Each height hint represents the
- // earliest height at which they could have been spent within the chain.
- spendHintCache SpendHintCache
-
- // quit is closed in order to signal that the notifier is gracefully
- // exiting.
- quit chan struct{}
-
- sync.Mutex
-}
-
-// NewTxNotifier creates a TxNotifier. The current height of the blockchain is
-// accepted as a parameter. The different hint caches (confirm and spend) are
-// used as an optimization in order to retrieve a better starting point when
-// dispatching a recan for a historical event in the chain.
-func NewTxNotifier(startHeight uint32, reorgSafetyLimit uint32,
- confirmHintCache ConfirmHintCache,
- spendHintCache SpendHintCache) *TxNotifier {
-
- return &TxNotifier{
- currentHeight: startHeight,
- reorgSafetyLimit: reorgSafetyLimit,
- confNotifications: make(map[ConfRequest]*confNtfnSet),
- confsByInitialHeight: make(map[uint32]map[ConfRequest]struct{}),
- ntfnsByConfirmHeight: make(map[uint32]map[*ConfNtfn]struct{}),
- spendNotifications: make(map[SpendRequest]*spendNtfnSet),
- spendsByHeight: make(map[uint32]map[SpendRequest]struct{}),
- confirmHintCache: confirmHintCache,
- spendHintCache: spendHintCache,
- quit: make(chan struct{}),
- }
-}
-
-// newConfNtfn validates all of the parameters required to successfully create
-// and register a confirmation notification.
-func (n *TxNotifier) newConfNtfn(txid *chainhash.Hash,
- pkScript []byte, numConfs, heightHint uint32) (*ConfNtfn, er.R) {
-
- // An accompanying output script must always be provided.
- if len(pkScript) == 0 {
- return nil, ErrNoScript.Default()
- }
-
- // Enforce that we will not dispatch confirmations beyond the reorg
- // safety limit.
- if numConfs == 0 || numConfs > n.reorgSafetyLimit {
- return nil, ErrNumConfsOutOfRange.Default()
- }
-
- // A height hint must be provided to prevent scanning from the genesis
- // block.
- if heightHint == 0 {
- return nil, ErrNoHeightHint.Default()
- }
-
- // Ensure the output script is of a supported type.
- confRequest, err := NewConfRequest(txid, pkScript)
- if err != nil {
- return nil, err
- }
-
- confID := atomic.AddUint64(&n.confClientCounter, 1)
- return &ConfNtfn{
- ConfID: confID,
- ConfRequest: confRequest,
- NumConfirmations: numConfs,
- Event: NewConfirmationEvent(numConfs, func() {
- n.CancelConf(confRequest, confID)
- }),
- HeightHint: heightHint,
- }, nil
-}
-
-// RegisterConf handles a new confirmation notification request. The client will
-// be notified when the transaction/output script gets a sufficient number of
-// confirmations in the blockchain.
-//
-// NOTE: If the transaction/output script has already been included in a block
-// on the chain, the confirmation details must be provided with the
-// UpdateConfDetails method, otherwise we will wait for the transaction/output
-// script to confirm even though it already has.
-func (n *TxNotifier) RegisterConf(txid *chainhash.Hash, pkScript []byte,
- numConfs, heightHint uint32) (*ConfRegistration, er.R) {
-
- select {
- case <-n.quit:
- return nil, ErrTxNotifierExiting.Default()
- default:
- }
-
- // We'll start by performing a series of validation checks.
- ntfn, err := n.newConfNtfn(txid, pkScript, numConfs, heightHint)
- if err != nil {
- return nil, err
- }
-
- // Before proceeding to register the notification, we'll query our
- // height hint cache to determine whether a better one exists.
- //
- // TODO(conner): verify that all submitted height hints are identical.
- startHeight := ntfn.HeightHint
- hint, err := n.confirmHintCache.QueryConfirmHint(ntfn.ConfRequest)
- if err == nil {
- if hint > startHeight {
- log.Debugf("Using height hint %d retrieved from cache "+
- "for %v instead of %d", hint, ntfn.ConfRequest,
- startHeight)
- startHeight = hint
- }
- } else if !ErrConfirmHintNotFound.Is(err) {
- log.Errorf("Unable to query confirm hint for %v: %v",
- ntfn.ConfRequest, err)
- }
-
- log.Infof("New confirmation subscription: conf_id=%d, %v, "+
- "num_confs=%v height_hint=%d", ntfn.ConfID, ntfn.ConfRequest,
- numConfs, startHeight)
-
- n.Lock()
- defer n.Unlock()
-
- confSet, ok := n.confNotifications[ntfn.ConfRequest]
- if !ok {
- // If this is the first registration for this request, construct
- // a confSet to coalesce all notifications for the same request.
- confSet = newConfNtfnSet()
- n.confNotifications[ntfn.ConfRequest] = confSet
- }
- confSet.ntfns[ntfn.ConfID] = ntfn
-
- switch confSet.rescanStatus {
-
- // A prior rescan has already completed and we are actively watching at
- // tip for this request.
- case rescanComplete:
- // If the confirmation details for this set of notifications has
- // already been found, we'll attempt to deliver them immediately
- // to this client.
- log.Debugf("Attempting to dispatch confirmation for %v on "+
- "registration since rescan has finished",
- ntfn.ConfRequest)
-
- err := n.dispatchConfDetails(ntfn, confSet.details)
- if err != nil {
- return nil, err
- }
-
- return &ConfRegistration{
- Event: ntfn.Event,
- HistoricalDispatch: nil,
- Height: n.currentHeight,
- }, nil
-
- // A rescan is already in progress, return here to prevent dispatching
- // another. When the rescan returns, this notification's details will be
- // updated as well.
- case rescanPending:
- log.Debugf("Waiting for pending rescan to finish before "+
- "notifying %v at tip", ntfn.ConfRequest)
-
- return &ConfRegistration{
- Event: ntfn.Event,
- HistoricalDispatch: nil,
- Height: n.currentHeight,
- }, nil
-
- // If no rescan has been dispatched, attempt to do so now.
- case rescanNotStarted:
- }
-
- // If the provided or cached height hint indicates that the
- // transaction with the given txid/output script is to be confirmed at a
- // height greater than the notifier's current height, we'll refrain from
- // spawning a historical dispatch.
- if startHeight > n.currentHeight {
- log.Debugf("Height hint is above current height, not "+
- "dispatching historical confirmation rescan for %v",
- ntfn.ConfRequest)
-
- // Set the rescan status to complete, which will allow the
- // notifier to start delivering messages for this set
- // immediately.
- confSet.rescanStatus = rescanComplete
- return &ConfRegistration{
- Event: ntfn.Event,
- HistoricalDispatch: nil,
- Height: n.currentHeight,
- }, nil
- }
-
- log.Debugf("Dispatching historical confirmation rescan for %v",
- ntfn.ConfRequest)
-
- // Construct the parameters for historical dispatch, scanning the range
- // of blocks between our best known height hint and the notifier's
- // current height. The notifier will begin also watching for
- // confirmations at tip starting with the next block.
- dispatch := &HistoricalConfDispatch{
- ConfRequest: ntfn.ConfRequest,
- StartHeight: startHeight,
- EndHeight: n.currentHeight,
- }
-
- // Set this confSet's status to pending, ensuring subsequent
- // registrations don't also attempt a dispatch.
- confSet.rescanStatus = rescanPending
-
- return &ConfRegistration{
- Event: ntfn.Event,
- HistoricalDispatch: dispatch,
- Height: n.currentHeight,
- }, nil
-}
-
-// CancelConf cancels an existing request for a spend notification of an
-// outpoint/output script. The request is identified by its spend ID.
-func (n *TxNotifier) CancelConf(confRequest ConfRequest, confID uint64) {
- select {
- case <-n.quit:
- return
- default:
- }
-
- n.Lock()
- defer n.Unlock()
-
- confSet, ok := n.confNotifications[confRequest]
- if !ok {
- return
- }
- ntfn, ok := confSet.ntfns[confID]
- if !ok {
- return
- }
-
- log.Infof("Canceling confirmation notification: conf_id=%d, %v", confID,
- confRequest)
-
- // We'll close all the notification channels to let the client know
- // their cancel request has been fulfilled.
- close(ntfn.Event.Confirmed)
- close(ntfn.Event.Updates)
- close(ntfn.Event.NegativeConf)
-
- // Finally, we'll clean up any lingering references to this
- // notification.
- delete(confSet.ntfns, confID)
-
- // Remove the queued confirmation notification if the transaction has
- // already confirmed, but hasn't met its required number of
- // confirmations.
- if confSet.details != nil {
- confHeight := confSet.details.BlockHeight +
- ntfn.NumConfirmations - 1
- delete(n.ntfnsByConfirmHeight[confHeight], ntfn)
- }
-}
-
-// UpdateConfDetails attempts to update the confirmation details for an active
-// notification within the notifier. This should only be used in the case of a
-// transaction/output script that has confirmed before the notifier's current
-// height.
-//
-// NOTE: The notification should be registered first to ensure notifications are
-// dispatched correctly.
-func (n *TxNotifier) UpdateConfDetails(confRequest ConfRequest,
- details *TxConfirmation) er.R {
-
- select {
- case <-n.quit:
- return ErrTxNotifierExiting.Default()
- default:
- }
-
- // Ensure we hold the lock throughout handling the notification to
- // prevent the notifier from advancing its height underneath us.
- n.Lock()
- defer n.Unlock()
-
- // First, we'll determine whether we have an active confirmation
- // notification for the given txid/script.
- confSet, ok := n.confNotifications[confRequest]
- if !ok {
- return er.Errorf("confirmation notification for %v not found",
- confRequest)
- }
-
- // If the confirmation details were already found at tip, all existing
- // notifications will have been dispatched or queued for dispatch. We
- // can exit early to avoid sending too many notifications on the
- // buffered channels.
- if confSet.details != nil {
- return nil
- }
-
- // The historical dispatch has been completed for this confSet. We'll
- // update the rescan status and cache any details that were found. If
- // the details are nil, that implies we did not find them and will
- // continue to watch for them at tip.
- confSet.rescanStatus = rescanComplete
-
- // The notifier has yet to reach the height at which the
- // transaction/output script was included in a block, so we should defer
- // until handling it then within ConnectTip.
- if details == nil {
- log.Debugf("Confirmation details for %v not found during "+
- "historical dispatch, waiting to dispatch at tip",
- confRequest)
-
- // We'll commit the current height as the confirm hint to
- // prevent another potentially long rescan if we restart before
- // a new block comes in.
- err := n.confirmHintCache.CommitConfirmHint(
- n.currentHeight, confRequest,
- )
- if err != nil {
- // The error is not fatal as this is an optimistic
- // optimization, so we'll avoid returning an error.
- log.Debugf("Unable to update confirm hint to %d for "+
- "%v: %v", n.currentHeight, confRequest, err)
- }
-
- return nil
- }
-
- if details.BlockHeight > n.currentHeight {
- log.Debugf("Confirmation details for %v found above current "+
- "height, waiting to dispatch at tip", confRequest)
-
- return nil
- }
-
- log.Debugf("Updating confirmation details for %v", confRequest)
-
- err := n.confirmHintCache.CommitConfirmHint(
- details.BlockHeight, confRequest,
- )
- if err != nil {
- // The error is not fatal, so we should not return an error to
- // the caller.
- log.Errorf("Unable to update confirm hint to %d for %v: %v",
- details.BlockHeight, confRequest, err)
- }
-
- // Cache the details found in the rescan and attempt to dispatch any
- // notifications that have not yet been delivered.
- confSet.details = details
- for _, ntfn := range confSet.ntfns {
- err = n.dispatchConfDetails(ntfn, details)
- if err != nil {
- return err
- }
- }
-
- return nil
-}
-
-// dispatchConfDetails attempts to cache and dispatch details to a particular
-// client if the transaction/output script has sufficiently confirmed. If the
-// provided details are nil, this method will be a no-op.
-func (n *TxNotifier) dispatchConfDetails(
- ntfn *ConfNtfn, details *TxConfirmation) er.R {
-
- // If no details are provided, return early as we can't dispatch.
- if details == nil {
- log.Debugf("Unable to dispatch %v, no details provided",
- ntfn.ConfRequest)
-
- return nil
- }
-
- // Now, we'll examine whether the transaction/output script of this
- // request has reached its required number of confirmations. If it has,
- // we'll dispatch a confirmation notification to the caller.
- confHeight := details.BlockHeight + ntfn.NumConfirmations - 1
- if confHeight <= n.currentHeight {
- log.Infof("Dispatching %v confirmation notification for %v",
- ntfn.NumConfirmations, ntfn.ConfRequest)
-
- // We'll send a 0 value to the Updates channel,
- // indicating that the transaction/output script has already
- // been confirmed.
- select {
- case ntfn.Event.Updates <- 0:
- case <-n.quit:
- return ErrTxNotifierExiting.Default()
- }
-
- select {
- case ntfn.Event.Confirmed <- details:
- ntfn.dispatched = true
- case <-n.quit:
- return ErrTxNotifierExiting.Default()
- }
- } else {
- log.Debugf("Queueing %v confirmation notification for %v at tip ",
- ntfn.NumConfirmations, ntfn.ConfRequest)
-
- // Otherwise, we'll keep track of the notification
- // request by the height at which we should dispatch the
- // confirmation notification.
- ntfnSet, exists := n.ntfnsByConfirmHeight[confHeight]
- if !exists {
- ntfnSet = make(map[*ConfNtfn]struct{})
- n.ntfnsByConfirmHeight[confHeight] = ntfnSet
- }
- ntfnSet[ntfn] = struct{}{}
-
- // We'll also send an update to the client of how many
- // confirmations are left for the transaction/output script to
- // be confirmed.
- numConfsLeft := confHeight - n.currentHeight
- select {
- case ntfn.Event.Updates <- numConfsLeft:
- case <-n.quit:
- return ErrTxNotifierExiting.Default()
- }
- }
-
- // As a final check, we'll also watch the transaction/output script if
- // it's still possible for it to get reorged out of the chain.
- reorgSafeHeight := details.BlockHeight + n.reorgSafetyLimit
- if reorgSafeHeight > n.currentHeight {
- txSet, exists := n.confsByInitialHeight[details.BlockHeight]
- if !exists {
- txSet = make(map[ConfRequest]struct{})
- n.confsByInitialHeight[details.BlockHeight] = txSet
- }
- txSet[ntfn.ConfRequest] = struct{}{}
- }
-
- return nil
-}
-
-// newSpendNtfn validates all of the parameters required to successfully create
-// and register a spend notification.
-func (n *TxNotifier) newSpendNtfn(outpoint *wire.OutPoint,
- pkScript []byte, heightHint uint32) (*SpendNtfn, er.R) {
-
- // An accompanying output script must always be provided.
- if len(pkScript) == 0 {
- return nil, ErrNoScript.Default()
- }
-
- // A height hint must be provided to prevent scanning from the genesis
- // block.
- if heightHint == 0 {
- return nil, ErrNoHeightHint.Default()
- }
-
- // Ensure the output script is of a supported type.
- spendRequest, err := NewSpendRequest(outpoint, pkScript)
- if err != nil {
- return nil, err
- }
-
- spendID := atomic.AddUint64(&n.spendClientCounter, 1)
- return &SpendNtfn{
- SpendID: spendID,
- SpendRequest: spendRequest,
- Event: NewSpendEvent(func() {
- n.CancelSpend(spendRequest, spendID)
- }),
- HeightHint: heightHint,
- }, nil
-}
-
-// RegisterSpend handles a new spend notification request. The client will be
-// notified once the outpoint/output script is detected as spent within the
-// chain.
-//
-// NOTE: If the outpoint/output script has already been spent within the chain
-// before the notifier's current tip, the spend details must be provided with
-// the UpdateSpendDetails method, otherwise we will wait for the outpoint/output
-// script to be spent at tip, even though it already has.
-func (n *TxNotifier) RegisterSpend(outpoint *wire.OutPoint, pkScript []byte,
- heightHint uint32) (*SpendRegistration, er.R) {
-
- select {
- case <-n.quit:
- return nil, ErrTxNotifierExiting.Default()
- default:
- }
-
- // We'll start by performing a series of validation checks.
- ntfn, err := n.newSpendNtfn(outpoint, pkScript, heightHint)
- if err != nil {
- return nil, err
- }
-
- // Before proceeding to register the notification, we'll query our spend
- // hint cache to determine whether a better one exists.
- startHeight := ntfn.HeightHint
- hint, err := n.spendHintCache.QuerySpendHint(ntfn.SpendRequest)
- if err == nil {
- if hint > startHeight {
- log.Debugf("Using height hint %d retrieved from cache "+
- "for %v instead of %d", hint, ntfn.SpendRequest,
- startHeight)
- startHeight = hint
- }
- } else if !ErrSpendHintNotFound.Is(err) {
- log.Errorf("Unable to query spend hint for %v: %v",
- ntfn.SpendRequest, err)
- }
-
- n.Lock()
- defer n.Unlock()
-
- log.Infof("New spend subscription: spend_id=%d, %v, height_hint=%d",
- ntfn.SpendID, ntfn.SpendRequest, startHeight)
-
- // Keep track of the notification request so that we can properly
- // dispatch a spend notification later on.
- spendSet, ok := n.spendNotifications[ntfn.SpendRequest]
- if !ok {
- // If this is the first registration for the request, we'll
- // construct a spendNtfnSet to coalesce all notifications.
- spendSet = newSpendNtfnSet()
- n.spendNotifications[ntfn.SpendRequest] = spendSet
- }
- spendSet.ntfns[ntfn.SpendID] = ntfn
-
- // We'll now let the caller know whether a historical rescan is needed
- // depending on the current rescan status.
- switch spendSet.rescanStatus {
-
- // If the spending details for this request have already been determined
- // and cached, then we can use them to immediately dispatch the spend
- // notification to the client.
- case rescanComplete:
- log.Debugf("Attempting to dispatch spend for %v on "+
- "registration since rescan has finished",
- ntfn.SpendRequest)
-
- err := n.dispatchSpendDetails(ntfn, spendSet.details)
- if err != nil {
- return nil, err
- }
-
- return &SpendRegistration{
- Event: ntfn.Event,
- HistoricalDispatch: nil,
- Height: n.currentHeight,
- }, nil
-
- // If there is an active rescan to determine whether the request has
- // been spent, then we won't trigger another one.
- case rescanPending:
- log.Debugf("Waiting for pending rescan to finish before "+
- "notifying %v at tip", ntfn.SpendRequest)
-
- return &SpendRegistration{
- Event: ntfn.Event,
- HistoricalDispatch: nil,
- Height: n.currentHeight,
- }, nil
-
- // Otherwise, we'll fall through and let the caller know that a rescan
- // should be dispatched to determine whether the request has already
- // been spent.
- case rescanNotStarted:
- }
-
- // However, if the spend hint, either provided by the caller or
- // retrieved from the cache, is found to be at a later height than the
- // TxNotifier is aware of, then we'll refrain from dispatching a
- // historical rescan and wait for the spend to come in at tip.
- if startHeight > n.currentHeight {
- log.Debugf("Spend hint of %d for %v is above current height %d",
- startHeight, ntfn.SpendRequest, n.currentHeight)
-
- // We'll also set the rescan status as complete to ensure that
- // spend hints for this request get updated upon
- // connected/disconnected blocks.
- spendSet.rescanStatus = rescanComplete
- return &SpendRegistration{
- Event: ntfn.Event,
- HistoricalDispatch: nil,
- Height: n.currentHeight,
- }, nil
- }
-
- // We'll set the rescan status to pending to ensure subsequent
- // notifications don't also attempt a historical dispatch.
- spendSet.rescanStatus = rescanPending
-
- log.Infof("Dispatching historical spend rescan for %v, start=%d, "+
- "end=%d", ntfn.SpendRequest, startHeight, n.currentHeight)
-
- return &SpendRegistration{
- Event: ntfn.Event,
- HistoricalDispatch: &HistoricalSpendDispatch{
- SpendRequest: ntfn.SpendRequest,
- StartHeight: startHeight,
- EndHeight: n.currentHeight,
- },
- Height: n.currentHeight,
- }, nil
-}
-
-// CancelSpend cancels an existing request for a spend notification of an
-// outpoint/output script. The request is identified by its spend ID.
-func (n *TxNotifier) CancelSpend(spendRequest SpendRequest, spendID uint64) {
- select {
- case <-n.quit:
- return
- default:
- }
-
- n.Lock()
- defer n.Unlock()
-
- spendSet, ok := n.spendNotifications[spendRequest]
- if !ok {
- return
- }
- ntfn, ok := spendSet.ntfns[spendID]
- if !ok {
- return
- }
-
- log.Infof("Canceling spend notification: spend_id=%d, %v", spendID,
- spendRequest)
-
- // We'll close all the notification channels to let the client know
- // their cancel request has been fulfilled.
- close(ntfn.Event.Spend)
- close(ntfn.Event.Reorg)
- close(ntfn.Event.Done)
- delete(spendSet.ntfns, spendID)
-}
-
-// ProcessRelevantSpendTx processes a transaction provided externally. This will
-// check whether the transaction is relevant to the notifier if it spends any
-// outpoints/output scripts for which we currently have registered notifications
-// for. If it is relevant, spend notifications will be dispatched to the caller.
-func (n *TxNotifier) ProcessRelevantSpendTx(tx *btcutil.Tx,
- blockHeight uint32) er.R {
-
- select {
- case <-n.quit:
- return ErrTxNotifierExiting.Default()
- default:
- }
-
- // Ensure we hold the lock throughout handling the notification to
- // prevent the notifier from advancing its height underneath us.
- n.Lock()
- defer n.Unlock()
-
- // We'll use a channel to coalesce all the spend requests that this
- // transaction fulfills.
- type spend struct {
- request *SpendRequest
- details *SpendDetail
- }
-
- // We'll set up the onSpend filter callback to gather all the fulfilled
- // spends requests within this transaction.
- var spends []spend
- onSpend := func(request SpendRequest, details *SpendDetail) {
- spends = append(spends, spend{&request, details})
- }
- n.filterTx(tx, nil, blockHeight, nil, onSpend)
-
- // After the transaction has been filtered, we can finally dispatch
- // notifications for each request.
- for _, spend := range spends {
- err := n.updateSpendDetails(*spend.request, spend.details)
- if err != nil {
- return err
- }
- }
-
- return nil
-}
-
-// UpdateSpendDetails attempts to update the spend details for all active spend
-// notification requests for an outpoint/output script. This method should be
-// used once a historical scan of the chain has finished. If the historical scan
-// did not find a spending transaction for it, the spend details may be nil.
-//
-// NOTE: A notification request for the outpoint/output script must be
-// registered first to ensure notifications are delivered.
-func (n *TxNotifier) UpdateSpendDetails(spendRequest SpendRequest,
- details *SpendDetail) er.R {
-
- select {
- case <-n.quit:
- return ErrTxNotifierExiting.Default()
- default:
- }
-
- // Ensure we hold the lock throughout handling the notification to
- // prevent the notifier from advancing its height underneath us.
- n.Lock()
- defer n.Unlock()
-
- return n.updateSpendDetails(spendRequest, details)
-}
-
-// updateSpendDetails attempts to update the spend details for all active spend
-// notification requests for an outpoint/output script. This method should be
-// used once a historical scan of the chain has finished. If the historical scan
-// did not find a spending transaction for it, the spend details may be nil.
-//
-// NOTE: This method must be called with the TxNotifier's lock held.
-func (n *TxNotifier) updateSpendDetails(spendRequest SpendRequest,
- details *SpendDetail) er.R {
-
- // Mark the ongoing historical rescan for this request as finished. This
- // will allow us to update the spend hints for it at tip.
- spendSet, ok := n.spendNotifications[spendRequest]
- if !ok {
- return er.Errorf("spend notification for %v not found",
- spendRequest)
- }
-
- // If the spend details have already been found either at tip, then the
- // notifications should have already been dispatched, so we can exit
- // early to prevent sending duplicate notifications.
- if spendSet.details != nil {
- return nil
- }
-
- // Since the historical rescan has completed for this request, we'll
- // mark its rescan status as complete in order to ensure that the
- // TxNotifier can properly update its spend hints upon
- // connected/disconnected blocks.
- spendSet.rescanStatus = rescanComplete
-
- // If the historical rescan was not able to find a spending transaction
- // for this request, then we can track the spend at tip.
- if details == nil {
- // We'll commit the current height as the spend hint to prevent
- // another potentially long rescan if we restart before a new
- // block comes in.
- err := n.spendHintCache.CommitSpendHint(
- n.currentHeight, spendRequest,
- )
- if err != nil {
- // The error is not fatal as this is an optimistic
- // optimization, so we'll avoid returning an error.
- log.Debugf("Unable to update spend hint to %d for %v: %v",
- n.currentHeight, spendRequest, err)
- }
-
- log.Debugf("Updated spend hint to height=%v for unconfirmed "+
- "spend request %v", n.currentHeight, spendRequest)
- return nil
- }
-
- // If the historical rescan found the spending transaction for this
- // request, but it's at a later height than the notifier (this can
- // happen due to latency with the backend during a reorg), then we'll
- // defer handling the notification until the notifier has caught up to
- // such height.
- if uint32(details.SpendingHeight) > n.currentHeight {
- return nil
- }
-
- // Now that we've determined the request has been spent, we'll commit
- // its spending height as its hint in the cache and dispatch
- // notifications to all of its respective clients.
- err := n.spendHintCache.CommitSpendHint(
- uint32(details.SpendingHeight), spendRequest,
- )
- if err != nil {
- // The error is not fatal as this is an optimistic optimization,
- // so we'll avoid returning an error.
- log.Debugf("Unable to update spend hint to %d for %v: %v",
- details.SpendingHeight, spendRequest, err)
- }
-
- log.Debugf("Updated spend hint to height=%v for confirmed spend "+
- "request %v", details.SpendingHeight, spendRequest)
-
- spendSet.details = details
- for _, ntfn := range spendSet.ntfns {
- err := n.dispatchSpendDetails(ntfn, spendSet.details)
- if err != nil {
- return err
- }
- }
-
- return nil
-}
-
-// dispatchSpendDetails dispatches a spend notification to the client.
-//
-// NOTE: This must be called with the TxNotifier's lock held.
-func (n *TxNotifier) dispatchSpendDetails(ntfn *SpendNtfn, details *SpendDetail) er.R {
- // If there are no spend details to dispatch or if the notification has
- // already been dispatched, then we can skip dispatching to this client.
- if details == nil || ntfn.dispatched {
- log.Debugf("Skipping dispatch of spend details(%v) for "+
- "request %v, dispatched=%v", details, ntfn.SpendRequest,
- ntfn.dispatched)
- return nil
- }
-
- log.Infof("Dispatching confirmed spend notification for %v at "+
- "current height=%d: %v", ntfn.SpendRequest, n.currentHeight,
- details)
-
- select {
- case ntfn.Event.Spend <- details:
- ntfn.dispatched = true
- case <-n.quit:
- return ErrTxNotifierExiting.Default()
- }
-
- return nil
-}
-
-// ConnectTip handles a new block extending the current chain. It will go
-// through every transaction and determine if it is relevant to any of its
-// clients. A transaction can be relevant in either of the following two ways:
-//
-// 1. One of the inputs in the transaction spends an outpoint/output script
-// for which we currently have an active spend registration for.
-//
-// 2. The transaction has a txid or output script for which we currently have
-// an active confirmation registration for.
-//
-// In the event that the transaction is relevant, a confirmation/spend
-// notification will be queued for dispatch to the relevant clients.
-// Confirmation notifications will only be dispatched for transactions/output
-// scripts that have met the required number of confirmations required by the
-// client.
-//
-// NOTE: In order to actually dispatch the relevant transaction notifications to
-// clients, NotifyHeight must be called with the same block height in order to
-// maintain correctness.
-func (n *TxNotifier) ConnectTip(blockHash *chainhash.Hash, blockHeight uint32,
- txns []*btcutil.Tx) er.R {
-
- select {
- case <-n.quit:
- return ErrTxNotifierExiting.Default()
- default:
- }
-
- n.Lock()
- defer n.Unlock()
-
- if blockHeight != n.currentHeight+1 {
- return er.Errorf("received blocks out of order: "+
- "current height=%d, new height=%d",
- n.currentHeight, blockHeight)
- }
- n.currentHeight++
- n.reorgDepth = 0
-
- // First, we'll iterate over all the transactions found in this block to
- // determine if it includes any relevant transactions to the TxNotifier.
- log.Debugf("Filtering %d txns for %d spend requests at height %d",
- len(txns), len(n.spendNotifications), blockHeight)
- for _, tx := range txns {
- n.filterTx(
- tx, blockHash, blockHeight, n.handleConfDetailsAtTip,
- n.handleSpendDetailsAtTip,
- )
- }
-
- // Now that we've determined which requests were confirmed and spent
- // within the new block, we can update their entries in their respective
- // caches, along with all of our unconfirmed and unspent requests.
- n.updateHints(blockHeight)
-
- // Finally, we'll clear the entries from our set of notifications for
- // requests that are no longer under the risk of being reorged out of
- // the chain.
- if blockHeight >= n.reorgSafetyLimit {
- matureBlockHeight := blockHeight - n.reorgSafetyLimit
- for confRequest := range n.confsByInitialHeight[matureBlockHeight] {
- confSet := n.confNotifications[confRequest]
- for _, ntfn := range confSet.ntfns {
- select {
- case ntfn.Event.Done <- struct{}{}:
- case <-n.quit:
- return ErrTxNotifierExiting.Default()
- }
- }
-
- delete(n.confNotifications, confRequest)
- }
- delete(n.confsByInitialHeight, matureBlockHeight)
-
- for spendRequest := range n.spendsByHeight[matureBlockHeight] {
- spendSet := n.spendNotifications[spendRequest]
- for _, ntfn := range spendSet.ntfns {
- select {
- case ntfn.Event.Done <- struct{}{}:
- case <-n.quit:
- return ErrTxNotifierExiting.Default()
- }
- }
-
- log.Debugf("Deleting mature spend request %v at "+
- "height=%d", spendRequest, blockHeight)
- delete(n.spendNotifications, spendRequest)
- }
- delete(n.spendsByHeight, matureBlockHeight)
- }
-
- return nil
-}
-
-// filterTx determines whether the transaction spends or confirms any
-// outstanding pending requests. The onConf and onSpend callbacks can be used to
-// retrieve all the requests fulfilled by this transaction as they occur.
-func (n *TxNotifier) filterTx(tx *btcutil.Tx, blockHash *chainhash.Hash,
- blockHeight uint32, onConf func(ConfRequest, *TxConfirmation),
- onSpend func(SpendRequest, *SpendDetail)) {
-
- // In order to determine if this transaction is relevant to the
- // notifier, we'll check its inputs for any outstanding spend
- // requests.
- txHash := tx.Hash()
- if onSpend != nil {
- // notifyDetails is a helper closure that will construct the
- // spend details of a request and hand them off to the onSpend
- // callback.
- notifyDetails := func(spendRequest SpendRequest,
- prevOut wire.OutPoint, inputIdx uint32) {
-
- log.Debugf("Found spend of %v: spend_tx=%v, "+
- "block_height=%d", spendRequest, txHash,
- blockHeight)
-
- onSpend(spendRequest, &SpendDetail{
- SpentOutPoint: &prevOut,
- SpenderTxHash: txHash,
- SpendingTx: tx.MsgTx(),
- SpenderInputIndex: inputIdx,
- SpendingHeight: int32(blockHeight),
- })
- }
-
- for i, txIn := range tx.MsgTx().TxIn {
- // We'll re-derive the script of the output being spent
- // to determine if the inputs spends any registered
- // requests.
- prevOut := txIn.PreviousOutPoint
- pkScript, err := txscript.ComputePkScript(
- txIn.SignatureScript, txIn.Witness,
- )
- if err != nil {
- continue
- }
- spendRequest := SpendRequest{
- OutPoint: prevOut,
- PkScript: pkScript,
- }
-
- // If we have any, we'll record their spend height so
- // that notifications get dispatched to the respective
- // clients.
- if _, ok := n.spendNotifications[spendRequest]; ok {
- notifyDetails(spendRequest, prevOut, uint32(i))
- }
- spendRequest.OutPoint = ZeroOutPoint
- if _, ok := n.spendNotifications[spendRequest]; ok {
- notifyDetails(spendRequest, prevOut, uint32(i))
- }
- }
- }
-
- // We'll also check its outputs to determine if there are any
- // outstanding confirmation requests.
- if onConf != nil {
- // notifyDetails is a helper closure that will construct the
- // confirmation details of a request and hand them off to the
- // onConf callback.
- notifyDetails := func(confRequest ConfRequest) {
- log.Debugf("Found initial confirmation of %v: "+
- "height=%d, hash=%v", confRequest,
- blockHeight, blockHash)
-
- details := &TxConfirmation{
- Tx: tx.MsgTx(),
- BlockHash: blockHash,
- BlockHeight: blockHeight,
- TxIndex: uint32(tx.Index()),
- }
-
- onConf(confRequest, details)
- }
-
- for _, txOut := range tx.MsgTx().TxOut {
- // We'll parse the script of the output to determine if
- // we have any registered requests for it or the
- // transaction itself.
- pkScript, err := txscript.ParsePkScript(txOut.PkScript)
- if err != nil {
- continue
- }
- confRequest := ConfRequest{
- TxID: *txHash,
- PkScript: pkScript,
- }
-
- // If we have any, we'll record their confirmed height
- // so that notifications get dispatched when they
- // reaches the clients' desired number of confirmations.
- if _, ok := n.confNotifications[confRequest]; ok {
- notifyDetails(confRequest)
- }
- confRequest.TxID = ZeroHash
- if _, ok := n.confNotifications[confRequest]; ok {
- notifyDetails(confRequest)
- }
- }
- }
-}
-
-// handleConfDetailsAtTip tracks the confirmation height of the txid/output
-// script in order to properly dispatch a confirmation notification after
-// meeting each request's desired number of confirmations for all current and
-// future registered clients.
-func (n *TxNotifier) handleConfDetailsAtTip(confRequest ConfRequest,
- details *TxConfirmation) {
-
- // TODO(wilmer): cancel pending historical rescans if any?
- confSet := n.confNotifications[confRequest]
-
- // If we already have details for this request, we don't want to add it
- // again since we have already dispatched notifications for it.
- if confSet.details != nil {
- log.Warnf("Ignoring address reuse for %s at height %d.",
- confRequest, details.BlockHeight)
- return
- }
-
- confSet.rescanStatus = rescanComplete
- confSet.details = details
-
- for _, ntfn := range confSet.ntfns {
- // In the event that this notification was aware that the
- // transaction/output script was reorged out of the chain, we'll
- // consume the reorg notification if it hasn't been done yet
- // already.
- select {
- case <-ntfn.Event.NegativeConf:
- default:
- }
-
- // We'll note this client's required number of confirmations so
- // that we can notify them when expected.
- confHeight := details.BlockHeight + ntfn.NumConfirmations - 1
- ntfnSet, exists := n.ntfnsByConfirmHeight[confHeight]
- if !exists {
- ntfnSet = make(map[*ConfNtfn]struct{})
- n.ntfnsByConfirmHeight[confHeight] = ntfnSet
- }
- ntfnSet[ntfn] = struct{}{}
- }
-
- // We'll also note the initial confirmation height in order to correctly
- // handle dispatching notifications when the transaction/output script
- // gets reorged out of the chain.
- txSet, exists := n.confsByInitialHeight[details.BlockHeight]
- if !exists {
- txSet = make(map[ConfRequest]struct{})
- n.confsByInitialHeight[details.BlockHeight] = txSet
- }
- txSet[confRequest] = struct{}{}
-}
-
-// handleSpendDetailsAtTip tracks the spend height of the outpoint/output script
-// in order to properly dispatch a spend notification for all current and future
-// registered clients.
-func (n *TxNotifier) handleSpendDetailsAtTip(spendRequest SpendRequest,
- details *SpendDetail) {
-
- // TODO(wilmer): cancel pending historical rescans if any?
- spendSet := n.spendNotifications[spendRequest]
- spendSet.rescanStatus = rescanComplete
- spendSet.details = details
-
- for _, ntfn := range spendSet.ntfns {
- // In the event that this notification was aware that the
- // spending transaction of its outpoint/output script was
- // reorged out of the chain, we'll consume the reorg
- // notification if it hasn't been done yet already.
- select {
- case <-ntfn.Event.Reorg:
- default:
- }
- }
-
- // We'll note the spending height of the request in order to correctly
- // handle dispatching notifications when the spending transactions gets
- // reorged out of the chain.
- spendHeight := uint32(details.SpendingHeight)
- opSet, exists := n.spendsByHeight[spendHeight]
- if !exists {
- opSet = make(map[SpendRequest]struct{})
- n.spendsByHeight[spendHeight] = opSet
- }
- opSet[spendRequest] = struct{}{}
-
- log.Debugf("Spend request %v spent at tip=%d", spendRequest,
- spendHeight)
-}
-
-// NotifyHeight dispatches confirmation and spend notifications to the clients
-// who registered for a notification which has been fulfilled at the passed
-// height.
-func (n *TxNotifier) NotifyHeight(height uint32) er.R {
- n.Lock()
- defer n.Unlock()
-
- // First, we'll dispatch an update to all of the notification clients
- // for our watched requests with the number of confirmations left at
- // this new height.
- for _, confRequests := range n.confsByInitialHeight {
- for confRequest := range confRequests {
- confSet := n.confNotifications[confRequest]
- for _, ntfn := range confSet.ntfns {
- txConfHeight := confSet.details.BlockHeight +
- ntfn.NumConfirmations - 1
- numConfsLeft := txConfHeight - height
-
- // Since we don't clear notifications until
- // transactions/output scripts are no longer
- // under the risk of being reorganized out of
- // the chain, we'll skip sending updates for
- // those that have already been confirmed.
- if int32(numConfsLeft) < 0 {
- continue
- }
-
- select {
- case ntfn.Event.Updates <- numConfsLeft:
- case <-n.quit:
- return ErrTxNotifierExiting.Default()
- }
- }
- }
- }
-
- // Then, we'll dispatch notifications for all the requests that have
- // become confirmed at this new block height.
- for ntfn := range n.ntfnsByConfirmHeight[height] {
- confSet := n.confNotifications[ntfn.ConfRequest]
-
- log.Infof("Dispatching %v confirmation notification for %v",
- ntfn.NumConfirmations, ntfn.ConfRequest)
-
- select {
- case ntfn.Event.Confirmed <- confSet.details:
- ntfn.dispatched = true
- case <-n.quit:
- return ErrTxNotifierExiting.Default()
- }
- }
- delete(n.ntfnsByConfirmHeight, height)
-
- // Finally, we'll dispatch spend notifications for all the requests that
- // were spent at this new block height.
- for spendRequest := range n.spendsByHeight[height] {
- spendSet := n.spendNotifications[spendRequest]
- for _, ntfn := range spendSet.ntfns {
- err := n.dispatchSpendDetails(ntfn, spendSet.details)
- if err != nil {
- return err
- }
- }
- }
-
- return nil
-}
-
-// DisconnectTip handles the tip of the current chain being disconnected during
-// a chain reorganization. If any watched requests were included in this block,
-// internal structures are updated to ensure confirmation/spend notifications
-// are consumed (if not already), and reorg notifications are dispatched
-// instead. Confirmation/spend notifications will be dispatched again upon block
-// inclusion.
-func (n *TxNotifier) DisconnectTip(blockHeight uint32) er.R {
- select {
- case <-n.quit:
- return ErrTxNotifierExiting.Default()
- default:
- }
-
- n.Lock()
- defer n.Unlock()
-
- if blockHeight != n.currentHeight {
- return er.Errorf("received blocks out of order: "+
- "current height=%d, disconnected height=%d",
- n.currentHeight, blockHeight)
- }
- n.currentHeight--
- n.reorgDepth++
-
- // With the block disconnected, we'll update the confirm and spend hints
- // for our notification requests to reflect the new height, except for
- // those that have confirmed/spent at previous heights.
- n.updateHints(blockHeight)
-
- // We'll go through all of our watched confirmation requests and attempt
- // to drain their notification channels to ensure sending notifications
- // to the clients is always non-blocking.
- for initialHeight, txHashes := range n.confsByInitialHeight {
- for txHash := range txHashes {
- // If the transaction/output script has been reorged out
- // of the chain, we'll make sure to remove the cached
- // confirmation details to prevent notifying clients
- // with old information.
- confSet := n.confNotifications[txHash]
- if initialHeight == blockHeight {
- confSet.details = nil
- }
-
- for _, ntfn := range confSet.ntfns {
- // First, we'll attempt to drain an update
- // from each notification to ensure sends to the
- // Updates channel are always non-blocking.
- select {
- case <-ntfn.Event.Updates:
- case <-n.quit:
- return ErrTxNotifierExiting.Default()
- default:
- }
-
- // Then, we'll check if the current
- // transaction/output script was included in the
- // block currently being disconnected. If it
- // was, we'll need to dispatch a reorg
- // notification to the client.
- if initialHeight == blockHeight {
- err := n.dispatchConfReorg(
- ntfn, blockHeight,
- )
- if err != nil {
- return err
- }
- }
- }
- }
- }
-
- // We'll also go through our watched spend requests and attempt to drain
- // their dispatched notifications to ensure dispatching notifications to
- // clients later on is always non-blocking. We're only interested in
- // requests whose spending transaction was included at the height being
- // disconnected.
- for op := range n.spendsByHeight[blockHeight] {
- // Since the spending transaction is being reorged out of the
- // chain, we'll need to clear out the spending details of the
- // request.
- spendSet := n.spendNotifications[op]
- spendSet.details = nil
-
- // For all requests which have had a spend notification
- // dispatched, we'll attempt to drain it and send a reorg
- // notification instead.
- for _, ntfn := range spendSet.ntfns {
- if err := n.dispatchSpendReorg(ntfn); err != nil {
- return err
- }
- }
- }
-
- // Finally, we can remove the requests that were confirmed and/or spent
- // at the height being disconnected. We'll still continue to track them
- // until they have been confirmed/spent and are no longer under the risk
- // of being reorged out of the chain again.
- delete(n.confsByInitialHeight, blockHeight)
- delete(n.spendsByHeight, blockHeight)
-
- return nil
-}
-
-// updateHints attempts to update the confirm and spend hints for all relevant
-// requests respectively. The height parameter is used to determine which
-// requests we should update based on whether a new block is being
-// connected/disconnected.
-//
-// NOTE: This must be called with the TxNotifier's lock held and after its
-// height has already been reflected by a block being connected/disconnected.
-func (n *TxNotifier) updateHints(height uint32) {
- // TODO(wilmer): update under one database transaction.
- //
- // To update the height hint for all the required confirmation requests
- // under one database transaction, we'll gather the set of unconfirmed
- // requests along with the ones that confirmed at the height being
- // connected/disconnected.
- confRequests := n.unconfirmedRequests()
- for confRequest := range n.confsByInitialHeight[height] {
- confRequests = append(confRequests, confRequest)
- }
- err := n.confirmHintCache.CommitConfirmHint(
- n.currentHeight, confRequests...,
- )
- if err != nil {
- // The error is not fatal as this is an optimistic optimization,
- // so we'll avoid returning an error.
- log.Debugf("Unable to update confirm hints to %d for "+
- "%v: %v", n.currentHeight, confRequests, err)
- }
-
- // Similarly, to update the height hint for all the required spend
- // requests under one database transaction, we'll gather the set of
- // unspent requests along with the ones that were spent at the height
- // being connected/disconnected.
- spendRequests := n.unspentRequests()
- for spendRequest := range n.spendsByHeight[height] {
- spendRequests = append(spendRequests, spendRequest)
- }
- err = n.spendHintCache.CommitSpendHint(n.currentHeight, spendRequests...)
- if err != nil {
- // The error is not fatal as this is an optimistic optimization,
- // so we'll avoid returning an error.
- log.Debugf("Unable to update spend hints to %d for "+
- "%v: %v", n.currentHeight, spendRequests, err)
- }
-}
-
-// unconfirmedRequests returns the set of confirmation requests that are
-// still seen as unconfirmed by the TxNotifier.
-//
-// NOTE: This method must be called with the TxNotifier's lock held.
-func (n *TxNotifier) unconfirmedRequests() []ConfRequest {
- var unconfirmed []ConfRequest
- for confRequest, confNtfnSet := range n.confNotifications {
- // If the notification is already aware of its confirmation
- // details, or it's in the process of learning them, we'll skip
- // it as we can't yet determine if it's confirmed or not.
- if confNtfnSet.rescanStatus != rescanComplete ||
- confNtfnSet.details != nil {
- continue
- }
-
- unconfirmed = append(unconfirmed, confRequest)
- }
-
- return unconfirmed
-}
-
-// unspentRequests returns the set of spend requests that are still seen as
-// unspent by the TxNotifier.
-//
-// NOTE: This method must be called with the TxNotifier's lock held.
-func (n *TxNotifier) unspentRequests() []SpendRequest {
- var unspent []SpendRequest
- for spendRequest, spendNtfnSet := range n.spendNotifications {
- // If the notification is already aware of its spend details, or
- // it's in the process of learning them, we'll skip it as we
- // can't yet determine if it's unspent or not.
- if spendNtfnSet.rescanStatus != rescanComplete ||
- spendNtfnSet.details != nil {
- continue
- }
-
- unspent = append(unspent, spendRequest)
- }
-
- return unspent
-}
-
-// dispatchConfReorg dispatches a reorg notification to the client if the
-// confirmation notification was already delivered.
-//
-// NOTE: This must be called with the TxNotifier's lock held.
-func (n *TxNotifier) dispatchConfReorg(ntfn *ConfNtfn,
- heightDisconnected uint32) er.R {
-
- // If the request's confirmation notification has yet to be dispatched,
- // we'll need to clear its entry within the ntfnsByConfirmHeight index
- // to prevent from notifying the client once the notifier reaches the
- // confirmation height.
- if !ntfn.dispatched {
- confHeight := heightDisconnected + ntfn.NumConfirmations - 1
- ntfnSet, exists := n.ntfnsByConfirmHeight[confHeight]
- if exists {
- delete(ntfnSet, ntfn)
- }
- return nil
- }
-
- // Otherwise, the entry within the ntfnsByConfirmHeight has already been
- // deleted, so we'll attempt to drain the confirmation notification to
- // ensure sends to the Confirmed channel are always non-blocking.
- select {
- case <-ntfn.Event.Confirmed:
- case <-n.quit:
- return ErrTxNotifierExiting.Default()
- default:
- }
-
- ntfn.dispatched = false
-
- // Send a negative confirmation notification to the client indicating
- // how many blocks have been disconnected successively.
- select {
- case ntfn.Event.NegativeConf <- int32(n.reorgDepth):
- case <-n.quit:
- return ErrTxNotifierExiting.Default()
- }
-
- return nil
-}
-
-// dispatchSpendReorg dispatches a reorg notification to the client if a spend
-// notiification was already delivered.
-//
-// NOTE: This must be called with the TxNotifier's lock held.
-func (n *TxNotifier) dispatchSpendReorg(ntfn *SpendNtfn) er.R {
- if !ntfn.dispatched {
- return nil
- }
-
- // Attempt to drain the spend notification to ensure sends to the Spend
- // channel are always non-blocking.
- select {
- case <-ntfn.Event.Spend:
- default:
- }
-
- // Send a reorg notification to the client in order for them to
- // correctly handle reorgs.
- select {
- case ntfn.Event.Reorg <- struct{}{}:
- case <-n.quit:
- return ErrTxNotifierExiting.Default()
- }
-
- ntfn.dispatched = false
-
- return nil
-}
-
-// TearDown is to be called when the owner of the TxNotifier is exiting. This
-// closes the event channels of all registered notifications that have not been
-// dispatched yet.
-func (n *TxNotifier) TearDown() {
- close(n.quit)
-
- n.Lock()
- defer n.Unlock()
-
- for _, confSet := range n.confNotifications {
- for confID, ntfn := range confSet.ntfns {
- close(ntfn.Event.Confirmed)
- close(ntfn.Event.Updates)
- close(ntfn.Event.NegativeConf)
- close(ntfn.Event.Done)
- delete(confSet.ntfns, confID)
- }
- }
-
- for _, spendSet := range n.spendNotifications {
- for spendID, ntfn := range spendSet.ntfns {
- close(ntfn.Event.Spend)
- close(ntfn.Event.Reorg)
- close(ntfn.Event.Done)
- delete(spendSet.ntfns, spendID)
- }
- }
-}
diff --git a/lnd/chainntnfs/txnotifier_test.go b/lnd/chainntnfs/txnotifier_test.go
deleted file mode 100644
index ef0dd4d4..00000000
--- a/lnd/chainntnfs/txnotifier_test.go
+++ /dev/null
@@ -1,2674 +0,0 @@
-package chainntnfs_test
-
-import (
- "bytes"
- "sync"
- "testing"
- "time"
-
- "github.com/pkt-cash/pktd/btcutil"
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/chaincfg/chainhash"
- "github.com/pkt-cash/pktd/lnd/chainntnfs"
- "github.com/pkt-cash/pktd/wire"
-)
-
-var (
- testRawScript = []byte{
- // OP_HASH160
- 0xa9,
- // OP_DATA_20
- 0x14,
- // <20-byte script hash>
- 0x90, 0x1c, 0x86, 0x94, 0xc0, 0x3f, 0xaf, 0xd5,
- 0x52, 0x28, 0x10, 0xe0, 0x33, 0x0f, 0x26, 0xe6,
- 0x7a, 0x85, 0x33, 0xcd,
- // OP_EQUAL
- 0x87,
- }
- testSigScript = []byte{
- // OP_DATA_16
- 0x16,
- // <22-byte redeem script>
- 0x00, 0x14, 0x1d, 0x7c, 0xd6, 0xc7, 0x5c, 0x2e,
- 0x86, 0xf4, 0xcb, 0xf9, 0x8e, 0xae, 0xd2, 0x21,
- 0xb3, 0x0b, 0xd9, 0xa0, 0xb9, 0x28,
- }
-)
-
-type mockHintCache struct {
- mu sync.Mutex
- confHints map[chainntnfs.ConfRequest]uint32
- spendHints map[chainntnfs.SpendRequest]uint32
-}
-
-var _ chainntnfs.SpendHintCache = (*mockHintCache)(nil)
-var _ chainntnfs.ConfirmHintCache = (*mockHintCache)(nil)
-
-func (c *mockHintCache) CommitSpendHint(heightHint uint32,
- spendRequests ...chainntnfs.SpendRequest) er.R {
-
- c.mu.Lock()
- defer c.mu.Unlock()
-
- for _, spendRequest := range spendRequests {
- c.spendHints[spendRequest] = heightHint
- }
-
- return nil
-}
-
-func (c *mockHintCache) QuerySpendHint(spendRequest chainntnfs.SpendRequest) (uint32, er.R) {
- c.mu.Lock()
- defer c.mu.Unlock()
-
- hint, ok := c.spendHints[spendRequest]
- if !ok {
- return 0, chainntnfs.ErrSpendHintNotFound.Default()
- }
-
- return hint, nil
-}
-
-func (c *mockHintCache) PurgeSpendHint(spendRequests ...chainntnfs.SpendRequest) er.R {
- c.mu.Lock()
- defer c.mu.Unlock()
-
- for _, spendRequest := range spendRequests {
- delete(c.spendHints, spendRequest)
- }
-
- return nil
-}
-
-func (c *mockHintCache) CommitConfirmHint(heightHint uint32,
- confRequests ...chainntnfs.ConfRequest) er.R {
-
- c.mu.Lock()
- defer c.mu.Unlock()
-
- for _, confRequest := range confRequests {
- c.confHints[confRequest] = heightHint
- }
-
- return nil
-}
-
-func (c *mockHintCache) QueryConfirmHint(confRequest chainntnfs.ConfRequest) (uint32, er.R) {
- c.mu.Lock()
- defer c.mu.Unlock()
-
- hint, ok := c.confHints[confRequest]
- if !ok {
- return 0, chainntnfs.ErrConfirmHintNotFound.Default()
- }
-
- return hint, nil
-}
-
-func (c *mockHintCache) PurgeConfirmHint(confRequests ...chainntnfs.ConfRequest) er.R {
- c.mu.Lock()
- defer c.mu.Unlock()
-
- for _, confRequest := range confRequests {
- delete(c.confHints, confRequest)
- }
-
- return nil
-}
-
-func newMockHintCache() *mockHintCache {
- return &mockHintCache{
- confHints: make(map[chainntnfs.ConfRequest]uint32),
- spendHints: make(map[chainntnfs.SpendRequest]uint32),
- }
-}
-
-// TestTxNotifierRegistrationValidation ensures that we are not able to register
-// requests with invalid parameters.
-func TestTxNotifierRegistrationValidation(t *testing.T) {
- t.Parallel()
-
- testCases := []struct {
- name string
- pkScript []byte
- numConfs uint32
- heightHint uint32
- checkSpend bool
- err *er.ErrorCode
- }{
- {
- name: "empty output script",
- pkScript: nil,
- numConfs: 1,
- heightHint: 1,
- checkSpend: true,
- err: chainntnfs.ErrNoScript,
- },
- {
- name: "zero num confs",
- pkScript: testRawScript,
- numConfs: 0,
- heightHint: 1,
- err: chainntnfs.ErrNumConfsOutOfRange,
- },
- {
- name: "exceed max num confs",
- pkScript: testRawScript,
- numConfs: chainntnfs.MaxNumConfs + 1,
- heightHint: 1,
- err: chainntnfs.ErrNumConfsOutOfRange,
- },
- {
- name: "empty height hint",
- pkScript: testRawScript,
- numConfs: 1,
- heightHint: 0,
- checkSpend: true,
- err: chainntnfs.ErrNoHeightHint,
- },
- }
-
- for _, testCase := range testCases {
- testCase := testCase
- t.Run(testCase.name, func(t *testing.T) {
- hintCache := newMockHintCache()
- n := chainntnfs.NewTxNotifier(
- 10, chainntnfs.ReorgSafetyLimit, hintCache, hintCache,
- )
-
- _, err := n.RegisterConf(
- &chainntnfs.ZeroHash, testCase.pkScript,
- testCase.numConfs, testCase.heightHint,
- )
- if testCase.err == nil && err == nil {
- } else if testCase.err == nil || !testCase.err.Is(err) {
- t.Fatalf("conf registration expected error "+
- "\"%v\", got \"%v\"", testCase.err, err)
- }
-
- if !testCase.checkSpend {
- return
- }
-
- _, err = n.RegisterSpend(
- &chainntnfs.ZeroOutPoint, testCase.pkScript,
- testCase.heightHint,
- )
- if testCase.err == nil && err == nil {
- } else if testCase.err == nil || !testCase.err.Is(err) {
- t.Fatalf("spend registration expected error "+
- "\"%v\", got \"%v\"", testCase.err, err)
- }
- })
- }
-}
-
-// TestTxNotifierFutureConfDispatch tests that the TxNotifier dispatches
-// registered notifications when a transaction confirms after registration.
-func TestTxNotifierFutureConfDispatch(t *testing.T) {
- t.Parallel()
-
- const (
- tx1NumConfs uint32 = 1
- tx2NumConfs uint32 = 2
- )
-
- hintCache := newMockHintCache()
- n := chainntnfs.NewTxNotifier(
- 10, chainntnfs.ReorgSafetyLimit, hintCache, hintCache,
- )
-
- // Create the test transactions and register them with the TxNotifier
- // before including them in a block to receive future
- // notifications.
- tx1 := wire.MsgTx{Version: 1}
- tx1.AddTxOut(&wire.TxOut{PkScript: testRawScript})
- tx1Hash := tx1.TxHash()
- ntfn1, err := n.RegisterConf(&tx1Hash, testRawScript, tx1NumConfs, 1)
- if err != nil {
- t.Fatalf("unable to register ntfn: %v", err)
- }
-
- tx2 := wire.MsgTx{Version: 2}
- tx2.AddTxOut(&wire.TxOut{PkScript: testRawScript})
- tx2Hash := tx2.TxHash()
- ntfn2, err := n.RegisterConf(&tx2Hash, testRawScript, tx2NumConfs, 1)
- if err != nil {
- t.Fatalf("unable to register ntfn: %v", err)
- }
-
- // We should not receive any notifications from both transactions
- // since they have not been included in a block yet.
- select {
- case <-ntfn1.Event.Updates:
- t.Fatal("Received unexpected confirmation update for tx1")
- case txConf := <-ntfn1.Event.Confirmed:
- t.Fatalf("Received unexpected confirmation for tx1: %v", txConf)
- default:
- }
-
- select {
- case <-ntfn2.Event.Updates:
- t.Fatal("Received unexpected confirmation update for tx2")
- case txConf := <-ntfn2.Event.Confirmed:
- t.Fatalf("Received unexpected confirmation for tx2: %v", txConf)
- default:
- }
-
- // Include the transactions in a block and add it to the TxNotifier.
- // This should confirm tx1, but not tx2.
- block1 := btcutil.NewBlock(&wire.MsgBlock{
- Transactions: []*wire.MsgTx{&tx1, &tx2},
- })
-
- err = n.ConnectTip(block1.Hash(), 11, block1.Transactions())
- if err != nil {
- t.Fatalf("Failed to connect block: %v", err)
- }
- if err := n.NotifyHeight(11); err != nil {
- t.Fatalf("unable to dispatch notifications: %v", err)
- }
-
- // We should only receive one update for tx1 since it only requires
- // one confirmation and it already met it.
- select {
- case numConfsLeft := <-ntfn1.Event.Updates:
- const expected = 0
- if numConfsLeft != expected {
- t.Fatalf("Received incorrect confirmation update: tx1 "+
- "expected %d confirmations left, got %d",
- expected, numConfsLeft)
- }
- default:
- t.Fatal("Expected confirmation update for tx1")
- }
-
- // A confirmation notification for this tranaction should be dispatched,
- // as it only required one confirmation.
- select {
- case txConf := <-ntfn1.Event.Confirmed:
- expectedConf := chainntnfs.TxConfirmation{
- BlockHash: block1.Hash(),
- BlockHeight: 11,
- TxIndex: 0,
- Tx: &tx1,
- }
- assertConfDetails(t, txConf, &expectedConf)
- default:
- t.Fatalf("Expected confirmation for tx1")
- }
-
- // We should only receive one update for tx2 since it only has one
- // confirmation so far and it requires two.
- select {
- case numConfsLeft := <-ntfn2.Event.Updates:
- const expected = 1
- if numConfsLeft != expected {
- t.Fatalf("Received incorrect confirmation update: tx2 "+
- "expected %d confirmations left, got %d",
- expected, numConfsLeft)
- }
- default:
- t.Fatal("Expected confirmation update for tx2")
- }
-
- // A confirmation notification for tx2 should not be dispatched yet, as
- // it requires one more confirmation.
- select {
- case txConf := <-ntfn2.Event.Confirmed:
- t.Fatalf("Received unexpected confirmation for tx2: %v", txConf)
- default:
- }
-
- // Create a new block and add it to the TxNotifier at the next height.
- // This should confirm tx2.
- block2 := btcutil.NewBlock(&wire.MsgBlock{})
- err = n.ConnectTip(block2.Hash(), 12, block2.Transactions())
- if err != nil {
- t.Fatalf("Failed to connect block: %v", err)
- }
- if err := n.NotifyHeight(12); err != nil {
- t.Fatalf("unable to dispatch notifications: %v", err)
- }
-
- // We should not receive any event notifications for tx1 since it has
- // already been confirmed.
- select {
- case <-ntfn1.Event.Updates:
- t.Fatal("Received unexpected confirmation update for tx1")
- case txConf := <-ntfn1.Event.Confirmed:
- t.Fatalf("Received unexpected confirmation for tx1: %v", txConf)
- default:
- }
-
- // We should only receive one update since the last at the new height,
- // indicating how many confirmations are still left.
- select {
- case numConfsLeft := <-ntfn2.Event.Updates:
- const expected = 0
- if numConfsLeft != expected {
- t.Fatalf("Received incorrect confirmation update: tx2 "+
- "expected %d confirmations left, got %d",
- expected, numConfsLeft)
- }
- default:
- t.Fatal("Expected confirmation update for tx2")
- }
-
- // A confirmation notification for tx2 should be dispatched, since it
- // now meets its required number of confirmations.
- select {
- case txConf := <-ntfn2.Event.Confirmed:
- expectedConf := chainntnfs.TxConfirmation{
- BlockHash: block1.Hash(),
- BlockHeight: 11,
- TxIndex: 1,
- Tx: &tx2,
- }
- assertConfDetails(t, txConf, &expectedConf)
- default:
- t.Fatalf("Expected confirmation for tx2")
- }
-}
-
-// TestTxNotifierHistoricalConfDispatch tests that the TxNotifier dispatches
-// registered notifications when the transaction is confirmed before
-// registration.
-func TestTxNotifierHistoricalConfDispatch(t *testing.T) {
- t.Parallel()
-
- const (
- tx1NumConfs uint32 = 1
- tx2NumConfs uint32 = 3
- )
-
- var (
- tx1 = wire.MsgTx{Version: 1}
- tx2 = wire.MsgTx{Version: 2}
- tx3 = wire.MsgTx{Version: 3}
- )
-
- hintCache := newMockHintCache()
- n := chainntnfs.NewTxNotifier(
- 10, chainntnfs.ReorgSafetyLimit, hintCache, hintCache,
- )
-
- // Create the test transactions at a height before the TxNotifier's
- // starting height so that they are confirmed once registering them.
- tx1Hash := tx1.TxHash()
- ntfn1, err := n.RegisterConf(&tx1Hash, testRawScript, tx1NumConfs, 1)
- if err != nil {
- t.Fatalf("unable to register ntfn: %v", err)
- }
-
- tx2Hash := tx2.TxHash()
- ntfn2, err := n.RegisterConf(&tx2Hash, testRawScript, tx2NumConfs, 1)
- if err != nil {
- t.Fatalf("unable to register ntfn: %v", err)
- }
-
- // Update tx1 with its confirmation details. We should only receive one
- // update since it only requires one confirmation and it already met it.
- txConf1 := chainntnfs.TxConfirmation{
- BlockHash: &chainntnfs.ZeroHash,
- BlockHeight: 9,
- TxIndex: 1,
- Tx: &tx1,
- }
- err = n.UpdateConfDetails(ntfn1.HistoricalDispatch.ConfRequest, &txConf1)
- if err != nil {
- t.Fatalf("unable to update conf details: %v", err)
- }
- select {
- case numConfsLeft := <-ntfn1.Event.Updates:
- const expected = 0
- if numConfsLeft != expected {
- t.Fatalf("Received incorrect confirmation update: tx1 "+
- "expected %d confirmations left, got %d",
- expected, numConfsLeft)
- }
- default:
- t.Fatal("Expected confirmation update for tx1")
- }
-
- // A confirmation notification for tx1 should also be dispatched.
- select {
- case txConf := <-ntfn1.Event.Confirmed:
- assertConfDetails(t, txConf, &txConf1)
- default:
- t.Fatalf("Expected confirmation for tx1")
- }
-
- // Update tx2 with its confirmation details. This should not trigger a
- // confirmation notification since it hasn't reached its required number
- // of confirmations, but we should receive a confirmation update
- // indicating how many confirmation are left.
- txConf2 := chainntnfs.TxConfirmation{
- BlockHash: &chainntnfs.ZeroHash,
- BlockHeight: 9,
- TxIndex: 2,
- Tx: &tx2,
- }
- err = n.UpdateConfDetails(ntfn2.HistoricalDispatch.ConfRequest, &txConf2)
- if err != nil {
- t.Fatalf("unable to update conf details: %v", err)
- }
- select {
- case numConfsLeft := <-ntfn2.Event.Updates:
- const expected = 1
- if numConfsLeft != expected {
- t.Fatalf("Received incorrect confirmation update: tx2 "+
- "expected %d confirmations left, got %d",
- expected, numConfsLeft)
- }
- default:
- t.Fatal("Expected confirmation update for tx2")
- }
-
- select {
- case txConf := <-ntfn2.Event.Confirmed:
- t.Fatalf("Received unexpected confirmation for tx2: %v", txConf)
- default:
- }
-
- // Create a new block and add it to the TxNotifier at the next height.
- // This should confirm tx2.
- block := btcutil.NewBlock(&wire.MsgBlock{
- Transactions: []*wire.MsgTx{&tx3},
- })
-
- err = n.ConnectTip(block.Hash(), 11, block.Transactions())
- if err != nil {
- t.Fatalf("Failed to connect block: %v", err)
- }
- if err := n.NotifyHeight(11); err != nil {
- t.Fatalf("unable to dispatch notifications: %v", err)
- }
-
- // We should not receive any event notifications for tx1 since it has
- // already been confirmed.
- select {
- case <-ntfn1.Event.Updates:
- t.Fatal("Received unexpected confirmation update for tx1")
- case txConf := <-ntfn1.Event.Confirmed:
- t.Fatalf("Received unexpected confirmation for tx1: %v", txConf)
- default:
- }
-
- // We should only receive one update for tx2 since the last one,
- // indicating how many confirmations are still left.
- select {
- case numConfsLeft := <-ntfn2.Event.Updates:
- const expected = 0
- if numConfsLeft != expected {
- t.Fatalf("Received incorrect confirmation update: tx2 "+
- "expected %d confirmations left, got %d",
- expected, numConfsLeft)
- }
- default:
- t.Fatal("Expected confirmation update for tx2")
- }
-
- // A confirmation notification for tx2 should be dispatched, as it met
- // its required number of confirmations.
- select {
- case txConf := <-ntfn2.Event.Confirmed:
- assertConfDetails(t, txConf, &txConf2)
- default:
- t.Fatalf("Expected confirmation for tx2")
- }
-}
-
-// TestTxNotifierFutureSpendDispatch tests that the TxNotifier dispatches
-// registered notifications when an outpoint is spent after registration.
-func TestTxNotifierFutureSpendDispatch(t *testing.T) {
- t.Parallel()
-
- hintCache := newMockHintCache()
- n := chainntnfs.NewTxNotifier(
- 10, chainntnfs.ReorgSafetyLimit, hintCache, hintCache,
- )
-
- // We'll start off by registering for a spend notification of an
- // outpoint.
- op := wire.OutPoint{Index: 1}
- ntfn, err := n.RegisterSpend(&op, testRawScript, 1)
- if err != nil {
- t.Fatalf("unable to register spend ntfn: %v", err)
- }
-
- // We should not receive a notification as the outpoint has not been
- // spent yet.
- select {
- case <-ntfn.Event.Spend:
- t.Fatal("received unexpected spend notification")
- default:
- }
-
- // Construct the details of the spending transaction of the outpoint
- // above. We'll include it in the next block, which should trigger a
- // spend notification.
- spendTx := wire.NewMsgTx(2)
- spendTx.AddTxIn(&wire.TxIn{
- PreviousOutPoint: op,
- SignatureScript: testSigScript,
- })
- spendTxHash := spendTx.TxHash()
- block := btcutil.NewBlock(&wire.MsgBlock{
- Transactions: []*wire.MsgTx{spendTx},
- })
- err = n.ConnectTip(block.Hash(), 11, block.Transactions())
- if err != nil {
- t.Fatalf("unable to connect block: %v", err)
- }
- if err := n.NotifyHeight(11); err != nil {
- t.Fatalf("unable to dispatch notifications: %v", err)
- }
-
- expectedSpendDetails := &chainntnfs.SpendDetail{
- SpentOutPoint: &op,
- SpenderTxHash: &spendTxHash,
- SpendingTx: spendTx,
- SpenderInputIndex: 0,
- SpendingHeight: 11,
- }
-
- // Ensure that the details of the notification match as expected.
- select {
- case spendDetails := <-ntfn.Event.Spend:
- assertSpendDetails(t, spendDetails, expectedSpendDetails)
- default:
- t.Fatal("expected to receive spend details")
- }
-
- // Finally, we'll ensure that if the spending transaction has also been
- // spent, then we don't receive another spend notification.
- prevOut := wire.OutPoint{Hash: spendTxHash, Index: 0}
- spendOfSpend := wire.NewMsgTx(2)
- spendOfSpend.AddTxIn(&wire.TxIn{
- PreviousOutPoint: prevOut,
- SignatureScript: testSigScript,
- })
- block = btcutil.NewBlock(&wire.MsgBlock{
- Transactions: []*wire.MsgTx{spendOfSpend},
- })
- err = n.ConnectTip(block.Hash(), 12, block.Transactions())
- if err != nil {
- t.Fatalf("unable to connect block: %v", err)
- }
- if err := n.NotifyHeight(12); err != nil {
- t.Fatalf("unable to dispatch notifications: %v", err)
- }
-
- select {
- case <-ntfn.Event.Spend:
- t.Fatal("received unexpected spend notification")
- default:
- }
-}
-
-// TestTxNotifierFutureConfDispatchReuseSafe tests that the notifier does not
-// misbehave even if two confirmation requests for the same script are issued
-// at different block heights (which means funds are being sent to the same
-// script multiple times).
-func TestTxNotifierFutureConfDispatchReuseSafe(t *testing.T) {
- t.Parallel()
-
- currentBlock := uint32(10)
- hintCache := newMockHintCache()
- n := chainntnfs.NewTxNotifier(
- currentBlock, 2, hintCache, hintCache,
- )
-
- // We'll register a TX that sends to our test script and put it into a
- // block. Additionally we register a notification request for just the
- // script which should also be confirmed with that block.
- tx1 := wire.MsgTx{Version: 1}
- tx1.AddTxOut(&wire.TxOut{PkScript: testRawScript})
- tx1Hash := tx1.TxHash()
- ntfn1, err := n.RegisterConf(&tx1Hash, testRawScript, 1, 1)
- if err != nil {
- t.Fatalf("unable to register ntfn: %v", err)
- }
- scriptNtfn1, err := n.RegisterConf(nil, testRawScript, 1, 1)
- if err != nil {
- t.Fatalf("unable to register ntfn: %v", err)
- }
- block := btcutil.NewBlock(&wire.MsgBlock{
- Transactions: []*wire.MsgTx{&tx1},
- })
- currentBlock++
- err = n.ConnectTip(block.Hash(), currentBlock, block.Transactions())
- if err != nil {
- t.Fatalf("unable to connect block: %v", err)
- }
- if err := n.NotifyHeight(currentBlock); err != nil {
- t.Fatalf("unable to dispatch notifications: %v", err)
- }
-
- // Expect an update and confirmation of TX 1 at this point. We save the
- // confirmation details because we expect to receive the same details
- // for all further registrations.
- var confDetails *chainntnfs.TxConfirmation
- select {
- case <-ntfn1.Event.Updates:
- default:
- t.Fatal("expected update of TX 1")
- }
- select {
- case confDetails = <-ntfn1.Event.Confirmed:
- if confDetails.BlockHeight != currentBlock {
- t.Fatalf("expected TX to be confirmed in latest block")
- }
- default:
- t.Fatal("expected confirmation of TX 1")
- }
-
- // The notification for the script should also have received a
- // confirmation.
- select {
- case <-scriptNtfn1.Event.Updates:
- default:
- t.Fatal("expected update of script ntfn")
- }
- select {
- case details := <-scriptNtfn1.Event.Confirmed:
- assertConfDetails(t, details, confDetails)
- default:
- t.Fatal("expected update of script ntfn")
- }
-
- // Now register a second TX that spends to two outputs with the same
- // script so we have a different TXID. And again register a confirmation
- // for just the script.
- tx2 := wire.MsgTx{Version: 1}
- tx2.AddTxOut(&wire.TxOut{PkScript: testRawScript})
- tx2.AddTxOut(&wire.TxOut{PkScript: testRawScript})
- tx2Hash := tx2.TxHash()
- ntfn2, err := n.RegisterConf(&tx2Hash, testRawScript, 1, 1)
- if err != nil {
- t.Fatalf("unable to register ntfn: %v", err)
- }
- scriptNtfn2, err := n.RegisterConf(nil, testRawScript, 1, 1)
- if err != nil {
- t.Fatalf("unable to register ntfn: %v", err)
- }
- block2 := btcutil.NewBlock(&wire.MsgBlock{
- Transactions: []*wire.MsgTx{&tx2},
- })
- currentBlock++
- err = n.ConnectTip(block2.Hash(), currentBlock, block2.Transactions())
- if err != nil {
- t.Fatalf("unable to connect block: %v", err)
- }
- if err := n.NotifyHeight(currentBlock); err != nil {
- t.Fatalf("unable to dispatch notifications: %v", err)
- }
-
- // Transaction 2 should get a confirmation here too. Since it was
- // a different TXID we wouldn't get the cached details here but the TX
- // should be confirmed right away still.
- select {
- case <-ntfn2.Event.Updates:
- default:
- t.Fatal("expected update of TX 2")
- }
- select {
- case details := <-ntfn2.Event.Confirmed:
- if details.BlockHeight != currentBlock {
- t.Fatalf("expected TX to be confirmed in latest block")
- }
- default:
- t.Fatal("expected update of TX 2")
- }
-
- // The second notification for the script should also have received a
- // confirmation. Since it's the same script, we expect to get the cached
- // details from the first TX back immediately. Nothing should be
- // registered at the notifier for the current block height for that
- // script any more.
- select {
- case <-scriptNtfn2.Event.Updates:
- default:
- t.Fatal("expected update of script ntfn")
- }
- select {
- case details := <-scriptNtfn2.Event.Confirmed:
- assertConfDetails(t, details, confDetails)
- default:
- t.Fatal("expected update of script ntfn")
- }
-
- // Finally, mine a few empty blocks and expect both TXs to be confirmed.
- for currentBlock < 15 {
- block := btcutil.NewBlock(&wire.MsgBlock{})
- currentBlock++
- err = n.ConnectTip(
- block.Hash(), currentBlock, block.Transactions(),
- )
- if err != nil {
- t.Fatalf("unable to connect block: %v", err)
- }
- if err := n.NotifyHeight(currentBlock); err != nil {
- t.Fatalf("unable to dispatch notifications: %v", err)
- }
- }
-
- // Events for both confirmation requests should have been dispatched.
- select {
- case <-ntfn1.Event.Done:
- default:
- t.Fatal("expected notifications for TX 1 to be done")
- }
- select {
- case <-ntfn2.Event.Done:
- default:
- t.Fatal("expected notifications for TX 2 to be done")
- }
-}
-
-// TestTxNotifierHistoricalSpendDispatch tests that the TxNotifier dispatches
-// registered notifications when an outpoint is spent before registration.
-func TestTxNotifierHistoricalSpendDispatch(t *testing.T) {
- t.Parallel()
-
- const startingHeight = 10
-
- hintCache := newMockHintCache()
- n := chainntnfs.NewTxNotifier(
- startingHeight, chainntnfs.ReorgSafetyLimit, hintCache,
- hintCache,
- )
-
- // We'll start by constructing the spending details of the outpoint
- // below.
- spentOutpoint := wire.OutPoint{Index: 1}
- spendTx := wire.NewMsgTx(2)
- spendTx.AddTxIn(&wire.TxIn{
- PreviousOutPoint: spentOutpoint,
- SignatureScript: testSigScript,
- })
- spendTxHash := spendTx.TxHash()
-
- expectedSpendDetails := &chainntnfs.SpendDetail{
- SpentOutPoint: &spentOutpoint,
- SpenderTxHash: &spendTxHash,
- SpendingTx: spendTx,
- SpenderInputIndex: 0,
- SpendingHeight: startingHeight - 1,
- }
-
- // We'll register for a spend notification of the outpoint and ensure
- // that a notification isn't dispatched.
- ntfn, err := n.RegisterSpend(&spentOutpoint, testRawScript, 1)
- if err != nil {
- t.Fatalf("unable to register spend ntfn: %v", err)
- }
-
- select {
- case <-ntfn.Event.Spend:
- t.Fatal("received unexpected spend notification")
- default:
- }
-
- // Because we're interested in testing the case of a historical spend,
- // we'll hand off the spending details of the outpoint to the notifier
- // as it is not possible for it to view historical events in the chain.
- // By doing this, we replicate the functionality of the ChainNotifier.
- err = n.UpdateSpendDetails(
- ntfn.HistoricalDispatch.SpendRequest, expectedSpendDetails,
- )
- if err != nil {
- t.Fatalf("unable to update spend details: %v", err)
- }
-
- // Now that we have the spending details, we should receive a spend
- // notification. We'll ensure that the details match as intended.
- select {
- case spendDetails := <-ntfn.Event.Spend:
- assertSpendDetails(t, spendDetails, expectedSpendDetails)
- default:
- t.Fatalf("expected to receive spend details")
- }
-
- // Finally, we'll ensure that if the spending transaction has also been
- // spent, then we don't receive another spend notification.
- prevOut := wire.OutPoint{Hash: spendTxHash, Index: 0}
- spendOfSpend := wire.NewMsgTx(2)
- spendOfSpend.AddTxIn(&wire.TxIn{
- PreviousOutPoint: prevOut,
- SignatureScript: testSigScript,
- })
- block := btcutil.NewBlock(&wire.MsgBlock{
- Transactions: []*wire.MsgTx{spendOfSpend},
- })
- err = n.ConnectTip(block.Hash(), startingHeight+1, block.Transactions())
- if err != nil {
- t.Fatalf("unable to connect block: %v", err)
- }
- if err := n.NotifyHeight(startingHeight + 1); err != nil {
- t.Fatalf("unable to dispatch notifications: %v", err)
- }
-
- select {
- case <-ntfn.Event.Spend:
- t.Fatal("received unexpected spend notification")
- default:
- }
-}
-
-// TestTxNotifierMultipleHistoricalRescans ensures that we don't attempt to
-// request multiple historical confirmation rescans per transactions.
-func TestTxNotifierMultipleHistoricalConfRescans(t *testing.T) {
- t.Parallel()
-
- const startingHeight = 10
- hintCache := newMockHintCache()
- n := chainntnfs.NewTxNotifier(
- startingHeight, chainntnfs.ReorgSafetyLimit, hintCache,
- hintCache,
- )
-
- // The first registration for a transaction in the notifier should
- // request a historical confirmation rescan as it does not have a
- // historical view of the chain.
- ntfn1, err := n.RegisterConf(&chainntnfs.ZeroHash, testRawScript, 1, 1)
- if err != nil {
- t.Fatalf("unable to register spend ntfn: %v", err)
- }
- if ntfn1.HistoricalDispatch == nil {
- t.Fatal("expected to receive historical dispatch request")
- }
-
- // We'll register another confirmation notification for the same
- // transaction. This should not request a historical confirmation rescan
- // since the first one is still pending.
- ntfn2, err := n.RegisterConf(&chainntnfs.ZeroHash, testRawScript, 1, 1)
- if err != nil {
- t.Fatalf("unable to register spend ntfn: %v", err)
- }
- if ntfn2.HistoricalDispatch != nil {
- t.Fatal("received unexpected historical rescan request")
- }
-
- // Finally, we'll mark the ongoing historical rescan as complete and
- // register another notification. We should also expect not to see a
- // historical rescan request since the confirmation details should be
- // cached.
- confDetails := &chainntnfs.TxConfirmation{
- BlockHeight: startingHeight - 1,
- }
- err = n.UpdateConfDetails(ntfn1.HistoricalDispatch.ConfRequest, confDetails)
- if err != nil {
- t.Fatalf("unable to update conf details: %v", err)
- }
-
- ntfn3, err := n.RegisterConf(&chainntnfs.ZeroHash, testRawScript, 1, 1)
- if err != nil {
- t.Fatalf("unable to register spend ntfn: %v", err)
- }
- if ntfn3.HistoricalDispatch != nil {
- t.Fatal("received unexpected historical rescan request")
- }
-}
-
-// TestTxNotifierMultipleHistoricalRescans ensures that we don't attempt to
-// request multiple historical spend rescans per outpoints.
-func TestTxNotifierMultipleHistoricalSpendRescans(t *testing.T) {
- t.Parallel()
-
- const startingHeight = 10
- hintCache := newMockHintCache()
- n := chainntnfs.NewTxNotifier(
- startingHeight, chainntnfs.ReorgSafetyLimit, hintCache,
- hintCache,
- )
-
- // The first registration for an outpoint in the notifier should request
- // a historical spend rescan as it does not have a historical view of
- // the chain.
- op := wire.OutPoint{Index: 1}
- ntfn1, err := n.RegisterSpend(&op, testRawScript, 1)
- if err != nil {
- t.Fatalf("unable to register spend ntfn: %v", err)
- }
- if ntfn1.HistoricalDispatch == nil {
- t.Fatal("expected to receive historical dispatch request")
- }
-
- // We'll register another spend notification for the same outpoint. This
- // should not request a historical spend rescan since the first one is
- // still pending.
- ntfn2, err := n.RegisterSpend(&op, testRawScript, 1)
- if err != nil {
- t.Fatalf("unable to register spend ntfn: %v", err)
- }
- if ntfn2.HistoricalDispatch != nil {
- t.Fatal("received unexpected historical rescan request")
- }
-
- // Finally, we'll mark the ongoing historical rescan as complete and
- // register another notification. We should also expect not to see a
- // historical rescan request since the confirmation details should be
- // cached.
- spendDetails := &chainntnfs.SpendDetail{
- SpentOutPoint: &op,
- SpenderTxHash: &chainntnfs.ZeroHash,
- SpendingTx: wire.NewMsgTx(2),
- SpenderInputIndex: 0,
- SpendingHeight: startingHeight - 1,
- }
- err = n.UpdateSpendDetails(
- ntfn1.HistoricalDispatch.SpendRequest, spendDetails,
- )
- if err != nil {
- t.Fatalf("unable to update spend details: %v", err)
- }
-
- ntfn3, err := n.RegisterSpend(&op, testRawScript, 1)
- if err != nil {
- t.Fatalf("unable to register spend ntfn: %v", err)
- }
- if ntfn3.HistoricalDispatch != nil {
- t.Fatal("received unexpected historical rescan request")
- }
-}
-
-// TestTxNotifierMultipleHistoricalNtfns ensures that the TxNotifier will only
-// request one rescan for a transaction/outpoint when having multiple client
-// registrations. Once the rescan has completed and retrieved the
-// confirmation/spend details, a notification should be dispatched to _all_
-// clients.
-func TestTxNotifierMultipleHistoricalNtfns(t *testing.T) {
- t.Parallel()
-
- const (
- numNtfns = 5
- startingHeight = 10
- )
-
- hintCache := newMockHintCache()
- n := chainntnfs.NewTxNotifier(
- startingHeight, chainntnfs.ReorgSafetyLimit, hintCache,
- hintCache,
- )
-
- var txid chainhash.Hash
- copy(txid[:], bytes.Repeat([]byte{0x01}, 32))
-
- // We'll start off by registered 5 clients for a confirmation
- // notification on the same transaction.
- confNtfns := make([]*chainntnfs.ConfRegistration, numNtfns)
- for i := uint64(0); i < numNtfns; i++ {
- ntfn, err := n.RegisterConf(&txid, testRawScript, 1, 1)
- if err != nil {
- t.Fatalf("unable to register conf ntfn #%d: %v", i, err)
- }
- confNtfns[i] = ntfn
- }
-
- // Ensure none of them have received the confirmation details.
- for i, ntfn := range confNtfns {
- select {
- case <-ntfn.Event.Confirmed:
- t.Fatalf("request #%d received unexpected confirmation "+
- "notification", i)
- default:
- }
- }
-
- // We'll assume a historical rescan was dispatched and found the
- // following confirmation details. We'll let the notifier know so that
- // it can stop watching at tip.
- expectedConfDetails := &chainntnfs.TxConfirmation{
- BlockHeight: startingHeight - 1,
- Tx: wire.NewMsgTx(1),
- }
- err := n.UpdateConfDetails(
- confNtfns[0].HistoricalDispatch.ConfRequest, expectedConfDetails,
- )
- if err != nil {
- t.Fatalf("unable to update conf details: %v", err)
- }
-
- // With the confirmation details retrieved, each client should now have
- // been notified of the confirmation.
- for i, ntfn := range confNtfns {
- select {
- case confDetails := <-ntfn.Event.Confirmed:
- assertConfDetails(t, confDetails, expectedConfDetails)
- default:
- t.Fatalf("request #%d expected to received "+
- "confirmation notification", i)
- }
- }
-
- // In order to ensure that the confirmation details are properly cached,
- // we'll register another client for the same transaction. We should not
- // see a historical rescan request and the confirmation notification
- // should come through immediately.
- extraConfNtfn, err := n.RegisterConf(&txid, testRawScript, 1, 1)
- if err != nil {
- t.Fatalf("unable to register conf ntfn: %v", err)
- }
- if extraConfNtfn.HistoricalDispatch != nil {
- t.Fatal("received unexpected historical rescan request")
- }
-
- select {
- case confDetails := <-extraConfNtfn.Event.Confirmed:
- assertConfDetails(t, confDetails, expectedConfDetails)
- default:
- t.Fatal("expected to receive spend notification")
- }
-
- // Similarly, we'll do the same thing but for spend notifications.
- op := wire.OutPoint{Index: 1}
- spendNtfns := make([]*chainntnfs.SpendRegistration, numNtfns)
- for i := uint64(0); i < numNtfns; i++ {
- ntfn, err := n.RegisterSpend(&op, testRawScript, 1)
- if err != nil {
- t.Fatalf("unable to register spend ntfn #%d: %v", i, err)
- }
- spendNtfns[i] = ntfn
- }
-
- // Ensure none of them have received the spend details.
- for i, ntfn := range spendNtfns {
- select {
- case <-ntfn.Event.Spend:
- t.Fatalf("request #%d received unexpected spend "+
- "notification", i)
- default:
- }
- }
-
- // We'll assume a historical rescan was dispatched and found the
- // following spend details. We'll let the notifier know so that it can
- // stop watching at tip.
- expectedSpendDetails := &chainntnfs.SpendDetail{
- SpentOutPoint: &op,
- SpenderTxHash: &chainntnfs.ZeroHash,
- SpendingTx: wire.NewMsgTx(2),
- SpenderInputIndex: 0,
- SpendingHeight: startingHeight - 1,
- }
- err = n.UpdateSpendDetails(
- spendNtfns[0].HistoricalDispatch.SpendRequest, expectedSpendDetails,
- )
- if err != nil {
- t.Fatalf("unable to update spend details: %v", err)
- }
-
- // With the spend details retrieved, each client should now have been
- // notified of the spend.
- for i, ntfn := range spendNtfns {
- select {
- case spendDetails := <-ntfn.Event.Spend:
- assertSpendDetails(t, spendDetails, expectedSpendDetails)
- default:
- t.Fatalf("request #%d expected to received spend "+
- "notification", i)
- }
- }
-
- // Finally, in order to ensure that the spend details are properly
- // cached, we'll register another client for the same outpoint. We
- // should not see a historical rescan request and the spend notification
- // should come through immediately.
- extraSpendNtfn, err := n.RegisterSpend(&op, testRawScript, 1)
- if err != nil {
- t.Fatalf("unable to register spend ntfn: %v", err)
- }
- if extraSpendNtfn.HistoricalDispatch != nil {
- t.Fatal("received unexpected historical rescan request")
- }
-
- select {
- case spendDetails := <-extraSpendNtfn.Event.Spend:
- assertSpendDetails(t, spendDetails, expectedSpendDetails)
- default:
- t.Fatal("expected to receive spend notification")
- }
-}
-
-// TestTxNotifierCancelConf ensures that a confirmation notification after a
-// client has canceled their intent to receive one.
-func TestTxNotifierCancelConf(t *testing.T) {
- t.Parallel()
-
- const startingHeight = 10
- hintCache := newMockHintCache()
- n := chainntnfs.NewTxNotifier(startingHeight, 100, hintCache, hintCache)
-
- // We'll register four notification requests. The last three will be
- // canceled.
- tx1 := wire.NewMsgTx(1)
- tx1.AddTxOut(&wire.TxOut{PkScript: testRawScript})
- tx1Hash := tx1.TxHash()
- ntfn1, err := n.RegisterConf(&tx1Hash, testRawScript, 1, 1)
- if err != nil {
- t.Fatalf("unable to register spend ntfn: %v", err)
- }
-
- tx2 := wire.NewMsgTx(2)
- tx2.AddTxOut(&wire.TxOut{PkScript: testRawScript})
- tx2Hash := tx2.TxHash()
- ntfn2, err := n.RegisterConf(&tx2Hash, testRawScript, 1, 1)
- if err != nil {
- t.Fatalf("unable to register spend ntfn: %v", err)
- }
- ntfn3, err := n.RegisterConf(&tx2Hash, testRawScript, 1, 1)
- if err != nil {
- t.Fatalf("unable to register spend ntfn: %v", err)
- }
-
- // This request will have a three block num confs.
- ntfn4, err := n.RegisterConf(&tx2Hash, testRawScript, 3, 1)
- if err != nil {
- t.Fatalf("unable to register spend ntfn: %v", err)
- }
-
- // Extend the chain with a block that will confirm both transactions.
- // This will queue confirmation notifications to dispatch once their
- // respective heights have been met.
- block := btcutil.NewBlock(&wire.MsgBlock{
- Transactions: []*wire.MsgTx{tx1, tx2},
- })
- tx1ConfDetails := &chainntnfs.TxConfirmation{
- BlockHeight: startingHeight + 1,
- BlockHash: block.Hash(),
- TxIndex: 0,
- Tx: tx1,
- }
-
- // Cancel the second notification before connecting the block.
- ntfn2.Event.Cancel()
-
- err = n.ConnectTip(block.Hash(), startingHeight+1, block.Transactions())
- if err != nil {
- t.Fatalf("unable to connect block: %v", err)
- }
-
- // Cancel the third notification before notifying to ensure its queued
- // confirmation notification gets removed as well.
- ntfn3.Event.Cancel()
-
- if err := n.NotifyHeight(startingHeight + 1); err != nil {
- t.Fatalf("unable to dispatch notifications: %v", err)
- }
-
- // The first request should still be active, so we should receive a
- // confirmation notification with the correct details.
- select {
- case confDetails := <-ntfn1.Event.Confirmed:
- assertConfDetails(t, confDetails, tx1ConfDetails)
- default:
- t.Fatalf("expected to receive confirmation notification")
- }
-
- // The second and third, however, should not have. The event's Confirmed
- // channel must have also been closed to indicate the caller that the
- // TxNotifier can no longer fulfill their canceled request.
- select {
- case _, ok := <-ntfn2.Event.Confirmed:
- if ok {
- t.Fatal("expected Confirmed channel to be closed")
- }
- default:
- t.Fatal("expected Confirmed channel to be closed")
- }
- select {
- case _, ok := <-ntfn3.Event.Confirmed:
- if ok {
- t.Fatal("expected Confirmed channel to be closed")
- }
- default:
- t.Fatal("expected Confirmed channel to be closed")
- }
-
- // Connect yet another block.
- block1 := btcutil.NewBlock(&wire.MsgBlock{
- Transactions: []*wire.MsgTx{},
- })
-
- err = n.ConnectTip(block1.Hash(), startingHeight+2, block1.Transactions())
- if err != nil {
- t.Fatalf("unable to connect block: %v", err)
- }
-
- if err := n.NotifyHeight(startingHeight + 2); err != nil {
- t.Fatalf("unable to dispatch notifications: %v", err)
- }
-
- // Since neither it reached the set confirmation height or was
- // canceled, nothing should happen to ntfn4 in this block.
- select {
- case <-ntfn4.Event.Confirmed:
- t.Fatal("expected nothing to happen")
- case <-time.After(10 * time.Millisecond):
- }
-
- // Now cancel the notification.
- ntfn4.Event.Cancel()
- select {
- case _, ok := <-ntfn4.Event.Confirmed:
- if ok {
- t.Fatal("expected Confirmed channel to be closed")
- }
- default:
- t.Fatal("expected Confirmed channel to be closed")
- }
-
- // Finally, confirm a block that would trigger ntfn4 confirmation
- // hadn't it already been canceled.
- block2 := btcutil.NewBlock(&wire.MsgBlock{
- Transactions: []*wire.MsgTx{},
- })
-
- err = n.ConnectTip(block2.Hash(), startingHeight+3, block2.Transactions())
- if err != nil {
- t.Fatalf("unable to connect block: %v", err)
- }
-
- if err := n.NotifyHeight(startingHeight + 3); err != nil {
- t.Fatalf("unable to dispatch notifications: %v", err)
- }
-}
-
-// TestTxNotifierCancelSpend ensures that a spend notification after a client
-// has canceled their intent to receive one.
-func TestTxNotifierCancelSpend(t *testing.T) {
- t.Parallel()
-
- const startingHeight = 10
- hintCache := newMockHintCache()
- n := chainntnfs.NewTxNotifier(
- startingHeight, chainntnfs.ReorgSafetyLimit, hintCache,
- hintCache,
- )
-
- // We'll register two notification requests. Only the second one will be
- // canceled.
- op1 := wire.OutPoint{Index: 1}
- ntfn1, err := n.RegisterSpend(&op1, testRawScript, 1)
- if err != nil {
- t.Fatalf("unable to register spend ntfn: %v", err)
- }
-
- op2 := wire.OutPoint{Index: 2}
- ntfn2, err := n.RegisterSpend(&op2, testRawScript, 1)
- if err != nil {
- t.Fatalf("unable to register spend ntfn: %v", err)
- }
-
- // Construct the spending details of the outpoint and create a dummy
- // block containing it.
- spendTx := wire.NewMsgTx(2)
- spendTx.AddTxIn(&wire.TxIn{
- PreviousOutPoint: op1,
- SignatureScript: testSigScript,
- })
- spendTxHash := spendTx.TxHash()
- expectedSpendDetails := &chainntnfs.SpendDetail{
- SpentOutPoint: &op1,
- SpenderTxHash: &spendTxHash,
- SpendingTx: spendTx,
- SpenderInputIndex: 0,
- SpendingHeight: startingHeight + 1,
- }
-
- block := btcutil.NewBlock(&wire.MsgBlock{
- Transactions: []*wire.MsgTx{spendTx},
- })
-
- // Before extending the notifier's tip with the dummy block above, we'll
- // cancel the second request.
- n.CancelSpend(ntfn2.HistoricalDispatch.SpendRequest, 2)
-
- err = n.ConnectTip(block.Hash(), startingHeight+1, block.Transactions())
- if err != nil {
- t.Fatalf("unable to connect block: %v", err)
- }
- if err := n.NotifyHeight(startingHeight + 1); err != nil {
- t.Fatalf("unable to dispatch notifications: %v", err)
- }
-
- // The first request should still be active, so we should receive a
- // spend notification with the correct spending details.
- select {
- case spendDetails := <-ntfn1.Event.Spend:
- assertSpendDetails(t, spendDetails, expectedSpendDetails)
- default:
- t.Fatalf("expected to receive spend notification")
- }
-
- // The second one, however, should not have. The event's Spend channel
- // must have also been closed to indicate the caller that the TxNotifier
- // can no longer fulfill their canceled request.
- select {
- case _, ok := <-ntfn2.Event.Spend:
- if ok {
- t.Fatal("expected Spend channel to be closed")
- }
- default:
- t.Fatal("expected Spend channel to be closed")
- }
-}
-
-// TestTxNotifierConfReorg ensures that clients are notified of a reorg when a
-// transaction for which they registered a confirmation notification has been
-// reorged out of the chain.
-func TestTxNotifierConfReorg(t *testing.T) {
- t.Parallel()
-
- const (
- tx1NumConfs uint32 = 2
- tx2NumConfs uint32 = 1
- tx3NumConfs uint32 = 2
- )
-
- hintCache := newMockHintCache()
- n := chainntnfs.NewTxNotifier(
- 7, chainntnfs.ReorgSafetyLimit, hintCache, hintCache,
- )
-
- // Tx 1 will be confirmed in block 9 and requires 2 confs.
- tx1 := wire.MsgTx{Version: 1}
- tx1.AddTxOut(&wire.TxOut{PkScript: testRawScript})
- tx1Hash := tx1.TxHash()
- ntfn1, err := n.RegisterConf(&tx1Hash, testRawScript, tx1NumConfs, 1)
- if err != nil {
- t.Fatalf("unable to register ntfn: %v", err)
- }
-
- err = n.UpdateConfDetails(ntfn1.HistoricalDispatch.ConfRequest, nil)
- if err != nil {
- t.Fatalf("unable to deliver conf details: %v", err)
- }
-
- // Tx 2 will be confirmed in block 10 and requires 1 conf.
- tx2 := wire.MsgTx{Version: 2}
- tx2.AddTxOut(&wire.TxOut{PkScript: testRawScript})
- tx2Hash := tx2.TxHash()
- ntfn2, err := n.RegisterConf(&tx2Hash, testRawScript, tx2NumConfs, 1)
- if err != nil {
- t.Fatalf("unable to register ntfn: %v", err)
- }
-
- err = n.UpdateConfDetails(ntfn2.HistoricalDispatch.ConfRequest, nil)
- if err != nil {
- t.Fatalf("unable to deliver conf details: %v", err)
- }
-
- // Tx 3 will be confirmed in block 10 and requires 2 confs.
- tx3 := wire.MsgTx{Version: 3}
- tx3.AddTxOut(&wire.TxOut{PkScript: testRawScript})
- tx3Hash := tx3.TxHash()
- ntfn3, err := n.RegisterConf(&tx3Hash, testRawScript, tx3NumConfs, 1)
- if err != nil {
- t.Fatalf("unable to register ntfn: %v", err)
- }
-
- err = n.UpdateConfDetails(ntfn3.HistoricalDispatch.ConfRequest, nil)
- if err != nil {
- t.Fatalf("unable to deliver conf details: %v", err)
- }
-
- // Sync chain to block 10. Txs 1 & 2 should be confirmed.
- block1 := btcutil.NewBlock(&wire.MsgBlock{
- Transactions: []*wire.MsgTx{&tx1},
- })
- if err := n.ConnectTip(nil, 8, block1.Transactions()); err != nil {
- t.Fatalf("Failed to connect block: %v", err)
- }
- if err := n.NotifyHeight(8); err != nil {
- t.Fatalf("unable to dispatch notifications: %v", err)
- }
- if err := n.ConnectTip(nil, 9, nil); err != nil {
- t.Fatalf("Failed to connect block: %v", err)
- }
- if err := n.NotifyHeight(9); err != nil {
- t.Fatalf("unable to dispatch notifications: %v", err)
- }
-
- block2 := btcutil.NewBlock(&wire.MsgBlock{
- Transactions: []*wire.MsgTx{&tx2, &tx3},
- })
- if err := n.ConnectTip(nil, 10, block2.Transactions()); err != nil {
- t.Fatalf("Failed to connect block: %v", err)
- }
- if err := n.NotifyHeight(10); err != nil {
- t.Fatalf("unable to dispatch notifications: %v", err)
- }
-
- // We should receive two updates for tx1 since it requires two
- // confirmations and it has already met them.
- for i := 0; i < 2; i++ {
- select {
- case <-ntfn1.Event.Updates:
- default:
- t.Fatal("Expected confirmation update for tx1")
- }
- }
-
- // A confirmation notification for tx1 should be dispatched, as it met
- // its required number of confirmations.
- select {
- case <-ntfn1.Event.Confirmed:
- default:
- t.Fatalf("Expected confirmation for tx1")
- }
-
- // We should only receive one update for tx2 since it only requires
- // one confirmation and it already met it.
- select {
- case <-ntfn2.Event.Updates:
- default:
- t.Fatal("Expected confirmation update for tx2")
- }
-
- // A confirmation notification for tx2 should be dispatched, as it met
- // its required number of confirmations.
- select {
- case <-ntfn2.Event.Confirmed:
- default:
- t.Fatalf("Expected confirmation for tx2")
- }
-
- // We should only receive one update for tx3 since it only has one
- // confirmation so far and it requires two.
- select {
- case <-ntfn3.Event.Updates:
- default:
- t.Fatal("Expected confirmation update for tx3")
- }
-
- // A confirmation notification for tx3 should not be dispatched yet, as
- // it requires one more confirmation.
- select {
- case txConf := <-ntfn3.Event.Confirmed:
- t.Fatalf("Received unexpected confirmation for tx3: %v", txConf)
- default:
- }
-
- // The block that included tx2 and tx3 is disconnected and two next
- // blocks without them are connected.
- if err := n.DisconnectTip(10); err != nil {
- t.Fatalf("Failed to connect block: %v", err)
- }
-
- if err := n.ConnectTip(nil, 10, nil); err != nil {
- t.Fatalf("Failed to connect block: %v", err)
- }
- if err := n.NotifyHeight(10); err != nil {
- t.Fatalf("unable to dispatch notifications: %v", err)
- }
-
- if err := n.ConnectTip(nil, 11, nil); err != nil {
- t.Fatalf("Failed to connect block: %v", err)
- }
- if err := n.NotifyHeight(11); err != nil {
- t.Fatalf("unable to dispatch notifications: %v", err)
- }
-
- select {
- case reorgDepth := <-ntfn2.Event.NegativeConf:
- if reorgDepth != 1 {
- t.Fatalf("Incorrect value for negative conf notification: "+
- "expected %d, got %d", 1, reorgDepth)
- }
- default:
- t.Fatalf("Expected negative conf notification for tx1")
- }
-
- // We should not receive any event notifications from all of the
- // transactions because tx1 has already been confirmed and tx2 and tx3
- // have not been included in the chain since the reorg.
- select {
- case <-ntfn1.Event.Updates:
- t.Fatal("Received unexpected confirmation update for tx1")
- case txConf := <-ntfn1.Event.Confirmed:
- t.Fatalf("Received unexpected confirmation for tx1: %v", txConf)
- default:
- }
-
- select {
- case <-ntfn2.Event.Updates:
- t.Fatal("Received unexpected confirmation update for tx2")
- case txConf := <-ntfn2.Event.Confirmed:
- t.Fatalf("Received unexpected confirmation for tx2: %v", txConf)
- default:
- }
-
- select {
- case <-ntfn3.Event.Updates:
- t.Fatal("Received unexpected confirmation update for tx3")
- case txConf := <-ntfn3.Event.Confirmed:
- t.Fatalf("Received unexpected confirmation for tx3: %v", txConf)
- default:
- }
-
- // Now transactions 2 & 3 are re-included in a new block.
- block3 := btcutil.NewBlock(&wire.MsgBlock{
- Transactions: []*wire.MsgTx{&tx2, &tx3},
- })
- block4 := btcutil.NewBlock(&wire.MsgBlock{})
-
- err = n.ConnectTip(block3.Hash(), 12, block3.Transactions())
- if err != nil {
- t.Fatalf("Failed to connect block: %v", err)
- }
- if err := n.NotifyHeight(12); err != nil {
- t.Fatalf("unable to dispatch notifications: %v", err)
- }
-
- err = n.ConnectTip(block4.Hash(), 13, block4.Transactions())
- if err != nil {
- t.Fatalf("Failed to connect block: %v", err)
- }
- if err := n.NotifyHeight(13); err != nil {
- t.Fatalf("unable to dispatch notifications: %v", err)
- }
-
- // We should only receive one update for tx2 since it only requires
- // one confirmation and it already met it.
- select {
- case numConfsLeft := <-ntfn2.Event.Updates:
- const expected = 0
- if numConfsLeft != expected {
- t.Fatalf("Received incorrect confirmation update: tx2 "+
- "expected %d confirmations left, got %d",
- expected, numConfsLeft)
- }
- default:
- t.Fatal("Expected confirmation update for tx2")
- }
-
- // A confirmation notification for tx2 should be dispatched, as it met
- // its required number of confirmations.
- select {
- case txConf := <-ntfn2.Event.Confirmed:
- expectedConf := chainntnfs.TxConfirmation{
- BlockHash: block3.Hash(),
- BlockHeight: 12,
- TxIndex: 0,
- Tx: &tx2,
- }
- assertConfDetails(t, txConf, &expectedConf)
- default:
- t.Fatalf("Expected confirmation for tx2")
- }
-
- // We should receive two updates for tx3 since it requires two
- // confirmations and it has already met them.
- for i := uint32(1); i <= 2; i++ {
- select {
- case numConfsLeft := <-ntfn3.Event.Updates:
- expected := tx3NumConfs - i
- if numConfsLeft != expected {
- t.Fatalf("Received incorrect confirmation update: tx3 "+
- "expected %d confirmations left, got %d",
- expected, numConfsLeft)
- }
- default:
- t.Fatal("Expected confirmation update for tx2")
- }
- }
-
- // A confirmation notification for tx3 should be dispatched, as it met
- // its required number of confirmations.
- select {
- case txConf := <-ntfn3.Event.Confirmed:
- expectedConf := chainntnfs.TxConfirmation{
- BlockHash: block3.Hash(),
- BlockHeight: 12,
- TxIndex: 1,
- Tx: &tx3,
- }
- assertConfDetails(t, txConf, &expectedConf)
- default:
- t.Fatalf("Expected confirmation for tx3")
- }
-}
-
-// TestTxNotifierSpendReorg ensures that clients are notified of a reorg when
-// the spending transaction of an outpoint for which they registered a spend
-// notification for has been reorged out of the chain.
-func TestTxNotifierSpendReorg(t *testing.T) {
- t.Parallel()
-
- const startingHeight = 10
- hintCache := newMockHintCache()
- n := chainntnfs.NewTxNotifier(
- startingHeight, chainntnfs.ReorgSafetyLimit, hintCache,
- hintCache,
- )
-
- // We'll have two outpoints that will be spent throughout the test. The
- // first will be spent and will not experience a reorg, while the second
- // one will.
- op1 := wire.OutPoint{Index: 1}
- spendTx1 := wire.NewMsgTx(2)
- spendTx1.AddTxIn(&wire.TxIn{
- PreviousOutPoint: op1,
- SignatureScript: testSigScript,
- })
- spendTxHash1 := spendTx1.TxHash()
- expectedSpendDetails1 := &chainntnfs.SpendDetail{
- SpentOutPoint: &op1,
- SpenderTxHash: &spendTxHash1,
- SpendingTx: spendTx1,
- SpenderInputIndex: 0,
- SpendingHeight: startingHeight + 1,
- }
-
- op2 := wire.OutPoint{Index: 2}
- spendTx2 := wire.NewMsgTx(2)
- spendTx2.AddTxIn(&wire.TxIn{
- PreviousOutPoint: chainntnfs.ZeroOutPoint,
- SignatureScript: testSigScript,
- })
- spendTx2.AddTxIn(&wire.TxIn{
- PreviousOutPoint: op2,
- SignatureScript: testSigScript,
- })
- spendTxHash2 := spendTx2.TxHash()
-
- // The second outpoint will experience a reorg and get re-spent at a
- // different height, so we'll need to construct the spend details for
- // before and after the reorg.
- expectedSpendDetails2BeforeReorg := chainntnfs.SpendDetail{
- SpentOutPoint: &op2,
- SpenderTxHash: &spendTxHash2,
- SpendingTx: spendTx2,
- SpenderInputIndex: 1,
- SpendingHeight: startingHeight + 2,
- }
-
- // The spend details after the reorg will be exactly the same, except
- // for the spend confirming at the next height.
- expectedSpendDetails2AfterReorg := expectedSpendDetails2BeforeReorg
- expectedSpendDetails2AfterReorg.SpendingHeight++
-
- // We'll register for a spend notification for each outpoint above.
- ntfn1, err := n.RegisterSpend(&op1, testRawScript, 1)
- if err != nil {
- t.Fatalf("unable to register spend ntfn: %v", err)
- }
-
- ntfn2, err := n.RegisterSpend(&op2, testRawScript, 1)
- if err != nil {
- t.Fatalf("unable to register spend ntfn: %v", err)
- }
-
- // We'll extend the chain by connecting a new block at tip. This block
- // will only contain the spending transaction of the first outpoint.
- block1 := btcutil.NewBlock(&wire.MsgBlock{
- Transactions: []*wire.MsgTx{spendTx1},
- })
- err = n.ConnectTip(block1.Hash(), startingHeight+1, block1.Transactions())
- if err != nil {
- t.Fatalf("unable to connect block: %v", err)
- }
- if err := n.NotifyHeight(startingHeight + 1); err != nil {
- t.Fatalf("unable to dispatch notifications: %v", err)
- }
-
- // We should receive a spend notification for the first outpoint with
- // its correct spending details.
- select {
- case spendDetails := <-ntfn1.Event.Spend:
- assertSpendDetails(t, spendDetails, expectedSpendDetails1)
- default:
- t.Fatal("expected to receive spend details")
- }
-
- // We should not, however, receive one for the second outpoint as it has
- // yet to be spent.
- select {
- case <-ntfn2.Event.Spend:
- t.Fatal("received unexpected spend notification")
- default:
- }
-
- // Now, we'll extend the chain again, this time with a block containing
- // the spending transaction of the second outpoint.
- block2 := btcutil.NewBlock(&wire.MsgBlock{
- Transactions: []*wire.MsgTx{spendTx2},
- })
- err = n.ConnectTip(block2.Hash(), startingHeight+2, block2.Transactions())
- if err != nil {
- t.Fatalf("unable to connect block: %v", err)
- }
- if err := n.NotifyHeight(startingHeight + 2); err != nil {
- t.Fatalf("unable to dispatch notifications: %v", err)
- }
-
- // We should not receive another spend notification for the first
- // outpoint.
- select {
- case <-ntfn1.Event.Spend:
- t.Fatal("received unexpected spend notification")
- default:
- }
-
- // We should receive one for the second outpoint.
- select {
- case spendDetails := <-ntfn2.Event.Spend:
- assertSpendDetails(
- t, spendDetails, &expectedSpendDetails2BeforeReorg,
- )
- default:
- t.Fatal("expected to receive spend details")
- }
-
- // Now, to replicate a chain reorg, we'll disconnect the block that
- // contained the spending transaction of the second outpoint.
- if err := n.DisconnectTip(startingHeight + 2); err != nil {
- t.Fatalf("unable to disconnect block: %v", err)
- }
-
- // No notifications should be dispatched for the first outpoint as it
- // was spent at a previous height.
- select {
- case <-ntfn1.Event.Spend:
- t.Fatal("received unexpected spend notification")
- case <-ntfn1.Event.Reorg:
- t.Fatal("received unexpected spend reorg notification")
- default:
- }
-
- // We should receive a reorg notification for the second outpoint.
- select {
- case <-ntfn2.Event.Spend:
- t.Fatal("received unexpected spend notification")
- case <-ntfn2.Event.Reorg:
- default:
- t.Fatal("expected spend reorg notification")
- }
-
- // We'll now extend the chain with an empty block, to ensure that we can
- // properly detect when an outpoint has been re-spent at a later height.
- emptyBlock := btcutil.NewBlock(&wire.MsgBlock{})
- err = n.ConnectTip(
- emptyBlock.Hash(), startingHeight+2, emptyBlock.Transactions(),
- )
- if err != nil {
- t.Fatalf("unable to disconnect block: %v", err)
- }
- if err := n.NotifyHeight(startingHeight + 2); err != nil {
- t.Fatalf("unable to dispatch notifications: %v", err)
- }
-
- // We shouldn't receive notifications for either of the outpoints.
- select {
- case <-ntfn1.Event.Spend:
- t.Fatal("received unexpected spend notification")
- case <-ntfn1.Event.Reorg:
- t.Fatal("received unexpected spend reorg notification")
- case <-ntfn2.Event.Spend:
- t.Fatal("received unexpected spend notification")
- case <-ntfn2.Event.Reorg:
- t.Fatal("received unexpected spend reorg notification")
- default:
- }
-
- // Finally, extend the chain with another block containing the same
- // spending transaction of the second outpoint.
- err = n.ConnectTip(
- block2.Hash(), startingHeight+3, block2.Transactions(),
- )
- if err != nil {
- t.Fatalf("unable to connect block: %v", err)
- }
- if err := n.NotifyHeight(startingHeight + 3); err != nil {
- t.Fatalf("unable to dispatch notifications: %v", err)
- }
-
- // We should now receive a spend notification once again for the second
- // outpoint containing the new spend details.
- select {
- case spendDetails := <-ntfn2.Event.Spend:
- assertSpendDetails(
- t, spendDetails, &expectedSpendDetails2AfterReorg,
- )
- default:
- t.Fatalf("expected to receive spend notification")
- }
-
- // Once again, we should not receive one for the first outpoint.
- select {
- case <-ntfn1.Event.Spend:
- t.Fatal("received unexpected spend notification")
- default:
- }
-}
-
-// TestTxNotifierConfirmHintCache ensures that the height hints for transactions
-// are kept track of correctly with each new block connected/disconnected. This
-// test also asserts that the height hints are not updated until the simulated
-// historical dispatches have returned, and we know the transactions aren't
-// already in the chain.
-func TestTxNotifierConfirmHintCache(t *testing.T) {
- t.Parallel()
-
- const (
- startingHeight = 200
- txDummyHeight = 201
- tx1Height = 202
- tx2Height = 203
- )
-
- // Initialize our TxNotifier instance backed by a height hint cache.
- hintCache := newMockHintCache()
- n := chainntnfs.NewTxNotifier(
- startingHeight, chainntnfs.ReorgSafetyLimit, hintCache,
- hintCache,
- )
-
- // Create two test transactions and register them for notifications.
- tx1 := wire.MsgTx{Version: 1}
- tx1.AddTxOut(&wire.TxOut{PkScript: testRawScript})
- tx1Hash := tx1.TxHash()
- ntfn1, err := n.RegisterConf(&tx1Hash, testRawScript, 1, 1)
- if err != nil {
- t.Fatalf("unable to register tx1: %v", err)
- }
-
- tx2 := wire.MsgTx{Version: 2}
- tx2.AddTxOut(&wire.TxOut{PkScript: testRawScript})
- tx2Hash := tx2.TxHash()
- ntfn2, err := n.RegisterConf(&tx2Hash, testRawScript, 2, 1)
- if err != nil {
- t.Fatalf("unable to register tx2: %v", err)
- }
-
- // Both transactions should not have a height hint set, as RegisterConf
- // should not alter the cache state.
- _, err = hintCache.QueryConfirmHint(ntfn1.HistoricalDispatch.ConfRequest)
- if !chainntnfs.ErrConfirmHintNotFound.Is(err) {
- t.Fatalf("unexpected error when querying for height hint "+
- "want: %v, got %v",
- chainntnfs.ErrConfirmHintNotFound, err)
- }
-
- _, err = hintCache.QueryConfirmHint(ntfn2.HistoricalDispatch.ConfRequest)
- if !chainntnfs.ErrConfirmHintNotFound.Is(err) {
- t.Fatalf("unexpected error when querying for height hint "+
- "want: %v, got %v",
- chainntnfs.ErrConfirmHintNotFound, err)
- }
-
- // Create a new block that will include the dummy transaction and extend
- // the chain.
- txDummy := wire.MsgTx{Version: 3}
- block1 := btcutil.NewBlock(&wire.MsgBlock{
- Transactions: []*wire.MsgTx{&txDummy},
- })
-
- err = n.ConnectTip(block1.Hash(), txDummyHeight, block1.Transactions())
- if err != nil {
- t.Fatalf("Failed to connect block: %v", err)
- }
- if err := n.NotifyHeight(txDummyHeight); err != nil {
- t.Fatalf("unable to dispatch notifications: %v", err)
- }
-
- // Since UpdateConfDetails has not been called for either transaction,
- // the height hints should remain unchanged. This simulates blocks
- // confirming while the historical dispatch is processing the
- // registration.
- hint, err := hintCache.QueryConfirmHint(ntfn1.HistoricalDispatch.ConfRequest)
- if !chainntnfs.ErrConfirmHintNotFound.Is(err) {
- t.Fatalf("unexpected error when querying for height hint "+
- "want: %v, got %v",
- chainntnfs.ErrConfirmHintNotFound, err)
- }
-
- hint, err = hintCache.QueryConfirmHint(ntfn2.HistoricalDispatch.ConfRequest)
- if !chainntnfs.ErrConfirmHintNotFound.Is(err) {
- t.Fatalf("unexpected error when querying for height hint "+
- "want: %v, got %v",
- chainntnfs.ErrConfirmHintNotFound, err)
- }
-
- // Now, update the conf details reporting that the neither txn was found
- // in the historical dispatch.
- err = n.UpdateConfDetails(ntfn1.HistoricalDispatch.ConfRequest, nil)
- if err != nil {
- t.Fatalf("unable to update conf details: %v", err)
- }
- err = n.UpdateConfDetails(ntfn2.HistoricalDispatch.ConfRequest, nil)
- if err != nil {
- t.Fatalf("unable to update conf details: %v", err)
- }
-
- // We'll create another block that will include the first transaction
- // and extend the chain.
- block2 := btcutil.NewBlock(&wire.MsgBlock{
- Transactions: []*wire.MsgTx{&tx1},
- })
-
- err = n.ConnectTip(block2.Hash(), tx1Height, block2.Transactions())
- if err != nil {
- t.Fatalf("Failed to connect block: %v", err)
- }
- if err := n.NotifyHeight(tx1Height); err != nil {
- t.Fatalf("unable to dispatch notifications: %v", err)
- }
-
- // Now that both notifications are waiting at tip for confirmations,
- // they should have their height hints updated to the latest block
- // height.
- hint, err = hintCache.QueryConfirmHint(ntfn1.HistoricalDispatch.ConfRequest)
- if err != nil {
- t.Fatalf("unable to query for hint: %v", err)
- }
- if hint != tx1Height {
- t.Fatalf("expected hint %d, got %d",
- tx1Height, hint)
- }
-
- hint, err = hintCache.QueryConfirmHint(ntfn2.HistoricalDispatch.ConfRequest)
- if err != nil {
- t.Fatalf("unable to query for hint: %v", err)
- }
- if hint != tx1Height {
- t.Fatalf("expected hint %d, got %d",
- tx2Height, hint)
- }
-
- // Next, we'll create another block that will include the second
- // transaction and extend the chain.
- block3 := btcutil.NewBlock(&wire.MsgBlock{
- Transactions: []*wire.MsgTx{&tx2},
- })
-
- err = n.ConnectTip(block3.Hash(), tx2Height, block3.Transactions())
- if err != nil {
- t.Fatalf("Failed to connect block: %v", err)
- }
- if err := n.NotifyHeight(tx2Height); err != nil {
- t.Fatalf("unable to dispatch notifications: %v", err)
- }
-
- // The height hint for the first transaction should remain the same.
- hint, err = hintCache.QueryConfirmHint(ntfn1.HistoricalDispatch.ConfRequest)
- if err != nil {
- t.Fatalf("unable to query for hint: %v", err)
- }
- if hint != tx1Height {
- t.Fatalf("expected hint %d, got %d",
- tx1Height, hint)
- }
-
- // The height hint for the second transaction should now be updated to
- // reflect its confirmation.
- hint, err = hintCache.QueryConfirmHint(ntfn2.HistoricalDispatch.ConfRequest)
- if err != nil {
- t.Fatalf("unable to query for hint: %v", err)
- }
- if hint != tx2Height {
- t.Fatalf("expected hint %d, got %d",
- tx2Height, hint)
- }
-
- // Finally, we'll attempt do disconnect the last block in order to
- // simulate a chain reorg.
- if err := n.DisconnectTip(tx2Height); err != nil {
- t.Fatalf("Failed to disconnect block: %v", err)
- }
-
- // This should update the second transaction's height hint within the
- // cache to the previous height.
- hint, err = hintCache.QueryConfirmHint(ntfn2.HistoricalDispatch.ConfRequest)
- if err != nil {
- t.Fatalf("unable to query for hint: %v", err)
- }
- if hint != tx1Height {
- t.Fatalf("expected hint %d, got %d",
- tx1Height, hint)
- }
-
- // The first transaction's height hint should remain at the original
- // confirmation height.
- hint, err = hintCache.QueryConfirmHint(ntfn2.HistoricalDispatch.ConfRequest)
- if err != nil {
- t.Fatalf("unable to query for hint: %v", err)
- }
- if hint != tx1Height {
- t.Fatalf("expected hint %d, got %d",
- tx1Height, hint)
- }
-}
-
-// TestTxNotifierSpendHintCache ensures that the height hints for outpoints are
-// kept track of correctly with each new block connected/disconnected. This test
-// also asserts that the height hints are not updated until the simulated
-// historical dispatches have returned, and we know the outpoints haven't
-// already been spent in the chain.
-func TestTxNotifierSpendHintCache(t *testing.T) {
- t.Parallel()
-
- const (
- startingHeight = 200
- dummyHeight = 201
- op1Height = 202
- op2Height = 203
- )
-
- // Intiialize our TxNotifier instance backed by a height hint cache.
- hintCache := newMockHintCache()
- n := chainntnfs.NewTxNotifier(
- startingHeight, chainntnfs.ReorgSafetyLimit, hintCache,
- hintCache,
- )
-
- // Create two test outpoints and register them for spend notifications.
- op1 := wire.OutPoint{Index: 1}
- ntfn1, err := n.RegisterSpend(&op1, testRawScript, 1)
- if err != nil {
- t.Fatalf("unable to register spend for op1: %v", err)
- }
- op2 := wire.OutPoint{Index: 2}
- ntfn2, err := n.RegisterSpend(&op2, testRawScript, 1)
- if err != nil {
- t.Fatalf("unable to register spend for op2: %v", err)
- }
-
- // Both outpoints should not have a spend hint set upon registration, as
- // we must first determine whether they have already been spent in the
- // chain.
- _, err = hintCache.QuerySpendHint(ntfn1.HistoricalDispatch.SpendRequest)
- if !chainntnfs.ErrSpendHintNotFound.Is(err) {
- t.Fatalf("unexpected error when querying for height hint "+
- "expected: %v, got %v", chainntnfs.ErrSpendHintNotFound,
- err)
- }
- _, err = hintCache.QuerySpendHint(ntfn2.HistoricalDispatch.SpendRequest)
- if !chainntnfs.ErrSpendHintNotFound.Is(err) {
- t.Fatalf("unexpected error when querying for height hint "+
- "expected: %v, got %v", chainntnfs.ErrSpendHintNotFound,
- err)
- }
-
- // Create a new empty block and extend the chain.
- emptyBlock := btcutil.NewBlock(&wire.MsgBlock{})
- err = n.ConnectTip(
- emptyBlock.Hash(), dummyHeight, emptyBlock.Transactions(),
- )
- if err != nil {
- t.Fatalf("unable to connect block: %v", err)
- }
- if err := n.NotifyHeight(dummyHeight); err != nil {
- t.Fatalf("unable to dispatch notifications: %v", err)
- }
-
- // Since we haven't called UpdateSpendDetails on any of the test
- // outpoints, this implies that there is a still a pending historical
- // rescan for them, so their spend hints should not be created/updated.
- _, err = hintCache.QuerySpendHint(ntfn1.HistoricalDispatch.SpendRequest)
- if !chainntnfs.ErrSpendHintNotFound.Is(err) {
- t.Fatalf("unexpected error when querying for height hint "+
- "expected: %v, got %v", chainntnfs.ErrSpendHintNotFound,
- err)
- }
- _, err = hintCache.QuerySpendHint(ntfn2.HistoricalDispatch.SpendRequest)
- if !chainntnfs.ErrSpendHintNotFound.Is(err) {
- t.Fatalf("unexpected error when querying for height hint "+
- "expected: %v, got %v", chainntnfs.ErrSpendHintNotFound,
- err)
- }
-
- // Now, we'll simulate that their historical rescans have finished by
- // calling UpdateSpendDetails. This should allow their spend hints to be
- // updated upon every block connected/disconnected.
- err = n.UpdateSpendDetails(ntfn1.HistoricalDispatch.SpendRequest, nil)
- if err != nil {
- t.Fatalf("unable to update spend details: %v", err)
- }
- err = n.UpdateSpendDetails(ntfn2.HistoricalDispatch.SpendRequest, nil)
- if err != nil {
- t.Fatalf("unable to update spend details: %v", err)
- }
-
- // We'll create a new block that only contains the spending transaction
- // of the first outpoint.
- spendTx1 := wire.NewMsgTx(2)
- spendTx1.AddTxIn(&wire.TxIn{
- PreviousOutPoint: op1,
- SignatureScript: testSigScript,
- })
- block1 := btcutil.NewBlock(&wire.MsgBlock{
- Transactions: []*wire.MsgTx{spendTx1},
- })
- err = n.ConnectTip(block1.Hash(), op1Height, block1.Transactions())
- if err != nil {
- t.Fatalf("unable to connect block: %v", err)
- }
- if err := n.NotifyHeight(op1Height); err != nil {
- t.Fatalf("unable to dispatch notifications: %v", err)
- }
-
- // Both outpoints should have their spend hints reflect the height of
- // the new block being connected due to the first outpoint being spent
- // at this height, and the second outpoint still being unspent.
- op1Hint, err := hintCache.QuerySpendHint(ntfn1.HistoricalDispatch.SpendRequest)
- if err != nil {
- t.Fatalf("unable to query for spend hint of op1: %v", err)
- }
- if op1Hint != op1Height {
- t.Fatalf("expected hint %d, got %d", op1Height, op1Hint)
- }
- op2Hint, err := hintCache.QuerySpendHint(ntfn2.HistoricalDispatch.SpendRequest)
- if err != nil {
- t.Fatalf("unable to query for spend hint of op2: %v", err)
- }
- if op2Hint != op1Height {
- t.Fatalf("expected hint %d, got %d", op1Height, op2Hint)
- }
-
- // Then, we'll create another block that spends the second outpoint.
- spendTx2 := wire.NewMsgTx(2)
- spendTx2.AddTxIn(&wire.TxIn{
- PreviousOutPoint: op2,
- SignatureScript: testSigScript,
- })
- block2 := btcutil.NewBlock(&wire.MsgBlock{
- Transactions: []*wire.MsgTx{spendTx2},
- })
- err = n.ConnectTip(block2.Hash(), op2Height, block2.Transactions())
- if err != nil {
- t.Fatalf("unable to connect block: %v", err)
- }
- if err := n.NotifyHeight(op2Height); err != nil {
- t.Fatalf("unable to dispatch notifications: %v", err)
- }
-
- // Only the second outpoint should have its spend hint updated due to
- // being spent within the new block. The first outpoint's spend hint
- // should remain the same as it's already been spent before.
- op1Hint, err = hintCache.QuerySpendHint(ntfn1.HistoricalDispatch.SpendRequest)
- if err != nil {
- t.Fatalf("unable to query for spend hint of op1: %v", err)
- }
- if op1Hint != op1Height {
- t.Fatalf("expected hint %d, got %d", op1Height, op1Hint)
- }
- op2Hint, err = hintCache.QuerySpendHint(ntfn2.HistoricalDispatch.SpendRequest)
- if err != nil {
- t.Fatalf("unable to query for spend hint of op2: %v", err)
- }
- if op2Hint != op2Height {
- t.Fatalf("expected hint %d, got %d", op2Height, op2Hint)
- }
-
- // Finally, we'll attempt do disconnect the last block in order to
- // simulate a chain reorg.
- if err := n.DisconnectTip(op2Height); err != nil {
- t.Fatalf("unable to disconnect block: %v", err)
- }
-
- // This should update the second outpoint's spend hint within the cache
- // to the previous height, as that's where its spending transaction was
- // included in within the chain. The first outpoint's spend hint should
- // remain the same.
- op1Hint, err = hintCache.QuerySpendHint(ntfn1.HistoricalDispatch.SpendRequest)
- if err != nil {
- t.Fatalf("unable to query for spend hint of op1: %v", err)
- }
- if op1Hint != op1Height {
- t.Fatalf("expected hint %d, got %d", op1Height, op1Hint)
- }
- op2Hint, err = hintCache.QuerySpendHint(ntfn2.HistoricalDispatch.SpendRequest)
- if err != nil {
- t.Fatalf("unable to query for spend hint of op2: %v", err)
- }
- if op2Hint != op1Height {
- t.Fatalf("expected hint %d, got %d", op1Height, op2Hint)
- }
-}
-
-// TestTxNotifierSpendHinthistoricalRescan checks that the height hints and
-// spend notifications behave as expected when a spend is found at tip during a
-// historical rescan.
-func TestTxNotifierSpendDuringHistoricalRescan(t *testing.T) {
- t.Parallel()
-
- const (
- startingHeight = 200
- reorgSafety = 10
- )
-
- // Intiialize our TxNotifier instance backed by a height hint cache.
- hintCache := newMockHintCache()
- n := chainntnfs.NewTxNotifier(
- startingHeight, reorgSafety, hintCache, hintCache,
- )
-
- // Create a test outpoint and register it for spend notifications.
- op1 := wire.OutPoint{Index: 1}
- ntfn1, err := n.RegisterSpend(&op1, testRawScript, 1)
- if err != nil {
- t.Fatalf("unable to register spend for op1: %v", err)
- }
-
- // A historical rescan should be initiated from the height hint to the
- // current height.
- if ntfn1.HistoricalDispatch.StartHeight != 1 {
- t.Fatalf("expected historical dispatch to start at height hint")
- }
-
- if ntfn1.HistoricalDispatch.EndHeight != startingHeight {
- t.Fatalf("expected historical dispatch to end at current height")
- }
-
- // It should not have a spend hint set upon registration, as we must
- // first determine whether it has already been spent in the chain.
- _, err = hintCache.QuerySpendHint(ntfn1.HistoricalDispatch.SpendRequest)
- if !chainntnfs.ErrSpendHintNotFound.Is(err) {
- t.Fatalf("unexpected error when querying for height hint "+
- "expected: %v, got %v", chainntnfs.ErrSpendHintNotFound,
- err)
- }
-
- // Create a new empty block and extend the chain.
- height := uint32(startingHeight) + 1
- emptyBlock := btcutil.NewBlock(&wire.MsgBlock{})
- err = n.ConnectTip(
- emptyBlock.Hash(), height, emptyBlock.Transactions(),
- )
- if err != nil {
- t.Fatalf("unable to connect block: %v", err)
- }
- if err := n.NotifyHeight(height); err != nil {
- t.Fatalf("unable to dispatch notifications: %v", err)
- }
-
- // Since we haven't called UpdateSpendDetails yet, there should be no
- // spend hint found.
- _, err = hintCache.QuerySpendHint(ntfn1.HistoricalDispatch.SpendRequest)
- if !chainntnfs.ErrSpendHintNotFound.Is(err) {
- t.Fatalf("unexpected error when querying for height hint "+
- "expected: %v, got %v", chainntnfs.ErrSpendHintNotFound,
- err)
- }
-
- // Simulate a bunch of blocks being mined while the historical rescan
- // is still in progress. We make sure to not mine more than reorgSafety
- // blocks after the spend, since it will be forgotten then.
- var spendHeight uint32
- for i := 0; i < reorgSafety; i++ {
- height++
-
- // Let the outpoint we are watching be spent midway.
- var block *btcutil.Block
- if i == 5 {
- // We'll create a new block that only contains the
- // spending transaction of the outpoint.
- spendTx1 := wire.NewMsgTx(2)
- spendTx1.AddTxIn(&wire.TxIn{
- PreviousOutPoint: op1,
- SignatureScript: testSigScript,
- })
- block = btcutil.NewBlock(&wire.MsgBlock{
- Transactions: []*wire.MsgTx{spendTx1},
- })
- spendHeight = height
- } else {
- // Otherwise we just create an empty block.
- block = btcutil.NewBlock(&wire.MsgBlock{})
- }
-
- err = n.ConnectTip(
- block.Hash(), height, block.Transactions(),
- )
- if err != nil {
- t.Fatalf("unable to connect block: %v", err)
- }
- if err := n.NotifyHeight(height); err != nil {
- t.Fatalf("unable to dispatch notifications: %v", err)
- }
- }
-
- // Check that the height hint was set to the spending block.
- op1Hint, err := hintCache.QuerySpendHint(
- ntfn1.HistoricalDispatch.SpendRequest,
- )
- if err != nil {
- t.Fatalf("unable to query for spend hint of op1: %v", err)
- }
- if op1Hint != spendHeight {
- t.Fatalf("expected hint %d, got %d", spendHeight, op1Hint)
- }
-
- // We should be getting notified about the spend at this point.
- select {
- case <-ntfn1.Event.Spend:
- default:
- t.Fatal("expected to receive spend notification")
- }
-
- // Now, we'll simulate that the historical rescan finished by
- // calling UpdateSpendDetails. Since a the spend actually happened at
- // tip while the rescan was in progress, the height hint should not be
- // updated to the latest height, but stay at the spend height.
- err = n.UpdateSpendDetails(ntfn1.HistoricalDispatch.SpendRequest, nil)
- if err != nil {
- t.Fatalf("unable to update spend details: %v", err)
- }
-
- op1Hint, err = hintCache.QuerySpendHint(
- ntfn1.HistoricalDispatch.SpendRequest,
- )
- if err != nil {
- t.Fatalf("unable to query for spend hint of op1: %v", err)
- }
- if op1Hint != spendHeight {
- t.Fatalf("expected hint %d, got %d", spendHeight, op1Hint)
- }
-
- // Then, we'll create another block that spends a second outpoint.
- op2 := wire.OutPoint{Index: 2}
- spendTx2 := wire.NewMsgTx(2)
- spendTx2.AddTxIn(&wire.TxIn{
- PreviousOutPoint: op2,
- SignatureScript: testSigScript,
- })
- height++
- block2 := btcutil.NewBlock(&wire.MsgBlock{
- Transactions: []*wire.MsgTx{spendTx2},
- })
- err = n.ConnectTip(block2.Hash(), height, block2.Transactions())
- if err != nil {
- t.Fatalf("unable to connect block: %v", err)
- }
- if err := n.NotifyHeight(height); err != nil {
- t.Fatalf("unable to dispatch notifications: %v", err)
- }
-
- // The outpoint's spend hint should remain the same as it's already
- // been spent before.
- op1Hint, err = hintCache.QuerySpendHint(ntfn1.HistoricalDispatch.SpendRequest)
- if err != nil {
- t.Fatalf("unable to query for spend hint of op1: %v", err)
- }
- if op1Hint != spendHeight {
- t.Fatalf("expected hint %d, got %d", spendHeight, op1Hint)
- }
-
- // Now mine enough blocks for the spend notification to be forgotten.
- for i := 0; i < 2*reorgSafety; i++ {
- height++
- block := btcutil.NewBlock(&wire.MsgBlock{})
-
- err := n.ConnectTip(
- block.Hash(), height, block.Transactions(),
- )
- if err != nil {
- t.Fatalf("unable to connect block: %v", err)
- }
- if err := n.NotifyHeight(height); err != nil {
- t.Fatalf("unable to dispatch notifications: %v", err)
- }
- }
-
- // Attempting to update spend details at this point should fail, since
- // the spend request should be removed. This is to ensure the height
- // hint won't be overwritten if the historical rescan finishes after
- // the spend request has been notified and removed because it has
- // matured.
- err = n.UpdateSpendDetails(ntfn1.HistoricalDispatch.SpendRequest, nil)
- if err == nil {
- t.Fatalf("expcted updating spend details to fail")
- }
-
- // Finally, check that the height hint is still there, unchanged.
- op1Hint, err = hintCache.QuerySpendHint(ntfn1.HistoricalDispatch.SpendRequest)
- if err != nil {
- t.Fatalf("unable to query for spend hint of op1: %v", err)
- }
- if op1Hint != spendHeight {
- t.Fatalf("expected hint %d, got %d", spendHeight, op1Hint)
- }
-}
-
-// TestTxNotifierNtfnDone ensures that a notification is sent to registered
-// clients through the Done channel once the notification request is no longer
-// under the risk of being reorged out of the chain.
-func TestTxNotifierNtfnDone(t *testing.T) {
- t.Parallel()
-
- hintCache := newMockHintCache()
- const reorgSafetyLimit = 100
- n := chainntnfs.NewTxNotifier(10, reorgSafetyLimit, hintCache, hintCache)
-
- // We'll start by creating two notification requests: one confirmation
- // and one spend.
- confNtfn, err := n.RegisterConf(&chainntnfs.ZeroHash, testRawScript, 1, 1)
- if err != nil {
- t.Fatalf("unable to register conf ntfn: %v", err)
- }
- spendNtfn, err := n.RegisterSpend(&chainntnfs.ZeroOutPoint, testRawScript, 1)
- if err != nil {
- t.Fatalf("unable to register spend: %v", err)
- }
-
- // We'll create two transactions that will satisfy the notification
- // requests above and include them in the next block of the chain.
- tx := wire.NewMsgTx(1)
- tx.AddTxOut(&wire.TxOut{PkScript: testRawScript})
- spendTx := wire.NewMsgTx(1)
- spendTx.AddTxIn(&wire.TxIn{
- PreviousOutPoint: wire.OutPoint{Index: 1},
- SignatureScript: testSigScript,
- })
- block := btcutil.NewBlock(&wire.MsgBlock{
- Transactions: []*wire.MsgTx{tx, spendTx},
- })
-
- err = n.ConnectTip(block.Hash(), 11, block.Transactions())
- if err != nil {
- t.Fatalf("unable to connect block: %v", err)
- }
- if err := n.NotifyHeight(11); err != nil {
- t.Fatalf("unable to dispatch notifications: %v", err)
- }
-
- // With the chain extended, we should see notifications dispatched for
- // both requests.
- select {
- case <-confNtfn.Event.Confirmed:
- default:
- t.Fatal("expected to receive confirmation notification")
- }
-
- select {
- case <-spendNtfn.Event.Spend:
- default:
- t.Fatal("expected to receive spend notification")
- }
-
- // The done notifications should not be dispatched yet as the requests
- // are still under the risk of being reorged out the chain.
- select {
- case <-confNtfn.Event.Done:
- t.Fatal("received unexpected done notification for confirmation")
- case <-spendNtfn.Event.Done:
- t.Fatal("received unexpected done notification for spend")
- default:
- }
-
- // Now, we'll disconnect the block at tip to simulate a reorg. The reorg
- // notifications should be dispatched to the respective clients.
- if err := n.DisconnectTip(11); err != nil {
- t.Fatalf("unable to disconnect block: %v", err)
- }
-
- select {
- case <-confNtfn.Event.NegativeConf:
- default:
- t.Fatal("expected to receive reorg notification for confirmation")
- }
-
- select {
- case <-spendNtfn.Event.Reorg:
- default:
- t.Fatal("expected to receive reorg notification for spend")
- }
-
- // We'll reconnect the block that satisfies both of these requests.
- // We should see notifications dispatched for both once again.
- err = n.ConnectTip(block.Hash(), 11, block.Transactions())
- if err != nil {
- t.Fatalf("unable to connect block: %v", err)
- }
- if err := n.NotifyHeight(11); err != nil {
- t.Fatalf("unable to dispatch notifications: %v", err)
- }
-
- select {
- case <-confNtfn.Event.Confirmed:
- default:
- t.Fatal("expected to receive confirmation notification")
- }
-
- select {
- case <-spendNtfn.Event.Spend:
- default:
- t.Fatal("expected to receive spend notification")
- }
-
- // Finally, we'll extend the chain with blocks until the requests are no
- // longer under the risk of being reorged out of the chain. We should
- // expect the done notifications to be dispatched.
- nextHeight := uint32(12)
- for i := nextHeight; i < nextHeight+reorgSafetyLimit; i++ {
- dummyBlock := btcutil.NewBlock(&wire.MsgBlock{})
- if err := n.ConnectTip(dummyBlock.Hash(), i, nil); err != nil {
- t.Fatalf("unable to connect block: %v", err)
- }
- }
-
- select {
- case <-confNtfn.Event.Done:
- default:
- t.Fatal("expected to receive done notification for confirmation")
- }
-
- select {
- case <-spendNtfn.Event.Done:
- default:
- t.Fatal("expected to receive done notification for spend")
- }
-}
-
-// TestTxNotifierTearDown ensures that the TxNotifier properly alerts clients
-// that it is shutting down and will be unable to deliver notifications.
-func TestTxNotifierTearDown(t *testing.T) {
- t.Parallel()
-
- hintCache := newMockHintCache()
- n := chainntnfs.NewTxNotifier(
- 10, chainntnfs.ReorgSafetyLimit, hintCache, hintCache,
- )
-
- // To begin the test, we'll register for a confirmation and spend
- // notification.
- confNtfn, err := n.RegisterConf(&chainntnfs.ZeroHash, testRawScript, 1, 1)
- if err != nil {
- t.Fatalf("unable to register conf ntfn: %v", err)
- }
- spendNtfn, err := n.RegisterSpend(&chainntnfs.ZeroOutPoint, testRawScript, 1)
- if err != nil {
- t.Fatalf("unable to register spend ntfn: %v", err)
- }
-
- // With the notifications registered, we'll now tear down the notifier.
- // The notification channels should be closed for notifications, whether
- // they have been dispatched or not, so we should not expect to receive
- // any more updates.
- n.TearDown()
-
- select {
- case _, ok := <-confNtfn.Event.Confirmed:
- if ok {
- t.Fatal("expected closed Confirmed channel for conf ntfn")
- }
- case _, ok := <-confNtfn.Event.Updates:
- if ok {
- t.Fatal("expected closed Updates channel for conf ntfn")
- }
- case _, ok := <-confNtfn.Event.NegativeConf:
- if ok {
- t.Fatal("expected closed NegativeConf channel for conf ntfn")
- }
- case _, ok := <-spendNtfn.Event.Spend:
- if ok {
- t.Fatal("expected closed Spend channel for spend ntfn")
- }
- case _, ok := <-spendNtfn.Event.Reorg:
- if ok {
- t.Fatalf("expected closed Reorg channel for spend ntfn")
- }
- default:
- t.Fatalf("expected closed notification channels for all ntfns")
- }
-
- // Now that the notifier is torn down, we should no longer be able to
- // register notification requests.
- _, err = n.RegisterConf(&chainntnfs.ZeroHash, testRawScript, 1, 1)
- if err == nil {
- t.Fatal("expected confirmation registration to fail")
- }
- _, err = n.RegisterSpend(&chainntnfs.ZeroOutPoint, testRawScript, 1)
- if err == nil {
- t.Fatal("expected spend registration to fail")
- }
-}
-
-func assertConfDetails(t *testing.T, result, expected *chainntnfs.TxConfirmation) {
- t.Helper()
-
- if result.BlockHeight != expected.BlockHeight {
- t.Fatalf("Incorrect block height in confirmation details: "+
- "expected %d, got %d", expected.BlockHeight,
- result.BlockHeight)
- }
- if !result.BlockHash.IsEqual(expected.BlockHash) {
- t.Fatalf("Incorrect block hash in confirmation details: "+
- "expected %d, got %d", expected.BlockHash,
- result.BlockHash)
- }
- if result.TxIndex != expected.TxIndex {
- t.Fatalf("Incorrect tx index in confirmation details: "+
- "expected %d, got %d", expected.TxIndex, result.TxIndex)
- }
- if result.Tx.TxHash() != expected.Tx.TxHash() {
- t.Fatalf("expected tx hash %v, got %v", expected.Tx.TxHash(),
- result.Tx.TxHash())
- }
-}
-
-func assertSpendDetails(t *testing.T, result, expected *chainntnfs.SpendDetail) {
- t.Helper()
-
- if *result.SpentOutPoint != *expected.SpentOutPoint {
- t.Fatalf("expected spent outpoint %v, got %v",
- expected.SpentOutPoint, result.SpentOutPoint)
- }
- if !result.SpenderTxHash.IsEqual(expected.SpenderTxHash) {
- t.Fatalf("expected spender tx hash %v, got %v",
- expected.SpenderTxHash, result.SpenderTxHash)
- }
- if result.SpenderInputIndex != expected.SpenderInputIndex {
- t.Fatalf("expected spender input index %d, got %d",
- expected.SpenderInputIndex, result.SpenderInputIndex)
- }
- if result.SpendingHeight != expected.SpendingHeight {
- t.Fatalf("expected spending height %d, got %d",
- expected.SpendingHeight, result.SpendingHeight)
- }
-}
diff --git a/lnd/chainreg/chaincode.go b/lnd/chainreg/chaincode.go
deleted file mode 100644
index 5a75865d..00000000
--- a/lnd/chainreg/chaincode.go
+++ /dev/null
@@ -1,29 +0,0 @@
-package chainreg
-
-// ChainCode is an enum-like structure for keeping track of the chains
-// currently supported within lnd.
-type ChainCode uint32
-
-const (
- // BitcoinChain is Bitcoin's chain.
- BitcoinChain ChainCode = iota
-
- // LitecoinChain is Litecoin's chain.
- LitecoinChain
-
- PktChain
-)
-
-// String returns a string representation of the target ChainCode.
-func (c ChainCode) String() string {
- switch c {
- case BitcoinChain:
- return "bitcoin"
- case LitecoinChain:
- return "litecoin"
- case PktChain:
- return "pkt"
- default:
- return "kekcoin"
- }
-}
diff --git a/lnd/chainreg/chainparams.go b/lnd/chainreg/chainparams.go
deleted file mode 100644
index 69e732c9..00000000
--- a/lnd/chainreg/chainparams.go
+++ /dev/null
@@ -1,153 +0,0 @@
-package chainreg
-
-import (
- litecoinCfg "github.com/ltcsuite/ltcd/chaincfg"
- litecoinWire "github.com/ltcsuite/ltcd/wire"
- "github.com/pkt-cash/pktd/chaincfg"
- bitcoinCfg "github.com/pkt-cash/pktd/chaincfg"
- "github.com/pkt-cash/pktd/chaincfg/chainhash"
- "github.com/pkt-cash/pktd/lnd/keychain"
- "github.com/pkt-cash/pktd/wire/protocol"
-)
-
-// BitcoinNetParams couples the p2p parameters of a network with the
-// corresponding RPC port of a daemon running on the particular network.
-type BitcoinNetParams struct {
- *bitcoinCfg.Params
- RPCPort string
- CoinType uint32
-}
-
-// LitecoinNetParams couples the p2p parameters of a network with the
-// corresponding RPC port of a daemon running on the particular network.
-type LitecoinNetParams struct {
- *litecoinCfg.Params
- RPCPort string
- CoinType uint32
-}
-
-// BitcoinTestNetParams contains parameters specific to the 3rd version of the
-// test network.
-var BitcoinTestNetParams = BitcoinNetParams{
- Params: &bitcoinCfg.TestNet3Params,
- RPCPort: "18334",
- CoinType: keychain.CoinTypeTestnet,
-}
-
-// BitcoinMainNetParams contains parameters specific to the current Bitcoin
-// mainnet.
-var BitcoinMainNetParams = BitcoinNetParams{
- Params: &bitcoinCfg.MainNetParams,
- RPCPort: "8334",
- CoinType: keychain.CoinTypeBitcoin,
-}
-
-// BitcoinSimNetParams contains parameters specific to the simulation test
-// network.
-var BitcoinSimNetParams = BitcoinNetParams{
- Params: &bitcoinCfg.SimNetParams,
- RPCPort: "18556",
- CoinType: keychain.CoinTypeTestnet,
-}
-
-// LitecoinSimNetParams contains parameters specific to the simulation test
-// network.
-var LitecoinSimNetParams = LitecoinNetParams{
- Params: &litecoinCfg.TestNet4Params,
- RPCPort: "18556",
- CoinType: keychain.CoinTypeTestnet,
-}
-
-// LitecoinTestNetParams contains parameters specific to the 4th version of the
-// test network.
-var LitecoinTestNetParams = LitecoinNetParams{
- Params: &litecoinCfg.TestNet4Params,
- RPCPort: "19334",
- CoinType: keychain.CoinTypeTestnet,
-}
-
-// LitecoinMainNetParams contains the parameters specific to the current
-// Litecoin mainnet.
-var LitecoinMainNetParams = LitecoinNetParams{
- Params: &litecoinCfg.MainNetParams,
- RPCPort: "9334",
- CoinType: keychain.CoinTypeLitecoin,
-}
-
-// LitecoinRegTestNetParams contains parameters specific to a local litecoin
-// regtest network.
-var LitecoinRegTestNetParams = LitecoinNetParams{
- Params: &litecoinCfg.RegressionNetParams,
- RPCPort: "18334",
- CoinType: keychain.CoinTypeTestnet,
-}
-
-// BitcoinRegTestNetParams contains parameters specific to a local bitcoin
-// regtest network.
-var BitcoinRegTestNetParams = BitcoinNetParams{
- Params: &bitcoinCfg.RegressionNetParams,
- RPCPort: "18334",
- CoinType: keychain.CoinTypeTestnet,
-}
-
-// BitcoinMainNetParams contains parameters specific to the current Bitcoin
-// mainnet.
-var PktMainNetParams = BitcoinNetParams{
- Params: &bitcoinCfg.PktMainNetParams,
- RPCPort: "8334",
- CoinType: keychain.CoinTypeBitcoin,
-}
-
-// ApplyLitecoinParams applies the relevant chain configuration parameters that
-// differ for litecoin to the chain parameters typed for btcsuite derivation.
-// This function is used in place of using something like interface{} to
-// abstract over _which_ chain (or fork) the parameters are for.
-func ApplyLitecoinParams(params *BitcoinNetParams,
- litecoinParams *LitecoinNetParams) {
-
- params.Name = litecoinParams.Name
- params.Net = protocol.BitcoinNet(litecoinParams.Net)
- params.DefaultPort = litecoinParams.DefaultPort
- params.CoinbaseMaturity = litecoinParams.CoinbaseMaturity
-
- copy(params.GenesisHash[:], litecoinParams.GenesisHash[:])
-
- // Address encoding magics
- params.PubKeyHashAddrID = litecoinParams.PubKeyHashAddrID
- params.ScriptHashAddrID = litecoinParams.ScriptHashAddrID
- params.PrivateKeyID = litecoinParams.PrivateKeyID
- params.WitnessPubKeyHashAddrID = litecoinParams.WitnessPubKeyHashAddrID
- params.WitnessScriptHashAddrID = litecoinParams.WitnessScriptHashAddrID
- params.Bech32HRPSegwit = litecoinParams.Bech32HRPSegwit
-
- copy(params.HDPrivateKeyID[:], litecoinParams.HDPrivateKeyID[:])
- copy(params.HDPublicKeyID[:], litecoinParams.HDPublicKeyID[:])
-
- params.HDCoinType = litecoinParams.HDCoinType
-
- checkPoints := make([]chaincfg.Checkpoint, len(litecoinParams.Checkpoints))
- for i := 0; i < len(litecoinParams.Checkpoints); i++ {
- var chainHash chainhash.Hash
- copy(chainHash[:], litecoinParams.Checkpoints[i].Hash[:])
-
- checkPoints[i] = chaincfg.Checkpoint{
- Height: litecoinParams.Checkpoints[i].Height,
- Hash: &chainHash,
- }
- }
- params.Checkpoints = checkPoints
-
- params.RPCPort = litecoinParams.RPCPort
- params.CoinType = litecoinParams.CoinType
-}
-
-// IsTestnet tests if the givern params correspond to a testnet
-// parameter configuration.
-func IsTestnet(params *BitcoinNetParams) bool {
- switch params.Params.Net {
- case protocol.BitcoinNet(litecoinWire.TestNet4):
- return true
- default:
- return false
- }
-}
diff --git a/lnd/chainreg/chainregistry.go b/lnd/chainreg/chainregistry.go
deleted file mode 100644
index a8b611d1..00000000
--- a/lnd/chainreg/chainregistry.go
+++ /dev/null
@@ -1,751 +0,0 @@
-package chainreg
-
-import (
- "encoding/json"
- "fmt"
- "io/ioutil"
- "os"
- "strings"
- "sync"
- "time"
-
- "github.com/pkt-cash/pktd/btcutil"
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/btcutil/util"
- "github.com/pkt-cash/pktd/chaincfg/chainhash"
- "github.com/pkt-cash/pktd/lnd/chainntnfs"
- "github.com/pkt-cash/pktd/lnd/chainntnfs/btcdnotify"
- "github.com/pkt-cash/pktd/lnd/chainntnfs/neutrinonotify"
- "github.com/pkt-cash/pktd/lnd/channeldb"
- "github.com/pkt-cash/pktd/lnd/htlcswitch"
- "github.com/pkt-cash/pktd/lnd/input"
- "github.com/pkt-cash/pktd/lnd/keychain"
- "github.com/pkt-cash/pktd/lnd/lncfg"
- "github.com/pkt-cash/pktd/lnd/lnwallet"
- "github.com/pkt-cash/pktd/lnd/lnwallet/btcwallet"
- "github.com/pkt-cash/pktd/lnd/lnwallet/chainfee"
- "github.com/pkt-cash/pktd/lnd/lnwire"
- "github.com/pkt-cash/pktd/lnd/routing/chainview"
- "github.com/pkt-cash/pktd/neutrino"
- "github.com/pkt-cash/pktd/pktlog/log"
- "github.com/pkt-cash/pktd/pktwallet/chain"
- "github.com/pkt-cash/pktd/pktwallet/wallet"
- "github.com/pkt-cash/pktd/rpcclient"
-)
-
-// Config houses necessary fields that a chainControl instance needs to
-// function.
-type Config struct {
- // Bitcoin defines settings for the Bitcoin chain.
- Bitcoin *lncfg.Chain
-
- // Litecoin defines settings for the Litecoin chain.
- Litecoin *lncfg.Chain
-
- Pkt *lncfg.Chain
-
- // PrimaryChain is a function that returns our primary chain via its
- // ChainCode.
- PrimaryChain func() ChainCode
-
- // HeightHintCacheQueryDisable is a boolean that disables height hint
- // queries if true.
- HeightHintCacheQueryDisable bool
-
- // NeutrinoMode defines settings for connecting to a neutrino light-client.
- NeutrinoMode *lncfg.Neutrino
-
- // BitcoindMode defines settings for connecting to a bitcoind node.
- BitcoindMode *lncfg.Bitcoind
-
- // LitecoindMode defines settings for connecting to a litecoind node.
- LitecoindMode *lncfg.Bitcoind
-
- // BtcdMode defines settings for connecting to a btcd node.
- BtcdMode *lncfg.Btcd
-
- // LtcdMode defines settings for connecting to an ltcd node.
- LtcdMode *lncfg.Btcd
-
- // LocalChanDB is a pointer to the local backing channel database.
- LocalChanDB *channeldb.DB
-
- // RemoteChanDB is a pointer to the remote backing channel database.
- RemoteChanDB *channeldb.DB
-
- // PrivateWalletPw is the private wallet password to the underlying
- // btcwallet instance.
- PrivateWalletPw []byte
-
- // PublicWalletPw is the public wallet password to the underlying btcwallet
- // instance.
- PublicWalletPw []byte
-
- // Birthday specifies the time the wallet was initially created.
- Birthday time.Time
-
- // RecoveryWindow specifies the address look-ahead for which to scan when
- // restoring a wallet.
- RecoveryWindow uint32
-
- // Wallet is a pointer to the backing wallet instance.
- Wallet *wallet.Wallet
-
- // NeutrinoCS is a pointer to a neutrino ChainService. Must be non-nil if
- // using neutrino.
- NeutrinoCS *neutrino.ChainService
-
- // ActiveNetParams details the current chain we are on.
- ActiveNetParams BitcoinNetParams
-
- // FeeURL defines the URL for fee estimation we will use. This field is
- // optional.
- FeeURL string
-}
-
-const (
- // DefaultBitcoinMinHTLCInMSat is the default smallest value htlc this
- // node will accept. This value is proposed in the channel open sequence
- // and cannot be changed during the life of the channel. It is 1 msat by
- // default to allow maximum flexibility in deciding what size payments
- // to forward.
- //
- // All forwarded payments are subjected to the min htlc constraint of
- // the routing policy of the outgoing channel. This implicitly controls
- // the minimum htlc value on the incoming channel too.
- DefaultBitcoinMinHTLCInMSat = lnwire.MilliSatoshi(1)
-
- // DefaultBitcoinMinHTLCOutMSat is the default minimum htlc value that
- // we require for sending out htlcs. Our channel peer may have a lower
- // min htlc channel parameter, but we - by default - don't forward
- // anything under the value defined here.
- DefaultBitcoinMinHTLCOutMSat = lnwire.MilliSatoshi(1000)
-
- // DefaultBitcoinBaseFeeMSat is the default forwarding base fee.
- DefaultBitcoinBaseFeeMSat = lnwire.MilliSatoshi(1000)
-
- // DefaultBitcoinFeeRate is the default forwarding fee rate.
- DefaultBitcoinFeeRate = lnwire.MilliSatoshi(1)
-
- // DefaultBitcoinTimeLockDelta is the default forwarding time lock
- // delta.
- DefaultBitcoinTimeLockDelta = 40
-
- DefaultLitecoinMinHTLCInMSat = lnwire.MilliSatoshi(1)
- DefaultLitecoinMinHTLCOutMSat = lnwire.MilliSatoshi(1000)
- DefaultLitecoinBaseFeeMSat = lnwire.MilliSatoshi(1000)
- DefaultLitecoinFeeRate = lnwire.MilliSatoshi(1)
- DefaultLitecoinTimeLockDelta = 576
- DefaultLitecoinDustLimit = btcutil.Amount(54600)
-
- DefaultPktMinHTLCInMSat = lnwire.MilliSatoshi(1)
- DefaultPktMinHTLCOutMSat = lnwire.MilliSatoshi(1000)
- DefaultPktBaseFeeMSat = lnwire.MilliSatoshi(1000)
- DefaultPktFeeRate = lnwire.MilliSatoshi(1)
- DefaultPktTimeLockDelta = 576
- DefaultPktDustLimit = btcutil.Amount(54600)
-
- // DefaultBitcoinStaticFeePerKW is the fee rate of 50 sat/vbyte
- // expressed in sat/kw.
- DefaultBitcoinStaticFeePerKW = chainfee.SatPerKWeight(12500)
-
- // DefaultBitcoinStaticMinRelayFeeRate is the min relay fee used for
- // static estimators.
- DefaultBitcoinStaticMinRelayFeeRate = chainfee.FeePerKwFloor
-
- // DefaultLitecoinStaticFeePerKW is the fee rate of 200 sat/vbyte
- // expressed in sat/kw.
- DefaultLitecoinStaticFeePerKW = chainfee.SatPerKWeight(50000)
-
- DefaultPktStaticFeePerKW = chainfee.SatPerKWeight(1000)
-
- // BtcToLtcConversionRate is a fixed ratio used in order to scale up
- // payments when running on the Litecoin chain.
- BtcToLtcConversionRate = 60
-)
-
-// DefaultBtcChannelConstraints is the default set of channel constraints that are
-// meant to be used when initially funding a Bitcoin channel.
-//
-// TODO(halseth): make configurable at startup?
-var DefaultBtcChannelConstraints = channeldb.ChannelConstraints{
- DustLimit: lnwallet.DefaultDustLimit(),
- MaxAcceptedHtlcs: input.MaxHTLCNumber / 2,
-}
-
-// DefaultLtcChannelConstraints is the default set of channel constraints that are
-// meant to be used when initially funding a Litecoin channel.
-var DefaultLtcChannelConstraints = channeldb.ChannelConstraints{
- DustLimit: DefaultLitecoinDustLimit,
- MaxAcceptedHtlcs: input.MaxHTLCNumber / 2,
-}
-
-// ChainControl couples the three primary interfaces lnd utilizes for a
-// particular chain together. A single ChainControl instance will exist for all
-// the chains lnd is currently active on.
-type ChainControl struct {
- // ChainIO represents an abstraction over a source that can query the blockchain.
- ChainIO lnwallet.BlockChainIO
-
- // HealthCheck is a function which can be used to send a low-cost, fast
- // query to the chain backend to ensure we still have access to our
- // node.
- HealthCheck func() er.R
-
- // FeeEstimator is used to estimate an optimal fee for transactions important to us.
- FeeEstimator chainfee.Estimator
-
- // Signer is used to provide signatures over things like transactions.
- Signer input.Signer
-
- // KeyRing represents a set of keys that we have the private keys to.
- KeyRing keychain.SecretKeyRing
-
- // Wc is an abstraction over some basic wallet commands. This base set of commands
- // will be provided to the Wallet *LightningWallet raw pointer below.
- Wc lnwallet.WalletController
-
- // MsgSigner is used to sign arbitrary messages.
- MsgSigner lnwallet.MessageSigner
-
- // ChainNotifier is used to receive blockchain events that we are interested in.
- ChainNotifier chainntnfs.ChainNotifier
-
- // ChainView is used in the router for maintaining an up-to-date graph.
- ChainView chainview.FilteredChainView
-
- // Wallet is our LightningWallet that also contains the abstract Wc above. This wallet
- // handles all of the lightning operations.
- Wallet *lnwallet.LightningWallet
-
- // RoutingPolicy is the routing policy we have decided to use.
- RoutingPolicy htlcswitch.ForwardingPolicy
-
- // MinHtlcIn is the minimum HTLC we will accept.
- MinHtlcIn lnwire.MilliSatoshi
-}
-
-// NewChainControl attempts to create a ChainControl instance according
-// to the parameters in the passed configuration. Currently three
-// branches of ChainControl instances exist: one backed by a running btcd
-// full-node, another backed by a running bitcoind full-node, and the other
-// backed by a running neutrino light client instance. When running with a
-// neutrino light client instance, `neutrinoCS` must be non-nil.
-func NewChainControl(cfg *Config) (*ChainControl, er.R) {
-
- // Set the RPC config from the "home" chain. Multi-chain isn't yet
- // active, so we'll restrict usage to a particular chain for now.
- homeChainConfig := cfg.Bitcoin
- if cfg.PrimaryChain() == LitecoinChain {
- homeChainConfig = cfg.Litecoin
- }
- if cfg.PrimaryChain() == PktChain {
- homeChainConfig = cfg.Pkt
- }
- log.Infof("Primary chain is set to: %v",
- cfg.PrimaryChain())
-
- cc := &ChainControl{}
-
- switch cfg.PrimaryChain() {
- case BitcoinChain:
- cc.RoutingPolicy = htlcswitch.ForwardingPolicy{
- MinHTLCOut: cfg.Bitcoin.MinHTLCOut,
- BaseFee: cfg.Bitcoin.BaseFee,
- FeeRate: cfg.Bitcoin.FeeRate,
- TimeLockDelta: cfg.Bitcoin.TimeLockDelta,
- }
- cc.MinHtlcIn = cfg.Bitcoin.MinHTLCIn
- cc.FeeEstimator = chainfee.NewStaticEstimator(
- DefaultBitcoinStaticFeePerKW,
- DefaultBitcoinStaticMinRelayFeeRate,
- )
- case LitecoinChain:
- cc.RoutingPolicy = htlcswitch.ForwardingPolicy{
- MinHTLCOut: cfg.Litecoin.MinHTLCOut,
- BaseFee: cfg.Litecoin.BaseFee,
- FeeRate: cfg.Litecoin.FeeRate,
- TimeLockDelta: cfg.Litecoin.TimeLockDelta,
- }
- cc.MinHtlcIn = cfg.Litecoin.MinHTLCIn
- cc.FeeEstimator = chainfee.NewStaticEstimator(
- DefaultLitecoinStaticFeePerKW, 0,
- )
- case PktChain:
- cc.RoutingPolicy = htlcswitch.ForwardingPolicy{
- MinHTLCOut: cfg.Pkt.MinHTLCOut,
- BaseFee: cfg.Pkt.BaseFee,
- FeeRate: cfg.Pkt.FeeRate,
- TimeLockDelta: cfg.Pkt.TimeLockDelta,
- }
- cc.MinHtlcIn = cfg.Pkt.MinHTLCIn
- cc.FeeEstimator = chainfee.NewStaticEstimator(
- DefaultPktStaticFeePerKW, 0,
- )
- default:
- return nil, er.Errorf("default routing policy for chain %v is "+
- "unknown", cfg.PrimaryChain())
- }
-
- walletConfig := &btcwallet.Config{
- PrivatePass: cfg.PrivateWalletPw,
- PublicPass: nil,
- Birthday: cfg.Birthday,
- RecoveryWindow: cfg.RecoveryWindow,
- DataDir: homeChainConfig.ChainDir,
- NetParams: cfg.ActiveNetParams.Params,
- CoinType: cfg.ActiveNetParams.CoinType,
- Wallet: cfg.Wallet,
- }
-
- var err er.R
-
- heightHintCacheConfig := chainntnfs.CacheConfig{
- QueryDisable: cfg.HeightHintCacheQueryDisable,
- }
- if cfg.HeightHintCacheQueryDisable {
- log.Infof("Height Hint Cache Queries disabled")
- }
-
- // Initialize the height hint cache within the chain directory.
- hintCache, err := chainntnfs.NewHeightHintCache(
- heightHintCacheConfig, cfg.LocalChanDB,
- )
- if err != nil {
- return nil, er.Errorf("unable to initialize height hint "+
- "cache: %v", err)
- }
-
- // If spv mode is active, then we'll be using a distinct set of
- // chainControl interfaces that interface directly with the p2p network
- // of the selected chain.
- switch homeChainConfig.Node {
- case "neutrino":
- // We'll create ChainNotifier and FilteredChainView instances,
- // along with the wallet's ChainSource, which are all backed by
- // the neutrino light client.
- cc.ChainNotifier = neutrinonotify.New(
- cfg.NeutrinoCS, hintCache, hintCache,
- )
- cc.ChainView, err = chainview.NewCfFilteredChainView(cfg.NeutrinoCS)
- if err != nil {
- return nil, err
- }
-
- // Map the deprecated neutrino feeurl flag to the general fee
- // url.
- if cfg.NeutrinoMode.FeeURL != "" {
- if cfg.FeeURL != "" {
- return nil, er.New("feeurl and " +
- "neutrino.feeurl are mutually exclusive")
- }
-
- cfg.FeeURL = cfg.NeutrinoMode.FeeURL
- }
-
- walletConfig.ChainSource = chain.NewNeutrinoClient(
- cfg.ActiveNetParams.Params, cfg.NeutrinoCS,
- )
-
- // Get our best block as a health check.
- cc.HealthCheck = func() er.R {
- _, _, err := walletConfig.ChainSource.GetBestBlock()
- return err
- }
-
- case "btcd", "ltcd":
- // Otherwise, we'll be speaking directly via RPC to a node.
- //
- // So first we'll load btcd/ltcd's TLS cert for the RPC
- // connection. If a raw cert was specified in the config, then
- // we'll set that directly. Otherwise, we attempt to read the
- // cert from the path specified in the config.
- var btcdMode *lncfg.Btcd
- switch {
- case cfg.Bitcoin.Active:
- btcdMode = cfg.BtcdMode
- case cfg.Litecoin.Active:
- btcdMode = cfg.LtcdMode
- }
- var rpcCert []byte
- if btcdMode.RawRPCCert != "" {
- rpcCert, err = util.DecodeHex(btcdMode.RawRPCCert)
- if err != nil {
- return nil, err
- }
- } else {
- certFile, err := os.Open(btcdMode.RPCCert)
- if err != nil {
- return nil, er.E(err)
- }
- rpcCert, err = ioutil.ReadAll(certFile)
- if err != nil {
- return nil, er.E(err)
- }
- if err := certFile.Close(); err != nil {
- return nil, er.E(err)
- }
- }
-
- // If the specified host for the btcd/ltcd RPC server already
- // has a port specified, then we use that directly. Otherwise,
- // we assume the default port according to the selected chain
- // parameters.
- var btcdHost string
- if strings.Contains(btcdMode.RPCHost, ":") {
- btcdHost = btcdMode.RPCHost
- } else {
- btcdHost = fmt.Sprintf("%v:%v", btcdMode.RPCHost,
- cfg.ActiveNetParams.RPCPort)
- }
-
- btcdUser := btcdMode.RPCUser
- btcdPass := btcdMode.RPCPass
- rpcConfig := &rpcclient.ConnConfig{
- Host: btcdHost,
- Endpoint: "ws",
- User: btcdUser,
- Pass: btcdPass,
- Certificates: rpcCert,
- DisableTLS: false,
- DisableConnectOnNew: true,
- DisableAutoReconnect: false,
- }
- cc.ChainNotifier, err = btcdnotify.New(
- rpcConfig, cfg.ActiveNetParams.Params, hintCache, hintCache,
- )
- if err != nil {
- return nil, err
- }
-
- // Finally, we'll create an instance of the default chain view to be
- // used within the routing layer.
- cc.ChainView, err = chainview.NewBtcdFilteredChainView(*rpcConfig)
- if err != nil {
- log.Errorf("unable to create chain view: %v", err)
- return nil, err
- }
-
- // Create a special websockets rpc client for btcd which will be used
- // by the wallet for notifications, calls, etc.
- chainRPC, err := chain.NewRPCClient(cfg.ActiveNetParams.Params, btcdHost,
- btcdUser, btcdPass, rpcCert, false, 20)
- if err != nil {
- return nil, err
- }
-
- walletConfig.ChainSource = chainRPC
-
- // Use a query for our best block as a health check.
- cc.HealthCheck = func() er.R {
- _, _, err := walletConfig.ChainSource.GetBestBlock()
- return err
- }
-
- // If we're not in simnet or regtest mode, then we'll attempt
- // to use a proper fee estimator for testnet.
- if !cfg.Bitcoin.SimNet && !cfg.Litecoin.SimNet &&
- !cfg.Bitcoin.RegTest && !cfg.Litecoin.RegTest {
-
- log.Info("Initializing btcd backed fee estimator")
-
- // Finally, we'll re-initialize the fee estimator, as
- // if we're using btcd as a backend, then we can use
- // live fee estimates, rather than a statically coded
- // value.
- fallBackFeeRate := chainfee.SatPerKVByte(25 * 1000)
- cc.FeeEstimator, err = chainfee.NewBtcdEstimator(
- *rpcConfig, fallBackFeeRate.FeePerKWeight(),
- )
- if err != nil {
- return nil, err
- }
- }
- default:
- return nil, er.Errorf("unknown node type: %s",
- homeChainConfig.Node)
- }
-
- // Override default fee estimator if an external service is specified.
- if cfg.FeeURL != "" {
- // Do not cache fees on regtest to make it easier to execute
- // manual or automated test cases.
- cacheFees := !cfg.Bitcoin.RegTest
-
- log.Infof("Using external fee estimator %v: cached=%v",
- cfg.FeeURL, cacheFees)
-
- cc.FeeEstimator = chainfee.NewWebAPIEstimator(
- chainfee.SparseConfFeeSource{
- URL: cfg.FeeURL,
- },
- !cacheFees,
- )
- }
-
- // Start fee estimator.
- if err := cc.FeeEstimator.Start(); err != nil {
- return nil, err
- }
-
- wc, err := btcwallet.New(*walletConfig)
- if err != nil {
- fmt.Printf("unable to create wallet controller: %v\n", err)
- return nil, err
- }
-
- cc.MsgSigner = wc
- cc.Signer = wc
- cc.ChainIO = wc
- cc.Wc = wc
-
- // Select the default channel constraints for the primary chain.
- channelConstraints := DefaultBtcChannelConstraints
- if cfg.PrimaryChain() == LitecoinChain {
- channelConstraints = DefaultLtcChannelConstraints
- }
-
- keyRing := keychain.NewBtcWalletKeyRing(
- wc.InternalWallet(), cfg.ActiveNetParams.CoinType,
- )
- cc.KeyRing = keyRing
-
- // Create, and start the lnwallet, which handles the core payment
- // channel logic, and exposes control via proxy state machines.
- walletCfg := lnwallet.Config{
- Database: cfg.RemoteChanDB,
- Notifier: cc.ChainNotifier,
- WalletController: wc,
- Signer: cc.Signer,
- FeeEstimator: cc.FeeEstimator,
- SecretKeyRing: keyRing,
- ChainIO: cc.ChainIO,
- DefaultConstraints: channelConstraints,
- NetParams: *cfg.ActiveNetParams.Params,
- }
- lnWallet, err := lnwallet.NewLightningWallet(walletCfg)
- if err != nil {
- fmt.Printf("unable to create wallet: %v\n", err)
- return nil, err
- }
- if err := lnWallet.Startup(); err != nil {
- fmt.Printf("unable to start wallet: %v\n", err)
- return nil, err
- }
-
- log.Info("LightningWallet opened")
-
- cc.Wallet = lnWallet
-
- return cc, nil
-}
-
-// getBitcoindHealthCheckCmd queries bitcoind for its version to decide which
-// api we should use for our health check. We prefer to use the uptime
-// command, because it has no locking and is an inexpensive call, which was
-// added in version 0.15. If we are on an earlier version, we fallback to using
-// getblockchaininfo.
-func getBitcoindHealthCheckCmd(client *rpcclient.Client) (string, er.R) {
- // Query bitcoind to get our current version.
- resp, err := client.RawRequest("getnetworkinfo", nil)
- if err != nil {
- return "", err
- }
-
- // Parse the response to retrieve bitcoind's version.
- info := struct {
- Version int64 `json:"version"`
- }{}
- if err := json.Unmarshal(resp, &info); err != nil {
- return "", er.E(err)
- }
-
- // Bitcoind returns a single value representing the semantic version:
- // 1000000 * CLIENT_VERSION_MAJOR + 10000 * CLIENT_VERSION_MINOR
- // + 100 * CLIENT_VERSION_REVISION + 1 * CLIENT_VERSION_BUILD
- //
- // The uptime call was added in version 0.15.0, so we return it for
- // any version value >= 150000, as per the above calculation.
- if info.Version >= 150000 {
- return "uptime", nil
- }
-
- return "getblockchaininfo", nil
-}
-
-var (
- // BitcoinTestnetGenesis is the genesis hash of Bitcoin's testnet
- // chain.
- BitcoinTestnetGenesis = chainhash.Hash([chainhash.HashSize]byte{
- 0x43, 0x49, 0x7f, 0xd7, 0xf8, 0x26, 0x95, 0x71,
- 0x08, 0xf4, 0xa3, 0x0f, 0xd9, 0xce, 0xc3, 0xae,
- 0xba, 0x79, 0x97, 0x20, 0x84, 0xe9, 0x0e, 0xad,
- 0x01, 0xea, 0x33, 0x09, 0x00, 0x00, 0x00, 0x00,
- })
-
- // BitcoinMainnetGenesis is the genesis hash of Bitcoin's main chain.
- BitcoinMainnetGenesis = chainhash.Hash([chainhash.HashSize]byte{
- 0x6f, 0xe2, 0x8c, 0x0a, 0xb6, 0xf1, 0xb3, 0x72,
- 0xc1, 0xa6, 0xa2, 0x46, 0xae, 0x63, 0xf7, 0x4f,
- 0x93, 0x1e, 0x83, 0x65, 0xe1, 0x5a, 0x08, 0x9c,
- 0x68, 0xd6, 0x19, 0x00, 0x00, 0x00, 0x00, 0x00,
- })
-
- // LitecoinTestnetGenesis is the genesis hash of Litecoin's testnet4
- // chain.
- LitecoinTestnetGenesis = chainhash.Hash([chainhash.HashSize]byte{
- 0xa0, 0x29, 0x3e, 0x4e, 0xeb, 0x3d, 0xa6, 0xe6,
- 0xf5, 0x6f, 0x81, 0xed, 0x59, 0x5f, 0x57, 0x88,
- 0x0d, 0x1a, 0x21, 0x56, 0x9e, 0x13, 0xee, 0xfd,
- 0xd9, 0x51, 0x28, 0x4b, 0x5a, 0x62, 0x66, 0x49,
- })
-
- // LitecoinMainnetGenesis is the genesis hash of Litecoin's main chain.
- LitecoinMainnetGenesis = chainhash.Hash([chainhash.HashSize]byte{
- 0xe2, 0xbf, 0x04, 0x7e, 0x7e, 0x5a, 0x19, 0x1a,
- 0xa4, 0xef, 0x34, 0xd3, 0x14, 0x97, 0x9d, 0xc9,
- 0x98, 0x6e, 0x0f, 0x19, 0x25, 0x1e, 0xda, 0xba,
- 0x59, 0x40, 0xfd, 0x1f, 0xe3, 0x65, 0xa7, 0x12,
- })
-
- // chainMap is a simple index that maps a chain's genesis hash to the
- // ChainCode enum for that chain.
- chainMap = map[chainhash.Hash]ChainCode{
- BitcoinTestnetGenesis: BitcoinChain,
- LitecoinTestnetGenesis: LitecoinChain,
-
- BitcoinMainnetGenesis: BitcoinChain,
- LitecoinMainnetGenesis: LitecoinChain,
- }
-
- // ChainDNSSeeds is a map of a chain's hash to the set of DNS seeds
- // that will be use to bootstrap peers upon first startup.
- //
- // The first item in the array is the primary host we'll use to attempt
- // the SRV lookup we require. If we're unable to receive a response
- // over UDP, then we'll fall back to manual TCP resolution. The second
- // item in the array is a special A record that we'll query in order to
- // receive the IP address of the current authoritative DNS server for
- // the network seed.
- //
- // TODO(roasbeef): extend and collapse these and chainparams.go into
- // struct like chaincfg.Params
- ChainDNSSeeds = map[chainhash.Hash][][2]string{
- BitcoinMainnetGenesis: {
- {
- "nodes.lightning.directory",
- "soa.nodes.lightning.directory",
- },
- {
- "lseed.bitcoinstats.com",
- },
- },
-
- BitcoinTestnetGenesis: {
- {
- "test.nodes.lightning.directory",
- "soa.nodes.lightning.directory",
- },
- },
-
- LitecoinMainnetGenesis: {
- {
- "ltc.nodes.lightning.directory",
- "soa.nodes.lightning.directory",
- },
- },
- }
-)
-
-// ChainRegistry keeps track of the current chains
-type ChainRegistry struct {
- sync.RWMutex
-
- activeChains map[ChainCode]*ChainControl
- netParams map[ChainCode]*BitcoinNetParams
-
- primaryChain ChainCode
-}
-
-// NewChainRegistry creates a new ChainRegistry.
-func NewChainRegistry() *ChainRegistry {
- return &ChainRegistry{
- activeChains: make(map[ChainCode]*ChainControl),
- netParams: make(map[ChainCode]*BitcoinNetParams),
- }
-}
-
-// RegisterChain assigns an active ChainControl instance to a target chain
-// identified by its ChainCode.
-func (c *ChainRegistry) RegisterChain(newChain ChainCode,
- cc *ChainControl) {
-
- c.Lock()
- c.activeChains[newChain] = cc
- c.Unlock()
-}
-
-// LookupChain attempts to lookup an active ChainControl instance for the
-// target chain.
-func (c *ChainRegistry) LookupChain(targetChain ChainCode) (
- *ChainControl, bool) {
-
- c.RLock()
- cc, ok := c.activeChains[targetChain]
- c.RUnlock()
- return cc, ok
-}
-
-// LookupChainByHash attempts to look up an active ChainControl which
-// corresponds to the passed genesis hash.
-func (c *ChainRegistry) LookupChainByHash(chainHash chainhash.Hash) (*ChainControl, bool) {
- c.RLock()
- defer c.RUnlock()
-
- targetChain, ok := chainMap[chainHash]
- if !ok {
- return nil, ok
- }
-
- cc, ok := c.activeChains[targetChain]
- return cc, ok
-}
-
-// RegisterPrimaryChain sets a target chain as the "home chain" for lnd.
-func (c *ChainRegistry) RegisterPrimaryChain(cc ChainCode) {
- c.Lock()
- defer c.Unlock()
-
- c.primaryChain = cc
-}
-
-// PrimaryChain returns the primary chain for this running lnd instance. The
-// primary chain is considered the "home base" while the other registered
-// chains are treated as secondary chains.
-func (c *ChainRegistry) PrimaryChain() ChainCode {
- c.RLock()
- defer c.RUnlock()
-
- return c.primaryChain
-}
-
-// ActiveChains returns a slice containing the active chains.
-func (c *ChainRegistry) ActiveChains() []ChainCode {
- c.RLock()
- defer c.RUnlock()
-
- chains := make([]ChainCode, 0, len(c.activeChains))
- for activeChain := range c.activeChains {
- chains = append(chains, activeChain)
- }
-
- return chains
-}
-
-// NumActiveChains returns the total number of active chains.
-func (c *ChainRegistry) NumActiveChains() uint32 {
- c.RLock()
- defer c.RUnlock()
-
- return uint32(len(c.activeChains))
-}
diff --git a/lnd/chanacceptor/acceptor_test.go b/lnd/chanacceptor/acceptor_test.go
deleted file mode 100644
index 9a36e38b..00000000
--- a/lnd/chanacceptor/acceptor_test.go
+++ /dev/null
@@ -1,321 +0,0 @@
-package chanacceptor
-
-import (
- "math/big"
- "testing"
- "time"
-
- "github.com/pkt-cash/pktd/btcec"
- "github.com/pkt-cash/pktd/btcutil"
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/btcutil/util"
- "github.com/pkt-cash/pktd/chaincfg"
- "github.com/pkt-cash/pktd/lnd/lnrpc"
- "github.com/pkt-cash/pktd/lnd/lnwallet/chancloser"
- "github.com/pkt-cash/pktd/lnd/lnwire"
- "github.com/stretchr/testify/assert"
-)
-
-const testTimeout = time.Second
-
-type channelAcceptorCtx struct {
- t *testing.T
-
- // extRequests is the channel that we send our channel accept requests
- // into, this channel mocks sending of a request to the rpc acceptor.
- // This channel should be buffered with the number of requests we want
- // to send so that it does not block (like a rpc stream).
- extRequests chan []byte
-
- // responses is a map of pending channel IDs to the response which we
- // wish to mock the remote channel acceptor sending.
- responses map[[32]byte]*lnrpc.ChannelAcceptResponse
-
- // acceptor is the channel acceptor we create for the test.
- acceptor *RPCAcceptor
-
- // errChan is a channel that the error the channel acceptor exits with
- // is sent into.
- errChan chan er.R
-
- // quit is a channel that can be used to shutdown the channel acceptor
- // and return errShuttingDown.
- quit chan struct{}
-}
-
-func newChanAcceptorCtx(t *testing.T, acceptCallCount int,
- responses map[[32]byte]*lnrpc.ChannelAcceptResponse) *channelAcceptorCtx {
-
- testCtx := &channelAcceptorCtx{
- t: t,
- extRequests: make(chan []byte, acceptCallCount),
- responses: responses,
- errChan: make(chan er.R),
- quit: make(chan struct{}),
- }
-
- testCtx.acceptor = NewRPCAcceptor(
- testCtx.receiveResponse, testCtx.sendRequest, testTimeout*5,
- &chaincfg.TestNet3Params, testCtx.quit,
- )
-
- return testCtx
-}
-
-// sendRequest mocks sending a request to the channel acceptor.
-func (c *channelAcceptorCtx) sendRequest(request *lnrpc.ChannelAcceptRequest) error {
- select {
- case c.extRequests <- request.PendingChanId:
-
- case <-time.After(testTimeout):
- c.t.Fatalf("timeout sending request: %v", request.PendingChanId)
- }
-
- return nil
-}
-
-// receiveResponse mocks sending of a response from the channel acceptor.
-func (c *channelAcceptorCtx) receiveResponse() (*lnrpc.ChannelAcceptResponse,
- error) {
-
- select {
- case id := <-c.extRequests:
- scratch := [32]byte{}
- copy(scratch[:], id)
-
- resp, ok := c.responses[scratch]
- assert.True(c.t, ok)
-
- return resp, nil
-
- case <-time.After(testTimeout):
- c.t.Fatalf("timeout receiving request")
- return nil, er.Native(er.New("receiveResponse timeout"))
-
- // Exit if our test acceptor closes the done channel, which indicates
- // that the acceptor is shutting down.
- case <-c.acceptor.done:
- return nil, er.Native(er.New("acceptor shutting down"))
- }
-}
-
-// start runs our channel acceptor in a goroutine which sends its exit error
-// into our test error channel.
-func (c *channelAcceptorCtx) start() {
- go func() {
- c.errChan <- c.acceptor.Run()
- }()
-}
-
-// stop shuts down the test's channel acceptor and asserts that it exits with
-// our expected error.
-func (c *channelAcceptorCtx) stop() {
- close(c.quit)
-
- select {
- case actual := <-c.errChan:
- assert.True(c.t, errShuttingDown.Is(actual))
-
- case <-time.After(testTimeout):
- c.t.Fatal("timeout waiting for acceptor to exit")
- }
-}
-
-// queryAndAssert takes a map of open channel requests which we want to call
-// Accept for to the outcome we expect from the acceptor, dispatches each
-// request in a goroutine and then asserts that we get the outcome we expect.
-func (c *channelAcceptorCtx) queryAndAssert(queries map[*lnwire.OpenChannel]*ChannelAcceptResponse) {
- var (
- node = &btcec.PublicKey{
- X: big.NewInt(1),
- Y: big.NewInt(1),
- }
-
- responses = make(chan struct{})
- )
-
- for request, expected := range queries {
- request := request
- expected := expected
-
- go func() {
- resp := c.acceptor.Accept(&ChannelAcceptRequest{
- Node: node,
- OpenChanMsg: request,
- })
- e1 := expected.ChanAcceptError
- e2 := resp.ChanAcceptError
- assert.True(c.t, er.FuzzyEquals(e1, e2))
- expected.ChanAcceptError = nil
- resp.ChanAcceptError = nil
- assert.Equal(c.t, expected, resp)
- responses <- struct{}{}
- }()
- }
-
- // Wait for each of our requests to return a response before we exit.
- for i := 0; i < len(queries); i++ {
- select {
- case <-responses:
- case <-time.After(testTimeout):
- c.t.Fatalf("did not receive response")
- }
- }
-}
-
-// TestMultipleAcceptClients tests that the RPC acceptor is capable of handling
-// multiple requests to its Accept function and responding to them correctly.
-func TestMultipleAcceptClients(t *testing.T) {
- testAddr := "bcrt1qwrmq9uca0t3dy9t9wtuq5tm4405r7tfzyqn9pp"
- testUpfront, err := chancloser.ParseUpfrontShutdownAddress(
- testAddr, &chaincfg.TestNet3Params,
- )
- util.RequireNoErr(t, err)
-
- var (
- chan1 = &lnwire.OpenChannel{
- PendingChannelID: [32]byte{1},
- }
- chan2 = &lnwire.OpenChannel{
- PendingChannelID: [32]byte{2},
- }
- chan3 = &lnwire.OpenChannel{
- PendingChannelID: [32]byte{3},
- }
-
- customError = er.New("go away")
-
- // Queries is a map of the channel IDs we will query Accept
- // with, and the set of outcomes we expect.
- queries = map[*lnwire.OpenChannel]*ChannelAcceptResponse{
- chan1: NewChannelAcceptResponse(
- true, nil, testUpfront, 1, 2, 3, 4, 5, 6,
- ),
- chan2: NewChannelAcceptResponse(
- false, errChannelRejected.Default(), nil, 0, 0, 0,
- 0, 0, 0,
- ),
- chan3: NewChannelAcceptResponse(
- false, customError, nil, 0, 0, 0, 0, 0, 0,
- ),
- }
-
- // Responses is a mocked set of responses from the remote
- // channel acceptor.
- responses = map[[32]byte]*lnrpc.ChannelAcceptResponse{
- chan1.PendingChannelID: {
- PendingChanId: chan1.PendingChannelID[:],
- Accept: true,
- UpfrontShutdown: testAddr,
- CsvDelay: 1,
- MaxHtlcCount: 2,
- MinAcceptDepth: 3,
- ReserveSat: 4,
- InFlightMaxMsat: 5,
- MinHtlcIn: 6,
- },
- chan2.PendingChannelID: {
- PendingChanId: chan2.PendingChannelID[:],
- Accept: false,
- },
- chan3.PendingChannelID: {
- PendingChanId: chan3.PendingChannelID[:],
- Accept: false,
- Error: customError.String(),
- },
- }
- )
-
- // Create and start our channel acceptor.
- testCtx := newChanAcceptorCtx(t, len(queries), responses)
- testCtx.start()
-
- // Dispatch three queries and assert that we get our expected response.
- // for each.
- testCtx.queryAndAssert(queries)
-
- // Shutdown our acceptor.
- testCtx.stop()
-}
-
-// TestInvalidResponse tests the case where our remote channel acceptor sends us
-// an invalid response, so the channel acceptor stream terminates.
-func TestInvalidResponse(t *testing.T) {
- var (
- chan1 = [32]byte{1}
-
- // We make a single query, and expect it to fail with our
- // generic error because our response is invalid.
- queries = map[*lnwire.OpenChannel]*ChannelAcceptResponse{
- {
- PendingChannelID: chan1,
- }: NewChannelAcceptResponse(
- false, errChannelRejected.Default(), nil, 0, 0,
- 0, 0, 0, 0,
- ),
- }
-
- // Create a single response which is invalid because it accepts
- // the channel but also contains an error message.
- responses = map[[32]byte]*lnrpc.ChannelAcceptResponse{
- chan1: {
- PendingChanId: chan1[:],
- Accept: true,
- Error: "has an error as well",
- },
- }
- )
-
- // Create and start our channel acceptor.
- testCtx := newChanAcceptorCtx(t, len(queries), responses)
- testCtx.start()
-
- testCtx.queryAndAssert(queries)
-
- // We do not expect our channel acceptor to exit because of one invalid
- // response, so we shutdown and assert here.
- testCtx.stop()
-}
-
-// TestInvalidReserve tests validation of the channel reserve proposed by the
-// acceptor against the dust limit that was proposed by the remote peer.
-func TestInvalidReserve(t *testing.T) {
- var (
- chan1 = [32]byte{1}
-
- dustLimit = btcutil.Amount(1000)
- reserve = dustLimit / 2
-
- // We make a single query, and expect it to fail with our
- // generic error because channel reserve is too low.
- queries = map[*lnwire.OpenChannel]*ChannelAcceptResponse{
- {
- PendingChannelID: chan1,
- DustLimit: dustLimit,
- }: NewChannelAcceptResponse(
- false, errChannelRejected.Default(), nil, 0, 0,
- 0, reserve, 0, 0,
- ),
- }
-
- // Create a single response which is invalid because the
- // proposed reserve is below our dust limit.
- responses = map[[32]byte]*lnrpc.ChannelAcceptResponse{
- chan1: {
- PendingChanId: chan1[:],
- Accept: true,
- ReserveSat: uint64(reserve),
- },
- }
- )
-
- // Create and start our channel acceptor.
- testCtx := newChanAcceptorCtx(t, len(queries), responses)
- testCtx.start()
-
- testCtx.queryAndAssert(queries)
-
- // We do not expect our channel acceptor to exit because of one invalid
- // response, so we shutdown and assert here.
- testCtx.stop()
-}
diff --git a/lnd/chanacceptor/chainedacceptor.go b/lnd/chanacceptor/chainedacceptor.go
deleted file mode 100644
index 30466afc..00000000
--- a/lnd/chanacceptor/chainedacceptor.go
+++ /dev/null
@@ -1,98 +0,0 @@
-package chanacceptor
-
-import (
- "sync"
- "sync/atomic"
-
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/pktlog/log"
-)
-
-// ChainedAcceptor represents a conjunction of ChannelAcceptor results.
-type ChainedAcceptor struct {
- // acceptors is a map of ChannelAcceptors that will be evaluated when
- // the ChainedAcceptor's Accept method is called.
- acceptors map[uint64]ChannelAcceptor
- acceptorsMtx sync.RWMutex
-
- acceptorID uint64 // To be used atomically.
-}
-
-// NewChainedAcceptor initializes a ChainedAcceptor.
-func NewChainedAcceptor() *ChainedAcceptor {
- return &ChainedAcceptor{
- acceptors: make(map[uint64]ChannelAcceptor),
- }
-}
-
-// AddAcceptor adds a ChannelAcceptor to this ChainedAcceptor.
-func (c *ChainedAcceptor) AddAcceptor(acceptor ChannelAcceptor) uint64 {
- id := atomic.AddUint64(&c.acceptorID, 1)
-
- c.acceptorsMtx.Lock()
- c.acceptors[id] = acceptor
- c.acceptorsMtx.Unlock()
-
- // Return the id so that a caller can call RemoveAcceptor.
- return id
-}
-
-// RemoveAcceptor removes a ChannelAcceptor from this ChainedAcceptor given
-// an ID.
-func (c *ChainedAcceptor) RemoveAcceptor(id uint64) {
- c.acceptorsMtx.Lock()
- delete(c.acceptors, id)
- c.acceptorsMtx.Unlock()
-}
-
-// Accept evaluates the results of all ChannelAcceptors in the acceptors map
-// and returns the conjunction of all these predicates.
-//
-// NOTE: Part of the ChannelAcceptor interface.
-func (c *ChainedAcceptor) Accept(req *ChannelAcceptRequest) *ChannelAcceptResponse {
- c.acceptorsMtx.RLock()
- defer c.acceptorsMtx.RUnlock()
-
- var finalResp ChannelAcceptResponse
-
- for _, acceptor := range c.acceptors {
- // Call our acceptor to determine whether we want to accept this
- // channel.
- acceptorResponse := acceptor.Accept(req)
-
- // If we should reject the channel, we can just exit early. This
- // has the effect of returning the error belonging to our first
- // failed acceptor.
- if acceptorResponse.RejectChannel() {
- return acceptorResponse
- }
-
- // If we have accepted the channel, we need to set the other
- // fields that were set in the response. However, since we are
- // dealing with multiple responses, we need to make sure that we
- // have not received inconsistent values (eg a csv delay of 1
- // from one acceptor, and a delay of 120 from another). We
- // set each value on our final response if it has not been set
- // yet, and allow duplicate sets if the value is the same. If
- // we cannot set a field, we return an error response.
- var err er.R
- finalResp, err = mergeResponse(finalResp, *acceptorResponse)
- if err != nil {
- log.Errorf("response for: %x has inconsistent values: %v",
- req.OpenChanMsg.PendingChannelID, err)
-
- return NewChannelAcceptResponse(
- false, errChannelRejected.Default(), nil, 0, 0,
- 0, 0, 0, 0,
- )
- }
- }
-
- // If we have gone through all of our acceptors with no objections, we
- // can return an acceptor with a nil error.
- return &finalResp
-}
-
-// A compile-time constraint to ensure ChainedAcceptor implements the
-// ChannelAcceptor interface.
-var _ ChannelAcceptor = (*ChainedAcceptor)(nil)
diff --git a/lnd/chanacceptor/interface.go b/lnd/chanacceptor/interface.go
deleted file mode 100644
index c5638609..00000000
--- a/lnd/chanacceptor/interface.go
+++ /dev/null
@@ -1,112 +0,0 @@
-package chanacceptor
-
-import (
- "github.com/pkt-cash/pktd/btcec"
- "github.com/pkt-cash/pktd/btcutil"
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/lnd/lnwire"
-)
-
-var (
- // errChannelRejected is returned when the rpc channel acceptor rejects
- // a channel due to acceptor timeout, shutdown, or because no custom
- // error value is available when the channel was rejected.
- errChannelRejected = er.GenericErrorType.CodeWithDetail("errChannelRejected", "channel rejected")
-)
-
-// ChannelAcceptRequest is a struct containing the requesting node's public key
-// along with the lnwire.OpenChannel message that they sent when requesting an
-// inbound channel. This information is provided to each acceptor so that they
-// can each leverage their own decision-making with this information.
-type ChannelAcceptRequest struct {
- // Node is the public key of the node requesting to open a channel.
- Node *btcec.PublicKey
-
- // OpenChanMsg is the actual OpenChannel protocol message that the peer
- // sent to us.
- OpenChanMsg *lnwire.OpenChannel
-}
-
-// ChannelAcceptResponse is a struct containing the response to a request to
-// open an inbound channel. Note that fields added to this struct must be added
-// to the mergeResponse function to allow combining of responses from different
-// acceptors.
-type ChannelAcceptResponse struct {
- // ChanAcceptError the error returned by the channel acceptor. If the
- // channel was accepted, this value will be nil.
- ChanAcceptError er.R
-
- // UpfrontShutdown is the address that we will set as our upfront
- // shutdown address.
- UpfrontShutdown lnwire.DeliveryAddress
-
- // CSVDelay is the csv delay we require for the remote peer.
- CSVDelay uint16
-
- // Reserve is the amount that require the remote peer hold in reserve
- // on the channel.
- Reserve btcutil.Amount
-
- // InFlightTotal is the maximum amount that we allow the remote peer to
- // hold in outstanding htlcs.
- InFlightTotal lnwire.MilliSatoshi
-
- // HtlcLimit is the maximum number of htlcs that we allow the remote
- // peer to offer us.
- HtlcLimit uint16
-
- // MinHtlcIn is the minimum incoming htlc value allowed on the channel.
- MinHtlcIn lnwire.MilliSatoshi
-
- // MinAcceptDepth is the minimum depth that the initiator of the
- // channel should wait before considering the channel open.
- MinAcceptDepth uint16
-}
-
-// NewChannelAcceptResponse is a constructor for a channel accept response,
-// which creates a response with an appropriately wrapped error (in the case of
-// a rejection) so that the error will be whitelisted and delivered to the
-// initiating peer. Accepted channels simply return a response containing a nil
-// error.
-func NewChannelAcceptResponse(accept bool, acceptErr er.R,
- upfrontShutdown lnwire.DeliveryAddress, csvDelay, htlcLimit,
- minDepth uint16, reserve btcutil.Amount, inFlight,
- minHtlcIn lnwire.MilliSatoshi) *ChannelAcceptResponse {
-
- resp := &ChannelAcceptResponse{
- UpfrontShutdown: upfrontShutdown,
- CSVDelay: csvDelay,
- Reserve: reserve,
- InFlightTotal: inFlight,
- HtlcLimit: htlcLimit,
- MinHtlcIn: minHtlcIn,
- MinAcceptDepth: minDepth,
- }
-
- // If we want to accept the channel, we return a response with a nil
- // error.
- if accept {
- return resp
- }
-
- // Use a generic error when no custom error is provided.
- if acceptErr == nil {
- acceptErr = errChannelRejected.Default()
- }
-
- resp.ChanAcceptError = acceptErr
-
- return resp
-}
-
-// RejectChannel returns a boolean that indicates whether we should reject the
-// channel.
-func (c *ChannelAcceptResponse) RejectChannel() bool {
- return c.ChanAcceptError != nil
-}
-
-// ChannelAcceptor is an interface that represents a predicate on the data
-// contained in ChannelAcceptRequest.
-type ChannelAcceptor interface {
- Accept(req *ChannelAcceptRequest) *ChannelAcceptResponse
-}
diff --git a/lnd/chanacceptor/merge.go b/lnd/chanacceptor/merge.go
deleted file mode 100644
index f2258df2..00000000
--- a/lnd/chanacceptor/merge.go
+++ /dev/null
@@ -1,152 +0,0 @@
-package chanacceptor
-
-import (
- "bytes"
-
- "github.com/pkt-cash/pktd/btcutil"
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/lnd/lnwire"
-)
-
-const (
- // We use field names in our errors for more readable errors. Create
- // consts for them here so that we can exactly match in our unit tests.
- fieldCSV = "csv delay"
- fieldHtlcLimit = "htlc limit"
- fieldMinDep = "min depth"
- fieldReserve = "reserve"
- fieldMinIn = "min htlc in"
- fieldInFlightTotal = "in flight total"
- fieldUpfrontShutdown = "upfront shutdown"
-)
-
-// fieldMismatchError returns a merge error for a named field when we get two
-// channel acceptor responses which have different values set.
-func fieldMismatchError(name string, current, new interface{}) er.R {
- return er.Errorf("multiple values set for: %v, %v and %v",
- name, current, new)
-}
-
-// mergeInt64 merges two int64 values, failing if they have different non-zero
-// values.
-func mergeInt64(name string, current, new int64) (int64, er.R) {
- switch {
- case current == 0:
- return new, nil
-
- case new == 0:
- return current, nil
-
- case current != new:
- return 0, fieldMismatchError(name, current, new)
-
- default:
- return new, nil
- }
-}
-
-// mergeMillisatoshi merges two msat values, failing if they have different
-// non-zero values.
-func mergeMillisatoshi(name string, current,
- new lnwire.MilliSatoshi) (lnwire.MilliSatoshi, er.R) {
-
- switch {
- case current == 0:
- return new, nil
-
- case new == 0:
- return current, nil
-
- case current != new:
- return 0, fieldMismatchError(name, current, new)
-
- default:
- return new, nil
- }
-}
-
-// mergeDeliveryAddress merges two delivery address values, failing if they have
-// different non-zero values.
-func mergeDeliveryAddress(name string, current,
- new lnwire.DeliveryAddress) (lnwire.DeliveryAddress, er.R) {
-
- switch {
- case current == nil:
- return new, nil
-
- case new == nil:
- return current, nil
-
- case !bytes.Equal(current, new):
- return nil, fieldMismatchError(name, current, new)
-
- default:
- return new, nil
- }
-}
-
-// mergeResponse takes two channel accept responses, and attempts to merge their
-// fields, failing if any fields conflict (are non-zero and not equal). It
-// returns a new response that has all the merged fields in it.
-func mergeResponse(current, new ChannelAcceptResponse) (ChannelAcceptResponse,
- er.R) {
-
- csv, err := mergeInt64(
- fieldCSV, int64(current.CSVDelay), int64(new.CSVDelay),
- )
- if err != nil {
- return current, err
- }
- current.CSVDelay = uint16(csv)
-
- htlcLimit, err := mergeInt64(
- fieldHtlcLimit, int64(current.HtlcLimit),
- int64(new.HtlcLimit),
- )
- if err != nil {
- return current, err
- }
- current.HtlcLimit = uint16(htlcLimit)
-
- minDepth, err := mergeInt64(
- fieldMinDep, int64(current.MinAcceptDepth),
- int64(new.MinAcceptDepth),
- )
- if err != nil {
- return current, err
- }
- current.MinAcceptDepth = uint16(minDepth)
-
- reserve, err := mergeInt64(
- fieldReserve, int64(current.Reserve), int64(new.Reserve),
- )
- if err != nil {
- return current, err
- }
- current.Reserve = btcutil.Amount(reserve)
-
- current.MinHtlcIn, err = mergeMillisatoshi(
- fieldMinIn, current.MinHtlcIn, new.MinHtlcIn,
- )
- if err != nil {
- return current, err
- }
-
- current.InFlightTotal, err = mergeMillisatoshi(
- fieldInFlightTotal, current.InFlightTotal,
- new.InFlightTotal,
- )
- if err != nil {
- return current, err
- }
-
- current.UpfrontShutdown, err = mergeDeliveryAddress(
- fieldUpfrontShutdown, current.UpfrontShutdown,
- new.UpfrontShutdown,
- )
- if err != nil {
- return current, err
- }
-
- return current, nil
-}
diff --git a/lnd/chanacceptor/merge_test.go b/lnd/chanacceptor/merge_test.go
deleted file mode 100644
index 2da363f4..00000000
--- a/lnd/chanacceptor/merge_test.go
+++ /dev/null
@@ -1,189 +0,0 @@
-package chanacceptor
-
-import (
- "testing"
-
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/lnd/lnwire"
- "github.com/stretchr/testify/require"
-)
-
-// TestMergeResponse tests merging of channel acceptor responses.
-func TestMergeResponse(t *testing.T) {
- var (
- addr1 = lnwire.DeliveryAddress{1}
- addr2 = lnwire.DeliveryAddress{2}
-
- populatedResp = ChannelAcceptResponse{
- UpfrontShutdown: addr1,
- CSVDelay: 2,
- Reserve: 3,
- InFlightTotal: 4,
- HtlcLimit: 5,
- MinHtlcIn: 6,
- MinAcceptDepth: 7,
- }
- )
-
- tests := []struct {
- name string
- current ChannelAcceptResponse
- new ChannelAcceptResponse
- merged ChannelAcceptResponse
- err er.R
- }{
- {
- name: "same response",
- current: populatedResp,
- new: populatedResp,
- merged: populatedResp,
- err: nil,
- },
- {
- name: "different upfront",
- current: ChannelAcceptResponse{
- UpfrontShutdown: addr1,
- },
- new: ChannelAcceptResponse{
- UpfrontShutdown: addr2,
- },
- err: fieldMismatchError(fieldUpfrontShutdown, addr1, addr2),
- },
- {
- name: "different csv",
- current: ChannelAcceptResponse{
- CSVDelay: 1,
- },
- new: ChannelAcceptResponse{
- CSVDelay: 2,
- },
- err: fieldMismatchError(fieldCSV, 1, 2),
- },
- {
- name: "different reserve",
- current: ChannelAcceptResponse{
- Reserve: 1,
- },
- new: ChannelAcceptResponse{
- Reserve: 2,
- },
- err: fieldMismatchError(fieldReserve, 1, 2),
- },
- {
- name: "different in flight",
- current: ChannelAcceptResponse{
- InFlightTotal: 1,
- },
- new: ChannelAcceptResponse{
- InFlightTotal: 2,
- },
- err: fieldMismatchError(
- fieldInFlightTotal, lnwire.MilliSatoshi(1),
- lnwire.MilliSatoshi(2),
- ),
- },
- {
- name: "different htlc limit",
- current: ChannelAcceptResponse{
- HtlcLimit: 1,
- },
- new: ChannelAcceptResponse{
- HtlcLimit: 2,
- },
- err: fieldMismatchError(fieldHtlcLimit, 1, 2),
- },
- {
- name: "different min in",
- current: ChannelAcceptResponse{
- MinHtlcIn: 1,
- },
- new: ChannelAcceptResponse{
- MinHtlcIn: 2,
- },
- err: fieldMismatchError(
- fieldMinIn, lnwire.MilliSatoshi(1),
- lnwire.MilliSatoshi(2),
- ),
- },
- {
- name: "different depth",
- current: ChannelAcceptResponse{
- MinAcceptDepth: 1,
- },
- new: ChannelAcceptResponse{
- MinAcceptDepth: 2,
- },
- err: fieldMismatchError(fieldMinDep, 1, 2),
- },
- {
- name: "merge all values",
- current: ChannelAcceptResponse{
- UpfrontShutdown: lnwire.DeliveryAddress{1},
- CSVDelay: 1,
- Reserve: 0,
- InFlightTotal: 3,
- HtlcLimit: 0,
- MinHtlcIn: 5,
- MinAcceptDepth: 0,
- },
- new: ChannelAcceptResponse{
- UpfrontShutdown: nil,
- CSVDelay: 0,
- Reserve: 2,
- InFlightTotal: 0,
- HtlcLimit: 4,
- MinHtlcIn: 0,
- MinAcceptDepth: 6,
- },
- merged: ChannelAcceptResponse{
- UpfrontShutdown: lnwire.DeliveryAddress{1},
- CSVDelay: 1,
- Reserve: 2,
- InFlightTotal: 3,
- HtlcLimit: 4,
- MinHtlcIn: 5,
- MinAcceptDepth: 6,
- },
- err: nil,
- },
- {
- // Test the case where fields have the same non-zero
- // value, and the case where only response value is
- // non-zero.
- name: "empty and identical",
- current: ChannelAcceptResponse{
- CSVDelay: 1,
- Reserve: 2,
- InFlightTotal: 0,
- },
- new: ChannelAcceptResponse{
- CSVDelay: 0,
- Reserve: 2,
- InFlightTotal: 3,
- },
- merged: ChannelAcceptResponse{
- CSVDelay: 1,
- Reserve: 2,
- InFlightTotal: 3,
- },
- err: nil,
- },
- }
-
- for _, test := range tests {
- test := test
-
- t.Run(test.name, func(t *testing.T) {
- resp, err := mergeResponse(test.current, test.new)
- require.True(t, er.FuzzyEquals(test.err, err))
-
- // If we expect an error, exit early rather than compare
- // our result.
- if test.err != nil {
- return
- }
-
- require.Equal(t, test.merged, resp)
- })
- }
-}
diff --git a/lnd/chanacceptor/rpcacceptor.go b/lnd/chanacceptor/rpcacceptor.go
deleted file mode 100644
index 7376282d..00000000
--- a/lnd/chanacceptor/rpcacceptor.go
+++ /dev/null
@@ -1,411 +0,0 @@
-package chanacceptor
-
-import (
- "encoding/hex"
- "fmt"
- "sync"
- "time"
-
- "github.com/pkt-cash/pktd/btcutil"
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/chaincfg"
- "github.com/pkt-cash/pktd/lnd/input"
- "github.com/pkt-cash/pktd/lnd/lnrpc"
- "github.com/pkt-cash/pktd/lnd/lnwallet/chancloser"
- "github.com/pkt-cash/pktd/lnd/lnwire"
- "github.com/pkt-cash/pktd/pktlog/log"
-)
-
-var (
- errShuttingDown = er.GenericErrorType.CodeWithDetail("errShuttingDown", "server shutting down")
-
- // errCustomLength is returned when our custom error's length exceeds
- // our maximum.
- errCustomLength = er.GenericErrorType.CodeWithDetail("errCustomLength",
- fmt.Sprintf("custom error message exceeds length "+
- "limit: %v", maxErrorLength))
-
- // errInvalidUpfrontShutdown is returned when we cannot parse the
- // upfront shutdown address returned.
- errInvalidUpfrontShutdown = er.GenericErrorType.CodeWithDetail("errInvalidUpfrontShutdown",
- "could not parse upfront "+
- "shutdown address")
-
- // errInsufficientReserve is returned when the reserve proposed by for
- // a channel is less than the dust limit originally supplied.
- errInsufficientReserve = er.GenericErrorType.CodeWithDetail("errInsufficientReserve",
- "reserve lower than proposed dust "+
- "limit")
-
- // errAcceptWithError is returned when we get a response which accepts
- // a channel but ambiguously also sets a custom error message.
- errAcceptWithError = er.GenericErrorType.CodeWithDetail("errAcceptWithError",
- "channel acceptor response accepts "+
- "channel, but also includes custom error")
-
- // errMaxHtlcTooHigh is returned if our htlc count exceeds the number
- // hard-set by BOLT 2.
- errMaxHtlcTooHigh = er.GenericErrorType.CodeWithDetail("errMaxHtlcTooHigh",
- fmt.Sprintf("htlc limit exceeds spec limit of: %v",
- input.MaxHTLCNumber/2))
-
- // maxErrorLength is the maximum error length we allow the error we
- // send to our peer to be.
- maxErrorLength = 500
-)
-
-// chanAcceptInfo contains a request for a channel acceptor decision, and a
-// channel that the response should be sent on.
-type chanAcceptInfo struct {
- request *ChannelAcceptRequest
- response chan *ChannelAcceptResponse
-}
-
-// RPCAcceptor represents the RPC-controlled variant of the ChannelAcceptor.
-// One RPCAcceptor allows one RPC client.
-type RPCAcceptor struct {
- // receive is a function from which we receive channel acceptance
- // decisions. Note that this function is expected to block.
- receive func() (*lnrpc.ChannelAcceptResponse, error)
-
- // send is a function which sends requests for channel acceptance
- // decisions into our rpc stream.
- send func(request *lnrpc.ChannelAcceptRequest) error
-
- // requests is a channel that we send requests for a acceptor response
- // into.
- requests chan *chanAcceptInfo
-
- // timeout is the amount of time we allow the channel acceptance
- // decision to take. This time includes the time to send a query to the
- // acceptor, and the time it takes to receive a response.
- timeout time.Duration
-
- // params are our current chain params.
- params *chaincfg.Params
-
- // done is closed when the rpc client terminates.
- done chan struct{}
-
- // quit is closed when lnd is shutting down.
- quit chan struct{}
-
- wg sync.WaitGroup
-}
-
-// Accept is a predicate on the ChannelAcceptRequest which is sent to the RPC
-// client who will respond with the ultimate decision. This function passes the
-// request into the acceptor's requests channel, and returns the response it
-// receives, failing the request if the timeout elapses.
-//
-// NOTE: Part of the ChannelAcceptor interface.
-func (r *RPCAcceptor) Accept(req *ChannelAcceptRequest) *ChannelAcceptResponse {
- respChan := make(chan *ChannelAcceptResponse, 1)
-
- newRequest := &chanAcceptInfo{
- request: req,
- response: respChan,
- }
-
- // timeout is the time after which ChannelAcceptRequests expire.
- timeout := time.After(r.timeout)
-
- // Create a rejection response which we can use for the cases where we
- // reject the channel.
- rejectChannel := NewChannelAcceptResponse(
- false, errChannelRejected.Default(), nil, 0, 0, 0, 0, 0, 0,
- )
-
- // Send the request to the newRequests channel.
- select {
- case r.requests <- newRequest:
-
- case <-timeout:
- log.Errorf("RPCAcceptor returned false - reached timeout of %v",
- r.timeout)
- return rejectChannel
-
- case <-r.done:
- return rejectChannel
-
- case <-r.quit:
- return rejectChannel
- }
-
- // Receive the response and return it. If no response has been received
- // in AcceptorTimeout, then return false.
- select {
- case resp := <-respChan:
- return resp
-
- case <-timeout:
- log.Errorf("RPCAcceptor returned false - reached timeout of %v",
- r.timeout)
- return rejectChannel
-
- case <-r.done:
- return rejectChannel
-
- case <-r.quit:
- return rejectChannel
- }
-}
-
-// NewRPCAcceptor creates and returns an instance of the RPCAcceptor.
-func NewRPCAcceptor(receive func() (*lnrpc.ChannelAcceptResponse, error),
- send func(*lnrpc.ChannelAcceptRequest) error, timeout time.Duration,
- params *chaincfg.Params, quit chan struct{}) *RPCAcceptor {
-
- return &RPCAcceptor{
- receive: receive,
- send: send,
- requests: make(chan *chanAcceptInfo),
- timeout: timeout,
- params: params,
- done: make(chan struct{}),
- quit: quit,
- }
-}
-
-// Run is the main loop for the RPC Acceptor. This function will block until
-// it receives the signal that lnd is shutting down, or the rpc stream is
-// cancelled by the client.
-func (r *RPCAcceptor) Run() er.R {
- // Wait for our goroutines to exit before we return.
- defer r.wg.Wait()
-
- // Create a channel that responses from acceptors are sent into.
- responses := make(chan lnrpc.ChannelAcceptResponse)
-
- // errChan is used by the receive loop to signal any errors that occur
- // during reading from the stream. This is primarily used to shutdown
- // the send loop in the case of an RPC client disconnecting.
- errChan := make(chan er.R, 1)
-
- // Start a goroutine to receive responses from the channel acceptor.
- // We expect the receive function to block, so it must be run in a
- // goroutine (otherwise we could not send more than one channel accept
- // request to the client).
- r.wg.Add(1)
- go func() {
- r.receiveResponses(errChan, responses)
- r.wg.Done()
- }()
-
- return r.sendAcceptRequests(errChan, responses)
-}
-
-// receiveResponses receives responses for our channel accept requests and
-// dispatches them into the responses channel provided, sending any errors that
-// occur into the error channel provided.
-func (r *RPCAcceptor) receiveResponses(errChan chan er.R,
- responses chan lnrpc.ChannelAcceptResponse) {
-
- for {
- resp, err := r.receive()
- if err != nil {
- errChan <- er.E(err)
- return
- }
-
- var pendingID [32]byte
- copy(pendingID[:], resp.PendingChanId)
-
- openChanResp := lnrpc.ChannelAcceptResponse{
- Accept: resp.Accept,
- PendingChanId: pendingID[:],
- Error: resp.Error,
- UpfrontShutdown: resp.UpfrontShutdown,
- CsvDelay: resp.CsvDelay,
- ReserveSat: resp.ReserveSat,
- InFlightMaxMsat: resp.InFlightMaxMsat,
- MaxHtlcCount: resp.MaxHtlcCount,
- MinHtlcIn: resp.MinHtlcIn,
- MinAcceptDepth: resp.MinAcceptDepth,
- }
-
- // We have received a decision for one of our channel
- // acceptor requests.
- select {
- case responses <- openChanResp:
-
- case <-r.done:
- return
-
- case <-r.quit:
- return
- }
- }
-}
-
-// sendAcceptRequests handles channel acceptor requests sent to us by our
-// Accept() function, dispatching them to our acceptor stream and coordinating
-// return of responses to their callers.
-func (r *RPCAcceptor) sendAcceptRequests(errChan chan er.R,
- responses chan lnrpc.ChannelAcceptResponse) er.R {
-
- // Close the done channel to indicate that the acceptor is no longer
- // listening and any in-progress requests should be terminated.
- defer close(r.done)
-
- // Create a map of pending channel IDs to our original open channel
- // request and a response channel. We keep the original chanel open
- // message so that we can validate our response against it.
- acceptRequests := make(map[[32]byte]*chanAcceptInfo)
-
- for {
- select {
- // Consume requests passed to us from our Accept() function and
- // send them into our stream.
- case newRequest := <-r.requests:
-
- req := newRequest.request
- pendingChanID := req.OpenChanMsg.PendingChannelID
-
- acceptRequests[pendingChanID] = newRequest
-
- // A ChannelAcceptRequest has been received, send it to the client.
- chanAcceptReq := &lnrpc.ChannelAcceptRequest{
- NodePubkey: req.Node.SerializeCompressed(),
- ChainHash: req.OpenChanMsg.ChainHash[:],
- PendingChanId: req.OpenChanMsg.PendingChannelID[:],
- FundingAmt: uint64(req.OpenChanMsg.FundingAmount),
- PushAmt: uint64(req.OpenChanMsg.PushAmount),
- DustLimit: uint64(req.OpenChanMsg.DustLimit),
- MaxValueInFlight: uint64(req.OpenChanMsg.MaxValueInFlight),
- ChannelReserve: uint64(req.OpenChanMsg.ChannelReserve),
- MinHtlc: uint64(req.OpenChanMsg.HtlcMinimum),
- FeePerKw: uint64(req.OpenChanMsg.FeePerKiloWeight),
- CsvDelay: uint32(req.OpenChanMsg.CsvDelay),
- MaxAcceptedHtlcs: uint32(req.OpenChanMsg.MaxAcceptedHTLCs),
- ChannelFlags: uint32(req.OpenChanMsg.ChannelFlags),
- }
-
- if err := r.send(chanAcceptReq); err != nil {
- return er.E(err)
- }
-
- // Process newly received responses from our channel acceptor,
- // looking the original request up in our map of requests and
- // dispatching the response.
- case resp := <-responses:
- // Look up the appropriate channel to send on given the
- // pending ID. If a channel is found, send the response
- // over it.
- var pendingID [32]byte
- copy(pendingID[:], resp.PendingChanId)
- requestInfo, ok := acceptRequests[pendingID]
- if !ok {
- continue
- }
-
- // Validate the response we have received. If it is not
- // valid, we log our error and proceed to deliver the
- // rejection.
- accept, acceptErr, shutdown, err := r.validateAcceptorResponse(
- requestInfo.request.OpenChanMsg.DustLimit, resp,
- )
- if err != nil {
- log.Errorf("Invalid acceptor response: %v", err)
- }
-
- requestInfo.response <- NewChannelAcceptResponse(
- accept, acceptErr, shutdown,
- uint16(resp.CsvDelay),
- uint16(resp.MaxHtlcCount),
- uint16(resp.MinAcceptDepth),
- btcutil.Amount(resp.ReserveSat),
- lnwire.MilliSatoshi(resp.InFlightMaxMsat),
- lnwire.MilliSatoshi(resp.MinHtlcIn),
- )
-
- // Delete the channel from the acceptRequests map.
- delete(acceptRequests, pendingID)
-
- // If we failed to receive from our acceptor, we exit.
- case err := <-errChan:
- log.Errorf("Received an error: %v, shutting down", err)
- return err
-
- // Exit if we are shutting down.
- case <-r.quit:
- return errShuttingDown.Default()
- }
- }
-}
-
-// validateAcceptorResponse validates the response we get from the channel
-// acceptor, returning a boolean indicating whether to accept the channel, an
-// error to send to the peer, and any validation errors that occurred.
-func (r *RPCAcceptor) validateAcceptorResponse(dustLimit btcutil.Amount,
- req lnrpc.ChannelAcceptResponse) (bool, er.R, lnwire.DeliveryAddress,
- er.R) {
-
- channelStr := hex.EncodeToString(req.PendingChanId)
-
- // Check that the max htlc count is within the BOLT 2 hard-limit of 483.
- // The initiating side should fail values above this anyway, but we
- // catch the invalid user input here.
- if req.MaxHtlcCount > input.MaxHTLCNumber/2 {
- log.Errorf("Max htlc count: %v for channel: %v is greater "+
- "than limit of: %v", req.MaxHtlcCount, channelStr,
- input.MaxHTLCNumber/2)
-
- return false, errChannelRejected.Default(), nil, errMaxHtlcTooHigh.Default()
- }
-
- // Ensure that the reserve that has been proposed, if it is set, is at
- // least the dust limit that was proposed by the remote peer. This is
- // required by BOLT 2.
- reserveSat := btcutil.Amount(req.ReserveSat)
- if reserveSat != 0 && reserveSat < dustLimit {
- log.Errorf("Remote reserve: %v sat for channel: %v must be "+
- "at least equal to proposed dust limit: %v",
- req.ReserveSat, channelStr, dustLimit)
-
- return false, errChannelRejected.Default(), nil, errInsufficientReserve.Default()
- }
-
- // Attempt to parse the upfront shutdown address provided.
- upfront, err := chancloser.ParseUpfrontShutdownAddress(
- req.UpfrontShutdown, r.params,
- )
- if err != nil {
- log.Errorf("Could not parse upfront shutdown for "+
- "%v: %v", channelStr, err)
-
- return false, errChannelRejected.Default(), nil, errInvalidUpfrontShutdown.Default()
- }
-
- // Check that the custom error provided is valid.
- if len(req.Error) > maxErrorLength {
- return false, errChannelRejected.Default(), nil, errCustomLength.Default()
- }
-
- var haveCustomError = len(req.Error) != 0
-
- switch {
- // If accept is true, but we also have an error specified, we fail
- // because this result is ambiguous.
- case req.Accept && haveCustomError:
- return false, errChannelRejected.Default(), nil, errAcceptWithError.Default()
-
- // If we accept without an error message, we can just return a nil
- // error.
- case req.Accept:
- return true, nil, upfront, nil
-
- // If we reject the channel, and have a custom error, then we use it.
- case haveCustomError:
- return false, er.Errorf(req.Error), nil, nil
-
- // Otherwise, we have rejected the channel with no custom error, so we
- // just use a generic error to fail the channel.
- default:
- return false, errChannelRejected.Default(), nil, nil
- }
-}
-
-// A compile-time constraint to ensure RPCAcceptor implements the ChannelAcceptor
-// interface.
-var _ ChannelAcceptor = (*RPCAcceptor)(nil)
diff --git a/lnd/chanacceptor/rpcacceptor_test.go b/lnd/chanacceptor/rpcacceptor_test.go
deleted file mode 100644
index 0114f552..00000000
--- a/lnd/chanacceptor/rpcacceptor_test.go
+++ /dev/null
@@ -1,139 +0,0 @@
-package chanacceptor
-
-import (
- "strings"
- "testing"
-
- "github.com/pkt-cash/pktd/btcutil"
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/chaincfg"
- "github.com/pkt-cash/pktd/lnd/input"
- "github.com/pkt-cash/pktd/lnd/lnrpc"
- "github.com/pkt-cash/pktd/lnd/lnwallet/chancloser"
- "github.com/pkt-cash/pktd/lnd/lnwire"
- "github.com/stretchr/testify/require"
-)
-
-// TestValidateAcceptorResponse test validation of acceptor responses.
-func TestValidateAcceptorResponse(t *testing.T) {
- var (
- customError = er.New("custom error")
- validAddr = "bcrt1qwrmq9uca0t3dy9t9wtuq5tm4405r7tfzyqn9pp"
- addr, _ = chancloser.ParseUpfrontShutdownAddress(
- validAddr, &chaincfg.TestNet3Params,
- )
- )
-
- tests := []struct {
- name string
- dustLimit btcutil.Amount
- response lnrpc.ChannelAcceptResponse
- accept bool
- acceptorErr er.R
- error er.R
- shutdown lnwire.DeliveryAddress
- }{
- {
- name: "accepted with error",
- response: lnrpc.ChannelAcceptResponse{
- Accept: true,
- Error: customError.String(),
- },
- accept: false,
- acceptorErr: errChannelRejected.Default(),
- error: errAcceptWithError.Default(),
- },
- {
- name: "custom error too long",
- response: lnrpc.ChannelAcceptResponse{
- Accept: false,
- Error: strings.Repeat(" ", maxErrorLength+1),
- },
- accept: false,
- acceptorErr: errChannelRejected.Default(),
- error: errCustomLength.Default(),
- },
- {
- name: "accepted",
- response: lnrpc.ChannelAcceptResponse{
- Accept: true,
- UpfrontShutdown: validAddr,
- },
- accept: true,
- acceptorErr: nil,
- error: nil,
- shutdown: addr,
- },
- {
- name: "rejected with error",
- response: lnrpc.ChannelAcceptResponse{
- Accept: false,
- Error: customError.String(),
- },
- accept: false,
- acceptorErr: customError,
- error: nil,
- },
- {
- name: "rejected with no error",
- response: lnrpc.ChannelAcceptResponse{
- Accept: false,
- },
- accept: false,
- acceptorErr: errChannelRejected.Default(),
- error: nil,
- },
- {
- name: "invalid upfront shutdown",
- response: lnrpc.ChannelAcceptResponse{
- Accept: true,
- UpfrontShutdown: "invalid addr",
- },
- accept: false,
- acceptorErr: errChannelRejected.Default(),
- error: errInvalidUpfrontShutdown.Default(),
- },
- {
- name: "reserve too low",
- dustLimit: 100,
- response: lnrpc.ChannelAcceptResponse{
- Accept: true,
- ReserveSat: 10,
- },
- accept: false,
- acceptorErr: errChannelRejected.Default(),
- error: errInsufficientReserve.Default(),
- },
- {
- name: "max htlcs too high",
- dustLimit: 100,
- response: lnrpc.ChannelAcceptResponse{
- Accept: true,
- MaxHtlcCount: 1 + input.MaxHTLCNumber/2,
- },
- accept: false,
- acceptorErr: errChannelRejected.Default(),
- error: errMaxHtlcTooHigh.Default(),
- },
- }
-
- for _, test := range tests {
- test := test
-
- t.Run(test.name, func(t *testing.T) {
- // Create an acceptor, everything can be nil because
- // we just need the params.
- acceptor := NewRPCAcceptor(
- nil, nil, 0, &chaincfg.TestNet3Params, nil,
- )
-
- accept, acceptErr, shutdown, err := acceptor.validateAcceptorResponse(
- test.dustLimit, test.response,
- )
- require.Equal(t, test.accept, accept)
- require.True(t, er.FuzzyEquals(test.acceptorErr, acceptErr))
- require.True(t, er.FuzzyEquals(test.error, err))
- require.Equal(t, test.shutdown, shutdown)
- })
- }
-}
diff --git a/lnd/chanbackup/backup.go b/lnd/chanbackup/backup.go
deleted file mode 100644
index 67561369..00000000
--- a/lnd/chanbackup/backup.go
+++ /dev/null
@@ -1,100 +0,0 @@
-package chanbackup
-
-import (
- "net"
-
- "github.com/pkt-cash/pktd/btcec"
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/lnd/channeldb"
- "github.com/pkt-cash/pktd/pktlog/log"
- "github.com/pkt-cash/pktd/wire"
-)
-
-// LiveChannelSource is an interface that allows us to query for the set of
-// live channels. A live channel is one that is open, and has not had a
-// commitment transaction broadcast.
-type LiveChannelSource interface {
- // FetchAllChannels returns all known live channels.
- FetchAllChannels() ([]*channeldb.OpenChannel, er.R)
-
- // FetchChannel attempts to locate a live channel identified by the
- // passed chanPoint.
- FetchChannel(chanPoint wire.OutPoint) (*channeldb.OpenChannel, er.R)
-
- // AddrsForNode returns all known addresses for the target node public
- // key.
- AddrsForNode(nodePub *btcec.PublicKey) ([]net.Addr, er.R)
-}
-
-// assembleChanBackup attempts to assemble a static channel backup for the
-// passed open channel. The backup includes all information required to restore
-// the channel, as well as addressing information so we can find the peer and
-// reconnect to them to initiate the protocol.
-func assembleChanBackup(chanSource LiveChannelSource,
- openChan *channeldb.OpenChannel) (*Single, er.R) {
-
- log.Debugf("Crafting backup for ChannelPoint(%v)",
- openChan.FundingOutpoint)
-
- // First, we'll query the channel source to obtain all the addresses
- // that are are associated with the peer for this channel.
- nodeAddrs, err := chanSource.AddrsForNode(openChan.IdentityPub)
- if err != nil {
- return nil, err
- }
-
- single := NewSingle(openChan, nodeAddrs)
-
- return &single, nil
-}
-
-// FetchBackupForChan attempts to create a plaintext static channel backup for
-// the target channel identified by its channel point. If we're unable to find
-// the target channel, then an error will be returned.
-func FetchBackupForChan(chanPoint wire.OutPoint,
- chanSource LiveChannelSource) (*Single, er.R) {
-
- // First, we'll query the channel source to see if the channel is known
- // and open within the database.
- targetChan, err := chanSource.FetchChannel(chanPoint)
- if err != nil {
- // If we can't find the channel, then we return with an error,
- // as we have nothing to backup.
- return nil, er.Errorf("unable to find target channel")
- }
-
- // Once we have the target channel, we can assemble the backup using
- // the source to obtain any extra information that we may need.
- staticChanBackup, err := assembleChanBackup(chanSource, targetChan)
- if err != nil {
- return nil, er.Errorf("unable to create chan backup: %v", err)
- }
-
- return staticChanBackup, nil
-}
-
-// FetchStaticChanBackups will return a plaintext static channel back up for
-// all known active/open channels within the passed channel source.
-func FetchStaticChanBackups(chanSource LiveChannelSource) ([]Single, er.R) {
- // First, we'll query the backup source for information concerning all
- // currently open and available channels.
- openChans, err := chanSource.FetchAllChannels()
- if err != nil {
- return nil, err
- }
-
- // Now that we have all the channels, we'll use the chanSource to
- // obtain any auxiliary information we need to craft a backup for each
- // channel.
- staticChanBackups := make([]Single, 0, len(openChans))
- for _, openChan := range openChans {
- chanBackup, err := assembleChanBackup(chanSource, openChan)
- if err != nil {
- return nil, err
- }
-
- staticChanBackups = append(staticChanBackups, *chanBackup)
- }
-
- return staticChanBackups, nil
-}
diff --git a/lnd/chanbackup/backup_test.go b/lnd/chanbackup/backup_test.go
deleted file mode 100644
index c9d73a4d..00000000
--- a/lnd/chanbackup/backup_test.go
+++ /dev/null
@@ -1,197 +0,0 @@
-package chanbackup
-
-import (
- "net"
- "testing"
-
- "github.com/pkt-cash/pktd/btcec"
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/lnd/channeldb"
- "github.com/pkt-cash/pktd/wire"
-)
-
-type mockChannelSource struct {
- chans map[wire.OutPoint]*channeldb.OpenChannel
-
- failQuery bool
-
- addrs map[[33]byte][]net.Addr
-}
-
-func newMockChannelSource() *mockChannelSource {
- return &mockChannelSource{
- chans: make(map[wire.OutPoint]*channeldb.OpenChannel),
- addrs: make(map[[33]byte][]net.Addr),
- }
-}
-
-func (m *mockChannelSource) FetchAllChannels() ([]*channeldb.OpenChannel, er.R) {
- if m.failQuery {
- return nil, er.Errorf("fail")
- }
-
- chans := make([]*channeldb.OpenChannel, 0, len(m.chans))
- for _, channel := range m.chans {
- chans = append(chans, channel)
- }
-
- return chans, nil
-}
-
-func (m *mockChannelSource) FetchChannel(chanPoint wire.OutPoint) (*channeldb.OpenChannel, er.R) {
- if m.failQuery {
- return nil, er.Errorf("fail")
- }
-
- channel, ok := m.chans[chanPoint]
- if !ok {
- return nil, er.Errorf("can't find chan")
- }
-
- return channel, nil
-}
-
-func (m *mockChannelSource) addAddrsForNode(nodePub *btcec.PublicKey, addrs []net.Addr) {
- var nodeKey [33]byte
- copy(nodeKey[:], nodePub.SerializeCompressed())
-
- m.addrs[nodeKey] = addrs
-}
-
-func (m *mockChannelSource) AddrsForNode(nodePub *btcec.PublicKey) ([]net.Addr, er.R) {
- if m.failQuery {
- return nil, er.Errorf("fail")
- }
-
- var nodeKey [33]byte
- copy(nodeKey[:], nodePub.SerializeCompressed())
-
- addrs, ok := m.addrs[nodeKey]
- if !ok {
- return nil, er.Errorf("can't find addr")
- }
-
- return addrs, nil
-}
-
-// TestFetchBackupForChan tests that we're able to construct a single channel
-// backup for channels that are known, unknown, and also channels in which we
-// can find addresses for and otherwise.
-func TestFetchBackupForChan(t *testing.T) {
- t.Parallel()
-
- // First, we'll make two channels, only one of them will have all the
- // information we need to construct set of backups for them.
- randomChan1, err := genRandomOpenChannelShell()
- if err != nil {
- t.Fatalf("unable to generate chan: %v", err)
- }
- randomChan2, err := genRandomOpenChannelShell()
- if err != nil {
- t.Fatalf("unable to generate chan: %v", err)
- }
-
- chanSource := newMockChannelSource()
- chanSource.chans[randomChan1.FundingOutpoint] = randomChan1
- chanSource.chans[randomChan2.FundingOutpoint] = randomChan2
-
- chanSource.addAddrsForNode(randomChan1.IdentityPub, []net.Addr{addr1})
-
- testCases := []struct {
- chanPoint wire.OutPoint
-
- pass bool
- }{
- // Able to find channel, and addresses, should pass.
- {
- chanPoint: randomChan1.FundingOutpoint,
- pass: true,
- },
-
- // Able to find channel, not able to find addrs, should fail.
- {
- chanPoint: randomChan2.FundingOutpoint,
- pass: false,
- },
-
- // Not able to find channel, should fail.
- {
- chanPoint: op,
- pass: false,
- },
- }
- for i, testCase := range testCases {
- _, err := FetchBackupForChan(testCase.chanPoint, chanSource)
- switch {
- // If this is a valid test case, and we failed, then we'll
- // return an error.
- case err != nil && testCase.pass:
- t.Fatalf("#%v, unable to make chan backup: %v", i, err)
-
- // If this is an invalid test case, and we passed it, then
- // we'll return an error.
- case err == nil && !testCase.pass:
- t.Fatalf("#%v got nil error for invalid req: %v",
- i, err)
- }
- }
-}
-
-// TestFetchStaticChanBackups tests that we're able to properly query the
-// channel source for all channels and construct a Single for each channel.
-func TestFetchStaticChanBackups(t *testing.T) {
- t.Parallel()
-
- // First, we'll make the set of channels that we want to seed the
- // channel source with. Both channels will be fully populated in the
- // channel source.
- const numChans = 2
- randomChan1, err := genRandomOpenChannelShell()
- if err != nil {
- t.Fatalf("unable to generate chan: %v", err)
- }
- randomChan2, err := genRandomOpenChannelShell()
- if err != nil {
- t.Fatalf("unable to generate chan: %v", err)
- }
-
- chanSource := newMockChannelSource()
- chanSource.chans[randomChan1.FundingOutpoint] = randomChan1
- chanSource.chans[randomChan2.FundingOutpoint] = randomChan2
- chanSource.addAddrsForNode(randomChan1.IdentityPub, []net.Addr{addr1})
- chanSource.addAddrsForNode(randomChan2.IdentityPub, []net.Addr{addr2})
-
- // With the channel source populated, we'll now attempt to create a set
- // of backups for all the channels. This should succeed, as all items
- // are populated within the channel source.
- backups, err := FetchStaticChanBackups(chanSource)
- if err != nil {
- t.Fatalf("unable to create chan back ups: %v", err)
- }
-
- if len(backups) != numChans {
- t.Fatalf("expected %v chans, instead got %v", numChans,
- len(backups))
- }
-
- // We'll attempt to create a set up backups again, but this time the
- // second channel will have missing information, which should cause the
- // query to fail.
- var n [33]byte
- copy(n[:], randomChan2.IdentityPub.SerializeCompressed())
- delete(chanSource.addrs, n)
-
- _, err = FetchStaticChanBackups(chanSource)
- if err == nil {
- t.Fatalf("query with incomplete information should fail")
- }
-
- // To wrap up, we'll ensure that if we're unable to query the channel
- // source at all, then we'll fail as well.
- chanSource = newMockChannelSource()
- chanSource.failQuery = true
- _, err = FetchStaticChanBackups(chanSource)
- if err == nil {
- t.Fatalf("query should fail")
- }
-}
diff --git a/lnd/chanbackup/backupfile.go b/lnd/chanbackup/backupfile.go
deleted file mode 100644
index a41affca..00000000
--- a/lnd/chanbackup/backupfile.go
+++ /dev/null
@@ -1,152 +0,0 @@
-package chanbackup
-
-import (
- "io/ioutil"
- "os"
- "path/filepath"
-
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/lnd/keychain"
- "github.com/pkt-cash/pktd/pktlog/log"
-)
-
-const (
- // DefaultBackupFileName is the default name of the auto updated static
- // channel backup fie.
- DefaultBackupFileName = "channel.backup"
-
- // DefaultTempBackupFileName is the default name of the temporary SCB
- // file that we'll use to atomically update the primary back up file
- // when new channel are detected.
- DefaultTempBackupFileName = "temp-dont-use.backup"
-)
-
-var (
- // ErrNoBackupFileExists is returned if caller attempts to call
- // UpdateAndSwap with the file name not set.
- ErrNoBackupFileExists = er.GenericErrorType.CodeWithDetail("ErrNoBackupFileExists",
- "back up file name not set")
-
- // ErrNoTempBackupFile is returned if caller attempts to call
- // UpdateAndSwap with the temp back up file name not set.
- ErrNoTempBackupFile = er.GenericErrorType.CodeWithDetail("ErrNoTempBackupFile",
- "temp backup file not set")
-)
-
-// MultiFile represents a file on disk that a caller can use to read the packed
-// multi backup into an unpacked one, and also atomically update the contents
-// on disk once new channels have been opened, and old ones closed. This struct
-// relies on an atomic file rename property which most widely use file systems
-// have.
-type MultiFile struct {
- // fileName is the file name of the main back up file.
- fileName string
-
- // tempFileName is the name of the file that we'll use to stage a new
- // packed multi-chan backup, and the rename to the main back up file.
- tempFileName string
-
- // tempFile is an open handle to the temp back up file.
- tempFile *os.File
-}
-
-// NewMultiFile create a new multi-file instance at the target location on the
-// file system.
-func NewMultiFile(fileName string) *MultiFile {
-
- // We'll our temporary backup file in the very same directory as the
- // main backup file.
- backupFileDir := filepath.Dir(fileName)
- tempFileName := filepath.Join(
- backupFileDir, DefaultTempBackupFileName,
- )
-
- return &MultiFile{
- fileName: fileName,
- tempFileName: tempFileName,
- }
-}
-
-// UpdateAndSwap will attempt write a new temporary backup file to disk with
-// the newBackup encoded, then atomically swap (via rename) the old file for
-// the new file by updating the name of the new file to the old.
-func (b *MultiFile) UpdateAndSwap(newBackup PackedMulti) er.R {
- // If the main backup file isn't set, then we can't proceed.
- if b.fileName == "" {
- return ErrNoBackupFileExists.Default()
- }
-
- log.Infof("Updating backup file at %v", b.fileName)
-
- // If the old back up file still exists, then we'll delete it before
- // proceeding.
- if _, err := os.Stat(b.tempFileName); err == nil {
- log.Infof("Found old temp backup @ %v, removing before swap",
- b.tempFileName)
-
- err = os.Remove(b.tempFileName)
- if err != nil {
- return er.Errorf("unable to remove temp "+
- "backup file: %v", err)
- }
- }
-
- // Now that we know the staging area is clear, we'll create the new
- // temporary back up file.
- var err error
- b.tempFile, err = os.Create(b.tempFileName)
- if err != nil {
- return er.Errorf("unable to create temp file: %v", err)
- }
-
- // With the file created, we'll write the new packed multi backup and
- // remove the temporary file all together once this method exits.
- _, err = b.tempFile.Write([]byte(newBackup))
- if err != nil {
- return er.Errorf("unable to write backup to temp file: %v", err)
- }
- if err := b.tempFile.Sync(); err != nil {
- return er.Errorf("unable to sync temp file: %v", err)
- }
- defer os.Remove(b.tempFileName)
-
- log.Infof("Swapping old multi backup file from %v to %v",
- b.tempFileName, b.fileName)
-
- // Before we rename the swap (atomic name swap), we'll make
- // sure to close the current file as some OSes don't support
- // renaming a file that's already open (Windows).
- if err := b.tempFile.Close(); err != nil {
- return er.Errorf("unable to close file: %v", err)
- }
-
- // Finally, we'll attempt to atomically rename the temporary file to
- // the main back up file. If this succeeds, then we'll only have a
- // single file on disk once this method exits.
- return er.E(os.Rename(b.tempFileName, b.fileName))
-}
-
-// ExtractMulti attempts to extract the packed multi backup we currently point
-// to into an unpacked version. This method will fail if no backup file
-// currently exists as the specified location.
-func (b *MultiFile) ExtractMulti(keyChain keychain.KeyRing) (*Multi, er.R) {
- var err error
-
- // We'll return an error if the main file isn't currently set.
- if b.fileName == "" {
- return nil, ErrNoBackupFileExists.Default()
- }
-
- // Now that we've confirmed the target file is populated, we'll read
- // all the contents of the file. This function ensures that file is
- // always closed, even if we can't read the contents.
- multiBytes, err := ioutil.ReadFile(b.fileName)
- if err != nil {
- return nil, er.E(err)
- }
-
- // Finally, we'll attempt to unpack the file and return the unpack
- // version to the caller.
- packedMulti := PackedMulti(multiBytes)
- return packedMulti.Unpack(keyChain)
-}
diff --git a/lnd/chanbackup/backupfile_test.go b/lnd/chanbackup/backupfile_test.go
deleted file mode 100644
index e30ec572..00000000
--- a/lnd/chanbackup/backupfile_test.go
+++ /dev/null
@@ -1,290 +0,0 @@
-package chanbackup
-
-import (
- "bytes"
- "io/ioutil"
- "math/rand"
- "os"
- "path/filepath"
- "testing"
-
- "github.com/pkt-cash/pktd/btcutil/er"
-)
-
-func makeFakePackedMulti() (PackedMulti, er.R) {
- newPackedMulti := make([]byte, 50)
- if _, err := rand.Read(newPackedMulti[:]); err != nil {
- return nil, er.Errorf("unable to make test backup: %v", err)
- }
-
- return PackedMulti(newPackedMulti), nil
-}
-
-func assertBackupMatches(t *testing.T, filePath string,
- currentBackup PackedMulti) {
-
- t.Helper()
-
- packedBackup, err := ioutil.ReadFile(filePath)
- if err != nil {
- t.Fatalf("unable to test file: %v", err)
- }
-
- if !bytes.Equal(packedBackup, currentBackup) {
- t.Fatalf("backups don't match after first swap: "+
- "expected %x got %x", packedBackup[:],
- currentBackup)
- }
-}
-
-func assertFileDeleted(t *testing.T, filePath string) {
- t.Helper()
-
- _, err := os.Stat(filePath)
- if err == nil {
- t.Fatalf("file %v still exists: ", filePath)
- }
-}
-
-// TestUpdateAndSwap test that we're able to properly swap out old backups on
-// disk with new ones. Additionally, after a swap operation succeeds, then each
-// time we should only have the main backup file on disk, as the temporary file
-// has been removed.
-func TestUpdateAndSwap(t *testing.T) {
- t.Parallel()
-
- tempTestDir, err := ioutil.TempDir("", "")
- if err != nil {
- t.Fatalf("unable to make temp dir: %v", err)
- }
- defer os.Remove(tempTestDir)
-
- testCases := []struct {
- fileName string
- tempFileName string
-
- oldTempExists bool
-
- valid bool
- }{
- // Main file name is blank, should fail.
- {
- fileName: "",
- valid: false,
- },
-
- // Old temporary file still exists, should be removed. Only one
- // file should remain.
- {
- fileName: filepath.Join(
- tempTestDir, DefaultBackupFileName,
- ),
- tempFileName: filepath.Join(
- tempTestDir, DefaultTempBackupFileName,
- ),
- oldTempExists: true,
- valid: true,
- },
-
- // Old temp doesn't exist, should swap out file, only a single
- // file remains.
- {
- fileName: filepath.Join(
- tempTestDir, DefaultBackupFileName,
- ),
- tempFileName: filepath.Join(
- tempTestDir, DefaultTempBackupFileName,
- ),
- valid: true,
- },
- }
- for i, testCase := range testCases {
- // Ensure that all created files are removed at the end of the
- // test case.
- defer os.Remove(testCase.fileName)
- defer os.Remove(testCase.tempFileName)
-
- backupFile := NewMultiFile(testCase.fileName)
-
- // To start with, we'll make a random byte slice that'll pose
- // as our packed multi backup.
- newPackedMulti, err := makeFakePackedMulti()
- if err != nil {
- t.Fatalf("unable to make test backup: %v", err)
- }
-
- // If the old temporary file is meant to exist, then we'll
- // create it now as an empty file.
- if testCase.oldTempExists {
- _, err := os.Create(testCase.tempFileName)
- if err != nil {
- t.Fatalf("unable to create temp file: %v", err)
- }
-
- // TODO(roasbeef): mock out fs calls?
- }
-
- // With our backup created, we'll now attempt to swap out this
- // backup, for the old one.
- err = backupFile.UpdateAndSwap(PackedMulti(newPackedMulti))
- switch {
- // If this is a valid test case, and we failed, then we'll
- // return an error.
- case err != nil && testCase.valid:
- t.Fatalf("#%v, unable to swap file: %v", i, err)
-
- // If this is an invalid test case, and we passed it, then
- // we'll return an error.
- case err == nil && !testCase.valid:
- t.Fatalf("#%v file swap should have failed: %v", i, err)
- }
-
- if !testCase.valid {
- continue
- }
-
- // If we read out the file on disk, then it should match
- // exactly what we wrote. The temp backup file should also be
- // gone.
- assertBackupMatches(t, testCase.fileName, newPackedMulti)
- assertFileDeleted(t, testCase.tempFileName)
-
- // Now that we know this is a valid test case, we'll make a new
- // packed multi to swap out this current one.
- newPackedMulti2, err := makeFakePackedMulti()
- if err != nil {
- t.Fatalf("unable to make test backup: %v", err)
- }
-
- // We'll then attempt to swap the old version for this new one.
- err = backupFile.UpdateAndSwap(PackedMulti(newPackedMulti2))
- if err != nil {
- t.Fatalf("unable to swap file: %v", err)
- }
-
- // Once again, the file written on disk should have been
- // properly swapped out with the new instance.
- assertBackupMatches(t, testCase.fileName, newPackedMulti2)
-
- // Additionally, we shouldn't be able to find the temp backup
- // file on disk, as it should be deleted each time.
- assertFileDeleted(t, testCase.tempFileName)
- }
-}
-
-func assertMultiEqual(t *testing.T, a, b *Multi) {
-
- if len(a.StaticBackups) != len(b.StaticBackups) {
- t.Fatalf("expected %v backups, got %v", len(a.StaticBackups),
- len(b.StaticBackups))
- }
-
- for i := 0; i < len(a.StaticBackups); i++ {
- assertSingleEqual(t, a.StaticBackups[i], b.StaticBackups[i])
- }
-}
-
-// TestExtractMulti tests that given a valid packed multi file on disk, we're
-// able to read it multiple times repeatedly.
-func TestExtractMulti(t *testing.T) {
- t.Parallel()
-
- keyRing := &mockKeyRing{}
-
- // First, as prep, we'll create a single chan backup, then pack that
- // fully into a multi backup.
- channel, err := genRandomOpenChannelShell()
- if err != nil {
- t.Fatalf("unable to gen chan: %v", err)
- }
-
- singleBackup := NewSingle(channel, nil)
-
- var b bytes.Buffer
- unpackedMulti := Multi{
- StaticBackups: []Single{singleBackup},
- }
- err = unpackedMulti.PackToWriter(&b, keyRing)
- if err != nil {
- t.Fatalf("unable to pack to writer: %v", err)
- }
-
- packedMulti := PackedMulti(b.Bytes())
-
- // Finally, we'll make a new temporary file, then write out the packed
- // multi directly to to it.
- tempFile, errr := ioutil.TempFile("", "")
- if errr != nil {
- t.Fatalf("unable to create temp file: %v", errr)
- }
- defer os.Remove(tempFile.Name())
-
- _, errr = tempFile.Write(packedMulti)
- if errr != nil {
- t.Fatalf("unable to write temp file: %v", errr)
- }
- if err := tempFile.Sync(); err != nil {
- t.Fatalf("unable to sync temp file: %v", err)
- }
-
- testCases := []struct {
- fileName string
- pass bool
- }{
- // Main file not read, file name not present.
- {
- fileName: "",
- pass: false,
- },
-
- // Main file not read, file name is there, but file doesn't
- // exist.
- {
- fileName: "kek",
- pass: false,
- },
-
- // Main file not read, should be able to read multiple times.
- {
- fileName: tempFile.Name(),
- pass: true,
- },
- }
- for i, testCase := range testCases {
- // First, we'll make our backup file with the specified name.
- backupFile := NewMultiFile(testCase.fileName)
-
- // With our file made, we'll now attempt to read out the
- // multi-file.
- freshUnpackedMulti, err := backupFile.ExtractMulti(keyRing)
- switch {
- // If this is a valid test case, and we failed, then we'll
- // return an error.
- case err != nil && testCase.pass:
- t.Fatalf("#%v, unable to extract file: %v", i, err)
-
- // If this is an invalid test case, and we passed it, then
- // we'll return an error.
- case err == nil && !testCase.pass:
- t.Fatalf("#%v file extraction should have "+
- "failed: %v", i, err)
- }
-
- if !testCase.pass {
- continue
- }
-
- // We'll now ensure that the unpacked multi we read is
- // identical to the one we wrote out above.
- assertMultiEqual(t, &unpackedMulti, freshUnpackedMulti)
-
- // We should also be able to read the file again, as we have an
- // existing handle to it.
- freshUnpackedMulti, err = backupFile.ExtractMulti(keyRing)
- if err != nil {
- t.Fatalf("unable to unpack multi: %v", err)
- }
-
- assertMultiEqual(t, &unpackedMulti, freshUnpackedMulti)
- }
-}
diff --git a/lnd/chanbackup/crypto.go b/lnd/chanbackup/crypto.go
deleted file mode 100644
index e2e559a7..00000000
--- a/lnd/chanbackup/crypto.go
+++ /dev/null
@@ -1,141 +0,0 @@
-package chanbackup
-
-import (
- "bytes"
- "crypto/rand"
- "crypto/sha256"
- "io"
- "io/ioutil"
-
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/btcutil/util"
- "github.com/pkt-cash/pktd/lnd/keychain"
- "golang.org/x/crypto/chacha20poly1305"
-)
-
-// TODO(roasbeef): interface in front of?
-
-// baseEncryptionKeyLoc is the KeyLocator that we'll use to derive the base
-// encryption key used for encrypting all static channel backups. We use this
-// to then derive the actual key that we'll use for encryption. We do this
-// rather than using the raw key, as we assume that we can't obtain the raw
-// keys, and we don't want to require that the HSM know our target cipher for
-// encryption.
-//
-// TODO(roasbeef): possibly unique encrypt?
-var baseEncryptionKeyLoc = keychain.KeyLocator{
- Family: keychain.KeyFamilyStaticBackup,
- Index: 0,
-}
-
-// genEncryptionKey derives the key that we'll use to encrypt all of our static
-// channel backups. The key itself, is the sha2 of a base key that we get from
-// the keyring. We derive the key this way as we don't force the HSM (or any
-// future abstractions) to be able to derive and know of the cipher that we'll
-// use within our protocol.
-func genEncryptionKey(keyRing keychain.KeyRing) ([]byte, er.R) {
- // key = SHA256(baseKey)
- baseKey, err := keyRing.DeriveKey(
- baseEncryptionKeyLoc,
- )
- if err != nil {
- return nil, err
- }
-
- encryptionKey := sha256.Sum256(
- baseKey.PubKey.SerializeCompressed(),
- )
-
- // TODO(roasbeef): throw back in ECDH?
-
- return encryptionKey[:], nil
-}
-
-// encryptPayloadToWriter attempts to write the set of bytes contained within
-// the passed byes.Buffer into the passed io.Writer in an encrypted form. We
-// use a 24-byte chachapoly AEAD instance with a randomized nonce that's
-// pre-pended to the final payload and used as associated data in the AEAD. We
-// use the passed keyRing to generate the encryption key, see genEncryptionKey
-// for further details.
-func encryptPayloadToWriter(payload bytes.Buffer, w io.Writer,
- keyRing keychain.KeyRing) er.R {
-
- // First, we'll derive the key that we'll use to encrypt the payload
- // for safe storage without giving away the details of any of our
- // channels. The final operation is:
- //
- // key = SHA256(baseKey)
- encryptionKey, err := genEncryptionKey(keyRing)
- if err != nil {
- return err
- }
-
- // Before encryption, we'll initialize our cipher with the target
- // encryption key, and also read out our random 24-byte nonce we use
- // for encryption. Note that we use NewX, not New, as the latter
- // version requires a 12-byte nonce, not a 24-byte nonce.
- cipher, errr := chacha20poly1305.NewX(encryptionKey)
- if errr != nil {
- return er.E(errr)
- }
- var nonce [chacha20poly1305.NonceSizeX]byte
- if _, errr := rand.Read(nonce[:]); errr != nil {
- return er.E(errr)
- }
-
- // Finally, we encrypted the final payload, and write out our
- // ciphertext with nonce pre-pended.
- ciphertext := cipher.Seal(nil, nonce[:], payload.Bytes(), nonce[:])
-
- if _, err := util.Write(w, nonce[:]); err != nil {
- return err
- }
- if _, err := util.Write(w, ciphertext); err != nil {
- return err
- }
-
- return nil
-}
-
-// decryptPayloadFromReader attempts to decrypt the encrypted bytes within the
-// passed io.Reader instance using the key derived from the passed keyRing. For
-// further details regarding the key derivation protocol, see the
-// genEncryptionKey method.
-func decryptPayloadFromReader(payload io.Reader,
- keyRing keychain.KeyRing) ([]byte, er.R) {
-
- // First, we'll re-generate the encryption key that we use for all the
- // SCBs.
- encryptionKey, err := genEncryptionKey(keyRing)
- if err != nil {
- return nil, err
- }
-
- // Next, we'll read out the entire blob as we need to isolate the nonce
- // from the rest of the ciphertext.
- packedBackup, errr := ioutil.ReadAll(payload)
- if errr != nil {
- return nil, er.E(errr)
- }
- if len(packedBackup) < chacha20poly1305.NonceSizeX {
- return nil, er.Errorf("payload size too small, must be at "+
- "least %v bytes", chacha20poly1305.NonceSizeX)
- }
-
- nonce := packedBackup[:chacha20poly1305.NonceSizeX]
- ciphertext := packedBackup[chacha20poly1305.NonceSizeX:]
-
- // Now that we have the cipher text and the nonce separated, we can go
- // ahead and decrypt the final blob so we can properly serialized the
- // SCB.
- cipher, errr := chacha20poly1305.NewX(encryptionKey)
- if errr != nil {
- return nil, er.E(errr)
- }
- plaintext, errr := cipher.Open(nil, nonce, ciphertext, nonce)
- if errr != nil {
- return nil, er.E(errr)
- }
-
- return plaintext, nil
-}
diff --git a/lnd/chanbackup/crypto_test.go b/lnd/chanbackup/crypto_test.go
deleted file mode 100644
index fd2039f4..00000000
--- a/lnd/chanbackup/crypto_test.go
+++ /dev/null
@@ -1,156 +0,0 @@
-package chanbackup
-
-import (
- "bytes"
- "testing"
-
- "github.com/pkt-cash/pktd/btcec"
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/lnd/keychain"
-)
-
-var (
- testWalletPrivKey = []byte{
- 0x2b, 0xd8, 0x06, 0xc9, 0x7f, 0x0e, 0x00, 0xaf,
- 0x1a, 0x1f, 0xc3, 0x32, 0x8f, 0xa7, 0x63, 0xa9,
- 0x26, 0x97, 0x23, 0xc8, 0xdb, 0x8f, 0xac, 0x4f,
- 0x93, 0xaf, 0x71, 0xdb, 0x18, 0x6d, 0x6e, 0x90,
- }
-)
-
-type mockKeyRing struct {
- fail bool
-}
-
-func (m *mockKeyRing) DeriveNextKey(keyFam keychain.KeyFamily) (keychain.KeyDescriptor, er.R) {
- return keychain.KeyDescriptor{}, nil
-}
-func (m *mockKeyRing) DeriveKey(keyLoc keychain.KeyLocator) (keychain.KeyDescriptor, er.R) {
- if m.fail {
- return keychain.KeyDescriptor{}, er.Errorf("fail")
- }
-
- _, pub := btcec.PrivKeyFromBytes(btcec.S256(), testWalletPrivKey)
- return keychain.KeyDescriptor{
- PubKey: pub,
- }, nil
-}
-
-// TestEncryptDecryptPayload tests that given a static key, we're able to
-// properly decrypt and encrypted payload. We also test that we'll reject a
-// ciphertext that has been modified.
-func TestEncryptDecryptPayload(t *testing.T) {
- t.Parallel()
-
- payloadCases := []struct {
- // plaintext is the string that we'll be encrypting.
- plaintext []byte
-
- // mutator allows a test case to modify the ciphertext before
- // we attempt to decrypt it.
- mutator func(*[]byte)
-
- // valid indicates if this test should pass or fail.
- valid bool
- }{
- // Proper payload, should decrypt.
- {
- plaintext: []byte("payload test plain text"),
- mutator: nil,
- valid: true,
- },
-
- // Mutator modifies cipher text, shouldn't decrypt.
- {
- plaintext: []byte("payload test plain text"),
- mutator: func(p *[]byte) {
- // Flip a byte in the payload to render it invalid.
- (*p)[0] ^= 1
- },
- valid: false,
- },
-
- // Cipher text is too small, shouldn't decrypt.
- {
- plaintext: []byte("payload test plain text"),
- mutator: func(p *[]byte) {
- // Modify the cipher text to be zero length.
- *p = []byte{}
- },
- valid: false,
- },
- }
-
- keyRing := &mockKeyRing{}
-
- for i, payloadCase := range payloadCases {
- var cipherBuffer bytes.Buffer
-
- // First, we'll encrypt the passed payload with our scheme.
- payloadReader := bytes.NewBuffer(payloadCase.plaintext)
- err := encryptPayloadToWriter(
- *payloadReader, &cipherBuffer, keyRing,
- )
- if err != nil {
- t.Fatalf("unable encrypt paylaod: %v", err)
- }
-
- // If we have a mutator, then we'll wrong the mutator over the
- // cipher text, then reset the main buffer and re-write the new
- // cipher text.
- if payloadCase.mutator != nil {
- cipherText := cipherBuffer.Bytes()
-
- payloadCase.mutator(&cipherText)
-
- cipherBuffer.Reset()
- cipherBuffer.Write(cipherText)
- }
-
- plaintext, err := decryptPayloadFromReader(&cipherBuffer, keyRing)
-
- switch {
- // If this was meant to be a valid decryption, but we failed,
- // then we'll return an error.
- case err != nil && payloadCase.valid:
- t.Fatalf("unable to decrypt valid payload case %v", i)
-
- // If this was meant to be an invalid decryption, and we didn't
- // fail, then we'll return an error.
- case err == nil && !payloadCase.valid:
- t.Fatalf("payload was invalid yet was able to decrypt")
- }
-
- // Only if this case was mean to be valid will we ensure the
- // resulting decrypted plaintext matches the original input.
- if payloadCase.valid &&
- !bytes.Equal(plaintext, payloadCase.plaintext) {
- t.Fatalf("#%v: expected %v, got %v: ", i,
- payloadCase.plaintext, plaintext)
- }
- }
-}
-
-// TestInvalidKeyEncryption tests that encryption fails if we're unable to
-// obtain a valid key.
-func TestInvalidKeyEncryption(t *testing.T) {
- t.Parallel()
-
- var b bytes.Buffer
- err := encryptPayloadToWriter(b, &b, &mockKeyRing{true})
- if err == nil {
- t.Fatalf("expected error due to fail key gen")
- }
-}
-
-// TestInvalidKeyDecrytion tests that decryption fails if we're unable to
-// obtain a valid key.
-func TestInvalidKeyDecrytion(t *testing.T) {
- t.Parallel()
-
- var b bytes.Buffer
- _, err := decryptPayloadFromReader(&b, &mockKeyRing{true})
- if err == nil {
- t.Fatalf("expected error due to fail key gen")
- }
-}
diff --git a/lnd/chanbackup/multi.go b/lnd/chanbackup/multi.go
deleted file mode 100644
index cad73e41..00000000
--- a/lnd/chanbackup/multi.go
+++ /dev/null
@@ -1,181 +0,0 @@
-package chanbackup
-
-import (
- "bytes"
- "io"
-
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/lnd/keychain"
- "github.com/pkt-cash/pktd/lnd/lnwire"
-)
-
-// MultiBackupVersion denotes the version of the multi channel static channel
-// backup. Based on this version, we know how to encode/decode packed/unpacked
-// versions of multi backups.
-type MultiBackupVersion byte
-
-const (
- // DefaultMultiVersion is the default version of the multi channel
- // backup. The serialized format for this version is simply: version ||
- // numBackups || SCBs...
- DefaultMultiVersion = 0
-
- // NilMultiSizePacked is the size of a "nil" packed Multi (45 bytes).
- // This consists of the 24 byte chacha nonce, the 16 byte MAC, one byte
- // for the version, and 4 bytes to signal zero entries.
- NilMultiSizePacked = 24 + 16 + 1 + 4
-)
-
-// Multi is a form of static channel backup that is amenable to being
-// serialized in a single file. Rather than a series of ciphertexts, a
-// multi-chan backup is a single ciphertext of all static channel backups
-// concatenated. This form factor gives users a single blob that they can use
-// to safely copy/obtain at anytime to backup their channels.
-type Multi struct {
- // Version is the version that should be observed when attempting to
- // pack the multi backup.
- Version MultiBackupVersion
-
- // StaticBackups is the set of single channel backups that this multi
- // backup is comprised of.
- StaticBackups []Single
-}
-
-// PackToWriter packs (encrypts+serializes) the target set of static channel
-// backups into a single AEAD ciphertext into the passed io.Writer. This is the
-// opposite of UnpackFromReader. The plaintext form of a multi-chan backup is
-// the following: a 4 byte integer denoting the number of serialized static
-// channel backups serialized, a series of serialized static channel backups
-// concatenated. To pack this payload, we then apply our chacha20 AEAD to the
-// entire payload, using the 24-byte nonce as associated data.
-func (m Multi) PackToWriter(w io.Writer, keyRing keychain.KeyRing) er.R {
- // The only version that we know how to pack atm is version 0. Attempts
- // to pack any other version will result in an error.
- switch m.Version {
- case DefaultMultiVersion:
- break
-
- default:
- return er.Errorf("unable to pack unknown multi-version "+
- "of %v", m.Version)
- }
-
- var multiBackupBuffer bytes.Buffer
-
- // First, we'll write out the version of this multi channel baackup.
- err := lnwire.WriteElements(&multiBackupBuffer, byte(m.Version))
- if err != nil {
- return err
- }
-
- // Now that we've written out the version of this multi-pack format,
- // we'll now write the total number of backups to expect after this
- // point.
- numBackups := uint32(len(m.StaticBackups))
- err = lnwire.WriteElements(&multiBackupBuffer, numBackups)
- if err != nil {
- return err
- }
-
- // Next, we'll serialize the raw plaintext version of each of the
- // backup into the intermediate buffer.
- for _, chanBackup := range m.StaticBackups {
- err := chanBackup.Serialize(&multiBackupBuffer)
- if err != nil {
- return er.Errorf("unable to serialize backup "+
- "for %v: %v", chanBackup.FundingOutpoint, err)
- }
- }
-
- // With the plaintext multi backup assembled, we'll now encrypt it
- // directly to the passed writer.
- return encryptPayloadToWriter(multiBackupBuffer, w, keyRing)
-}
-
-// UnpackFromReader attempts to unpack (decrypt+deserialize) a packed
-// multi-chan backup form the passed io.Reader. If we're unable to decrypt the
-// any portion of the multi-chan backup, an error will be returned.
-func (m *Multi) UnpackFromReader(r io.Reader, keyRing keychain.KeyRing) er.R {
- // We'll attempt to read the entire packed backup, and also decrypt it
- // using the passed key ring which is expected to be able to derive the
- // encryption keys.
- plaintextBackup, err := decryptPayloadFromReader(r, keyRing)
- if err != nil {
- return err
- }
- backupReader := bytes.NewReader(plaintextBackup)
-
- // Now that we've decrypted the payload successfully, we can parse out
- // each of the individual static channel backups.
-
- // First, we'll need to read the version of this multi-back up so we
- // can know how to unpack each of the individual SCB's.
- var multiVersion byte
- err = lnwire.ReadElements(backupReader, &multiVersion)
- if err != nil {
- return err
- }
-
- m.Version = MultiBackupVersion(multiVersion)
- switch m.Version {
-
- // The default version is simply a set of serialized SCB's with the
- // number of total SCB's prepended to the front of the byte slice.
- case DefaultMultiVersion:
- // First, we'll need to read out the total number of backups
- // that've been serialized into this multi-chan backup. Each
- // backup is the same size, so we can continue until we've
- // parsed out everything.
- var numBackups uint32
- err = lnwire.ReadElements(backupReader, &numBackups)
- if err != nil {
- return err
- }
-
- // We'll continue to parse out each backup until we've read all
- // that was indicated from the length prefix.
- for ; numBackups != 0; numBackups-- {
- // Attempt to parse out the net static channel backup,
- // if it's been malformed, then we'll return with an
- // error
- var chanBackup Single
- err := chanBackup.Deserialize(backupReader)
- if err != nil {
- return err
- }
-
- // Collect the next valid chan backup into the main
- // multi backup slice.
- m.StaticBackups = append(m.StaticBackups, chanBackup)
- }
-
- default:
- return er.Errorf("unable to unpack unknown multi-version "+
- "of %v", multiVersion)
- }
-
- return nil
-}
-
-// TODO(roasbeef): new key ring interface?
-// * just returns key given params?
-
-// PackedMulti represents a raw fully packed (serialized+encrypted)
-// multi-channel static channel backup.
-type PackedMulti []byte
-
-// Unpack attempts to unpack (decrypt+desrialize) the target packed
-// multi-channel back up. If we're unable to fully unpack this back, then an
-// error will be returned.
-func (p *PackedMulti) Unpack(keyRing keychain.KeyRing) (*Multi, er.R) {
- var m Multi
-
- packedReader := bytes.NewReader(*p)
- if err := m.UnpackFromReader(packedReader, keyRing); err != nil {
- return nil, err
- }
-
- return &m, nil
-}
-
-// TODO(roasbsef): fuzz parsing
diff --git a/lnd/chanbackup/multi_test.go b/lnd/chanbackup/multi_test.go
deleted file mode 100644
index a6317e09..00000000
--- a/lnd/chanbackup/multi_test.go
+++ /dev/null
@@ -1,159 +0,0 @@
-package chanbackup
-
-import (
- "bytes"
- "net"
- "testing"
-)
-
-// TestMultiPackUnpack...
-func TestMultiPackUnpack(t *testing.T) {
- t.Parallel()
-
- var multi Multi
- numSingles := 10
- originalSingles := make([]Single, 0, numSingles)
- for i := 0; i < numSingles; i++ {
- channel, err := genRandomOpenChannelShell()
- if err != nil {
- t.Fatalf("unable to gen channel: %v", err)
- }
-
- single := NewSingle(channel, []net.Addr{addr1, addr2})
-
- originalSingles = append(originalSingles, single)
- multi.StaticBackups = append(multi.StaticBackups, single)
- }
-
- keyRing := &mockKeyRing{}
-
- versionTestCases := []struct {
- // version is the pack/unpack version that we should use to
- // decode/encode the final SCB.
- version MultiBackupVersion
-
- // valid tests us if this test case should pass or not.
- valid bool
- }{
- // The default version, should pack/unpack with no problem.
- {
- version: DefaultSingleVersion,
- valid: true,
- },
-
- // A non-default version, atm this should result in a failure.
- {
- version: 99,
- valid: false,
- },
- }
- for i, versionCase := range versionTestCases {
- multi.Version = versionCase.version
-
- var b bytes.Buffer
- err := multi.PackToWriter(&b, keyRing)
- switch {
- // If this is a valid test case, and we failed, then we'll
- // return an error.
- case err != nil && versionCase.valid:
- t.Fatalf("#%v, unable to pack multi: %v", i, err)
-
- // If this is an invalid test case, and we passed it, then
- // we'll return an error.
- case err == nil && !versionCase.valid:
- t.Fatalf("#%v got nil error for invalid pack: %v",
- i, err)
- }
-
- // If this is a valid test case, then we'll continue to ensure
- // we can unpack it, and also that if we mutate the packed
- // version, then we trigger an error.
- if versionCase.valid {
- var unpackedMulti Multi
- err = unpackedMulti.UnpackFromReader(&b, keyRing)
- if err != nil {
- t.Fatalf("#%v unable to unpack multi: %v",
- i, err)
- }
-
- // First, we'll ensure that the unpacked version of the
- // packed multi is the same as the original set.
- if len(originalSingles) !=
- len(unpackedMulti.StaticBackups) {
- t.Fatalf("expected %v singles, got %v",
- len(originalSingles),
- len(unpackedMulti.StaticBackups))
- }
- for i := 0; i < numSingles; i++ {
- assertSingleEqual(
- t, originalSingles[i],
- unpackedMulti.StaticBackups[i],
- )
- }
-
- // Next, we'll make a fake packed multi, it'll have an
- // unknown version relative to what's implemented atm.
- var fakePackedMulti bytes.Buffer
- fakeRawMulti := bytes.NewBuffer(
- bytes.Repeat([]byte{99}, 20),
- )
- err := encryptPayloadToWriter(
- *fakeRawMulti, &fakePackedMulti, keyRing,
- )
- if err != nil {
- t.Fatalf("unable to pack fake multi; %v", err)
- }
-
- // We should reject this fake multi as it contains an
- // unknown version.
- err = unpackedMulti.UnpackFromReader(
- &fakePackedMulti, keyRing,
- )
- if err == nil {
- t.Fatalf("#%v unpack with unknown version "+
- "should have failed", i)
- }
- }
- }
-}
-
-// TestPackedMultiUnpack tests that we're able to properly unpack a typed
-// packed multi.
-func TestPackedMultiUnpack(t *testing.T) {
- t.Parallel()
-
- keyRing := &mockKeyRing{}
-
- // First, we'll make a new unpacked multi with a random channel.
- testChannel, err := genRandomOpenChannelShell()
- if err != nil {
- t.Fatalf("unable to gen random channel: %v", err)
- }
- var multi Multi
- multi.StaticBackups = append(
- multi.StaticBackups, NewSingle(testChannel, nil),
- )
-
- // Now that we have our multi, we'll pack it into a new buffer.
- var b bytes.Buffer
- if err := multi.PackToWriter(&b, keyRing); err != nil {
- t.Fatalf("unable to pack multi: %v", err)
- }
-
- // We should be able to properly unpack this typed packed multi.
- packedMulti := PackedMulti(b.Bytes())
- unpackedMulti, err := packedMulti.Unpack(keyRing)
- if err != nil {
- t.Fatalf("unable to unpack multi: %v", err)
- }
-
- // Finally, the versions should match, and the unpacked singles also
- // identical.
- if multi.Version != unpackedMulti.Version {
- t.Fatalf("version mismatch: expected %v got %v",
- multi.Version, unpackedMulti.Version)
- }
- assertSingleEqual(
- t, multi.StaticBackups[0], unpackedMulti.StaticBackups[0],
- )
-}
diff --git a/lnd/chanbackup/pubsub.go b/lnd/chanbackup/pubsub.go
deleted file mode 100644
index 747578bb..00000000
--- a/lnd/chanbackup/pubsub.go
+++ /dev/null
@@ -1,311 +0,0 @@
-package chanbackup
-
-import (
- "bytes"
- "net"
- "os"
- "sync"
-
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/lnd/channeldb"
- "github.com/pkt-cash/pktd/lnd/keychain"
- "github.com/pkt-cash/pktd/pktlog/log"
- "github.com/pkt-cash/pktd/wire"
-)
-
-// Swapper is an interface that allows the chanbackup.SubSwapper to update the
-// main multi backup location once it learns of new channels or that prior
-// channels have been closed.
-type Swapper interface {
- // UpdateAndSwap attempts to atomically update the main multi back up
- // file location with the new fully packed multi-channel backup.
- UpdateAndSwap(newBackup PackedMulti) er.R
-
- // ExtractMulti attempts to obtain and decode the current SCB instance
- // stored by the Swapper instance.
- ExtractMulti(keychain keychain.KeyRing) (*Multi, er.R)
-}
-
-// ChannelWithAddrs bundles an open channel along with all the addresses for
-// the channel peer.
-type ChannelWithAddrs struct {
- *channeldb.OpenChannel
-
- // Addrs is the set of addresses that we can use to reach the target
- // peer.
- Addrs []net.Addr
-}
-
-// ChannelEvent packages a new update of new channels since subscription, and
-// channels that have been opened since prior channel event.
-type ChannelEvent struct {
- // ClosedChans are the set of channels that have been closed since the
- // last event.
- ClosedChans []wire.OutPoint
-
- // NewChans is the set of channels that have been opened since the last
- // event.
- NewChans []ChannelWithAddrs
-}
-
-// ChannelSubscription represents an intent to be notified of any updates to
-// the primary channel state.
-type ChannelSubscription struct {
- // ChanUpdates is a channel that will be sent upon once the primary
- // channel state is updated.
- ChanUpdates chan ChannelEvent
-
- // Cancel is a closure that allows the caller to cancel their
- // subscription and free up any resources allocated.
- Cancel func()
-}
-
-// ChannelNotifier represents a system that allows the chanbackup.SubSwapper to
-// be notified of any changes to the primary channel state.
-type ChannelNotifier interface {
- // SubscribeChans requests a new channel subscription relative to the
- // initial set of known channels. We use the knownChans as a
- // synchronization point to ensure that the chanbackup.SubSwapper does
- // not miss any channel open or close events in the period between when
- // it's created, and when it requests the channel subscription.
- SubscribeChans(map[wire.OutPoint]struct{}) (*ChannelSubscription, er.R)
-}
-
-// SubSwapper subscribes to new updates to the open channel state, and then
-// swaps out the on-disk channel backup state in response. This sub-system
-// that will ensure that the multi chan backup file on disk will always be
-// updated with the latest channel back up state. We'll receive new
-// opened/closed channels from the ChannelNotifier, then use the Swapper to
-// update the file state on disk with the new set of open channels. This can
-// be used to implement a system that always keeps the multi-chan backup file
-// on disk in a consistent state for safety purposes.
-type SubSwapper struct {
- started sync.Once
- stopped sync.Once
-
- // backupState are the set of SCBs for all open channels we know of.
- backupState map[wire.OutPoint]Single
-
- // chanEvents is an active subscription to receive new channel state
- // over.
- chanEvents *ChannelSubscription
-
- // keyRing is the main key ring that will allow us to pack the new
- // multi backup.
- keyRing keychain.KeyRing
-
- Swapper
-
- quit chan struct{}
- wg sync.WaitGroup
-}
-
-// NewSubSwapper creates a new instance of the SubSwapper given the starting
-// set of channels, and the required interfaces to be notified of new channel
-// updates, pack a multi backup, and swap the current best backup from its
-// storage location.
-func NewSubSwapper(startingChans []Single, chanNotifier ChannelNotifier,
- keyRing keychain.KeyRing, backupSwapper Swapper) (*SubSwapper, er.R) {
-
- // First, we'll subscribe to the latest set of channel updates given
- // the set of channels we already know of.
- knownChans := make(map[wire.OutPoint]struct{})
- for _, chanBackup := range startingChans {
- knownChans[chanBackup.FundingOutpoint] = struct{}{}
- }
- chanEvents, err := chanNotifier.SubscribeChans(knownChans)
- if err != nil {
- return nil, err
- }
-
- // Next, we'll construct our own backup state so we can add/remove
- // channels that have been opened and closed.
- backupState := make(map[wire.OutPoint]Single)
- for _, chanBackup := range startingChans {
- backupState[chanBackup.FundingOutpoint] = chanBackup
- }
-
- return &SubSwapper{
- backupState: backupState,
- chanEvents: chanEvents,
- keyRing: keyRing,
- Swapper: backupSwapper,
- quit: make(chan struct{}),
- }, nil
-}
-
-// Start starts the chanbackup.SubSwapper.
-func (s *SubSwapper) Start() er.R {
- var startErr er.R
- s.started.Do(func() {
- log.Infof("Starting chanbackup.SubSwapper")
-
- // Before we enter our main loop, we'll update the on-disk
- // state with the latest Single state, as nodes may have new
- // advertised addresses.
- if err := s.updateBackupFile(); err != nil {
- startErr = er.Errorf("unable to refresh backup "+
- "file: %v", err)
- return
- }
-
- s.wg.Add(1)
- go s.backupUpdater()
- })
-
- return startErr
-}
-
-// Stop signals the SubSwapper to being a graceful shutdown.
-func (s *SubSwapper) Stop() er.R {
- s.stopped.Do(func() {
- log.Infof("Stopping chanbackup.SubSwapper")
-
- close(s.quit)
- s.wg.Wait()
- })
- return nil
-}
-
-// updateBackupFile updates the backup file in place given the current state of
-// the SubSwapper. We accept the set of channels that were closed between this
-// update and the last to make sure we leave them out of our backup set union.
-func (s *SubSwapper) updateBackupFile(closedChans ...wire.OutPoint) er.R {
- // Before we pack the new set of SCBs, we'll first decode what we
- // already have on-disk, to make sure we can decode it (proper seed)
- // and that we're able to combine it with our new data.
- diskMulti, err := s.Swapper.ExtractMulti(s.keyRing)
-
- // If the file doesn't exist on disk, then that's OK as it was never
- // created. In this case we'll continue onwards as it isn't a critical
- // error.
- if err != nil && !os.IsNotExist(er.Wrapped(err)) {
- return er.Errorf("unable to extract on disk encrypted "+
- "SCB: %v", err)
- }
-
- // Now that we have channels stored on-disk, we'll create a new set of
- // the combined old and new channels to make sure we retain what's
- // already on-disk.
- //
- // NOTE: The ordering of this operations means that our in-memory
- // structure will replace what we read from disk.
- combinedBackup := make(map[wire.OutPoint]Single)
- if diskMulti != nil {
- for _, diskChannel := range diskMulti.StaticBackups {
- chanPoint := diskChannel.FundingOutpoint
- combinedBackup[chanPoint] = diskChannel
- }
- }
- for _, memChannel := range s.backupState {
- chanPoint := memChannel.FundingOutpoint
- if _, ok := combinedBackup[chanPoint]; ok {
- log.Warnf("Replacing disk backup for ChannelPoint(%v) "+
- "w/ newer version", chanPoint)
- }
-
- combinedBackup[chanPoint] = memChannel
- }
-
- // Remove the set of closed channels from the final set of backups.
- for _, closedChan := range closedChans {
- delete(combinedBackup, closedChan)
- }
-
- // With our updated channel state obtained, we'll create a new multi
- // from our series of singles.
- var newMulti Multi
- for _, backup := range combinedBackup {
- newMulti.StaticBackups = append(
- newMulti.StaticBackups, backup,
- )
- }
-
- // Now that our multi has been assembled, we'll attempt to pack
- // (encrypt+encode) the new channel state to our target reader.
- var b bytes.Buffer
- err = newMulti.PackToWriter(&b, s.keyRing)
- if err != nil {
- return er.Errorf("unable to pack multi backup: %v", err)
- }
-
- // Finally, we'll swap out the old backup for this new one in a single
- // atomic step, combining the file already on-disk with this set of new
- // channels.
- err = s.Swapper.UpdateAndSwap(PackedMulti(b.Bytes()))
- if err != nil {
- return er.Errorf("unable to update multi backup: %v", err)
- }
-
- return nil
-}
-
-// backupFileUpdater is the primary goroutine of the SubSwapper which is
-// responsible for listening for changes to the channel, and updating the
-// persistent multi backup state with a new packed multi of the latest channel
-// state.
-func (s *SubSwapper) backupUpdater() {
- // Ensure that once we exit, we'll cancel our active channel
- // subscription.
- defer s.chanEvents.Cancel()
- defer s.wg.Done()
-
- log.Debugf("SubSwapper's backupUpdater is active!")
-
- for {
- select {
- // The channel state has been modified! We'll evaluate all
- // changes, and swap out the old packed multi with a new one
- // with the latest channel state.
- case chanUpdate := <-s.chanEvents.ChanUpdates:
- oldStateSize := len(s.backupState)
-
- // For all new open channels, we'll create a new SCB
- // given the required information.
- for _, newChan := range chanUpdate.NewChans {
- log.Debugf("Adding channel %v to backup state",
- newChan.FundingOutpoint)
-
- s.backupState[newChan.FundingOutpoint] = NewSingle(
- newChan.OpenChannel, newChan.Addrs,
- )
- }
-
- // For all closed channels, we'll remove the prior
- // backup state.
- closedChans := make(
- []wire.OutPoint, 0, len(chanUpdate.ClosedChans),
- )
- for i, closedChan := range chanUpdate.ClosedChans {
- log.Debugf("Removing channel %v from backup "+
- "state", log.C(func() string {
- return chanUpdate.ClosedChans[i].String()
- }))
-
- delete(s.backupState, closedChan)
-
- closedChans = append(closedChans, closedChan)
- }
-
- newStateSize := len(s.backupState)
-
- log.Infof("Updating on-disk multi SCB backup: "+
- "num_old_chans=%v, num_new_chans=%v",
- oldStateSize, newStateSize)
-
- // With out new state constructed, we'll, atomically
- // update the on-disk backup state.
- if err := s.updateBackupFile(closedChans...); err != nil {
- log.Errorf("unable to update backup file: %v",
- err)
- }
-
- // TODO(roasbeef): refresh periodically on a time basis due to
- // possible addr changes from node
-
- // Exit at once if a quit signal is detected.
- case <-s.quit:
- return
- }
- }
-}
diff --git a/lnd/chanbackup/pubsub_test.go b/lnd/chanbackup/pubsub_test.go
deleted file mode 100644
index 76bc0373..00000000
--- a/lnd/chanbackup/pubsub_test.go
+++ /dev/null
@@ -1,284 +0,0 @@
-package chanbackup
-
-import (
- "testing"
- "time"
-
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/lnd/keychain"
- "github.com/pkt-cash/pktd/wire"
-)
-
-type mockSwapper struct {
- fail bool
-
- swaps chan PackedMulti
-
- swapState *Multi
-
- keyChain keychain.KeyRing
-}
-
-func newMockSwapper(keychain keychain.KeyRing) *mockSwapper {
- return &mockSwapper{
- swaps: make(chan PackedMulti, 1),
- keyChain: keychain,
- swapState: &Multi{},
- }
-}
-
-func (m *mockSwapper) UpdateAndSwap(newBackup PackedMulti) er.R {
- if m.fail {
- return er.Errorf("fail")
- }
-
- swapState, err := newBackup.Unpack(m.keyChain)
- if err != nil {
- return er.Errorf("unable to decode on disk swaps: %v", err)
- }
-
- m.swapState = swapState
-
- m.swaps <- newBackup
-
- return nil
-}
-
-func (m *mockSwapper) ExtractMulti(keychain keychain.KeyRing) (*Multi, er.R) {
- return m.swapState, nil
-}
-
-type mockChannelNotifier struct {
- fail bool
-
- chanEvents chan ChannelEvent
-}
-
-func newMockChannelNotifier() *mockChannelNotifier {
- return &mockChannelNotifier{
- chanEvents: make(chan ChannelEvent),
- }
-}
-
-func (m *mockChannelNotifier) SubscribeChans(chans map[wire.OutPoint]struct{}) (
- *ChannelSubscription, er.R) {
-
- if m.fail {
- return nil, er.Errorf("fail")
- }
-
- return &ChannelSubscription{
- ChanUpdates: m.chanEvents,
- Cancel: func() {
- },
- }, nil
-}
-
-// TestNewSubSwapperSubscribeFail tests that if we're unable to obtain a
-// channel subscription, then the entire sub-swapper will fail to start.
-func TestNewSubSwapperSubscribeFail(t *testing.T) {
- t.Parallel()
-
- keyRing := &mockKeyRing{}
-
- var swapper mockSwapper
- chanNotifier := mockChannelNotifier{
- fail: true,
- }
-
- _, err := NewSubSwapper(nil, &chanNotifier, keyRing, &swapper)
- if err == nil {
- t.Fatalf("expected fail due to lack of subscription")
- }
-}
-
-func assertExpectedBackupSwap(t *testing.T, swapper *mockSwapper,
- subSwapper *SubSwapper, keyRing keychain.KeyRing,
- expectedChanSet map[wire.OutPoint]Single) {
-
- t.Helper()
-
- select {
- case newPackedMulti := <-swapper.swaps:
- // If we unpack the new multi, then we should find all the old
- // channels, and also the new channel included and any deleted
- // channel omitted.
- newMulti, err := newPackedMulti.Unpack(keyRing)
- if err != nil {
- t.Fatalf("unable to unpack multi: %v", err)
- }
-
- // Ensure that once unpacked, the current backup has the
- // expected number of Singles.
- if len(newMulti.StaticBackups) != len(expectedChanSet) {
- t.Fatalf("new backup wasn't included: expected %v "+
- "backups have %v", len(expectedChanSet),
- len(newMulti.StaticBackups))
- }
-
- // We should also find all the old and new channels in this new
- // backup.
- for _, backup := range newMulti.StaticBackups {
- _, ok := expectedChanSet[backup.FundingOutpoint]
- if !ok {
- t.Fatalf("didn't find backup in original set: %v",
- backup.FundingOutpoint)
- }
- }
-
- // The same applies for our in-memory state, but it's also
- // possible for there to be items in the on-disk state that we
- // don't know of explicit.
- newChans := make(map[wire.OutPoint]Single)
- for _, newChan := range newMulti.StaticBackups {
- newChans[newChan.FundingOutpoint] = newChan
- }
- for _, backup := range subSwapper.backupState {
- _, ok := newChans[backup.FundingOutpoint]
- if !ok {
- t.Fatalf("didn't find backup in original set: %v",
- backup.FundingOutpoint)
- }
- }
-
- case <-time.After(time.Second * 5):
- t.Fatalf("update swapper didn't swap out multi")
- }
-}
-
-// TestSubSwapperIdempotentStartStop tests that calling the Start/Stop methods
-// multiple time is permitted.
-func TestSubSwapperIdempotentStartStop(t *testing.T) {
- t.Parallel()
-
- keyRing := &mockKeyRing{}
-
- var chanNotifier mockChannelNotifier
-
- swapper := newMockSwapper(keyRing)
- subSwapper, err := NewSubSwapper(nil, &chanNotifier, keyRing, swapper)
- if err != nil {
- t.Fatalf("unable to init subSwapper: %v", err)
- }
-
- if err := subSwapper.Start(); err != nil {
- t.Fatalf("unable to start swapper: %v", err)
- }
-
- // The swapper should write the initial channel state as soon as it's
- // active.
- backupSet := make(map[wire.OutPoint]Single)
- assertExpectedBackupSwap(t, swapper, subSwapper, keyRing, backupSet)
-
- subSwapper.Start()
-
- subSwapper.Stop()
- subSwapper.Stop()
-}
-
-// TestSubSwapperUpdater tests that the SubSwapper will properly swap out
-// new/old channels within the channel set, and notify the swapper to update
-// the master multi file backup.
-func TestSubSwapperUpdater(t *testing.T) {
- t.Parallel()
-
- keyRing := &mockKeyRing{}
- chanNotifier := newMockChannelNotifier()
- swapper := newMockSwapper(keyRing)
-
- // First, we'll start out by creating a channels set for the initial
- // set of channels known to the sub-swapper.
- const numStartingChans = 3
- initialChanSet := make([]Single, 0, numStartingChans)
- backupSet := make(map[wire.OutPoint]Single)
- for i := 0; i < numStartingChans; i++ {
- channel, err := genRandomOpenChannelShell()
- if err != nil {
- t.Fatalf("unable to make test chan: %v", err)
- }
-
- single := NewSingle(channel, nil)
-
- backupSet[channel.FundingOutpoint] = single
- initialChanSet = append(initialChanSet, single)
- }
-
- // We'll also generate two additional channels which will already be
- // present on disk. However, these will at first only be known by the
- // on disk backup (the backup set).
- const numDiskChans = 2
- for i := 0; i < numDiskChans; i++ {
- channel, err := genRandomOpenChannelShell()
- if err != nil {
- t.Fatalf("unable to make test chan: %v", err)
- }
-
- single := NewSingle(channel, nil)
-
- backupSet[channel.FundingOutpoint] = single
- swapper.swapState.StaticBackups = append(
- swapper.swapState.StaticBackups, single,
- )
- }
-
- // With our channel set created, we'll make a fresh sub swapper
- // instance to begin our test.
- subSwapper, err := NewSubSwapper(
- initialChanSet, chanNotifier, keyRing, swapper,
- )
- if err != nil {
- t.Fatalf("unable to make swapper: %v", err)
- }
- if err := subSwapper.Start(); err != nil {
- t.Fatalf("unable to start sub swapper: %v", err)
- }
- defer subSwapper.Stop()
-
- // The swapper should write the initial channel state as soon as it's
- // active.
- assertExpectedBackupSwap(t, swapper, subSwapper, keyRing, backupSet)
-
- // Now that the sub-swapper is active, we'll notify to add a brand new
- // channel to the channel state.
- newChannel, err := genRandomOpenChannelShell()
- if err != nil {
- t.Fatalf("unable to create new chan: %v", err)
- }
-
- // With the new channel created, we'll send a new update to the main
- // goroutine telling it about this new channel.
- select {
- case chanNotifier.chanEvents <- ChannelEvent{
- NewChans: []ChannelWithAddrs{
- {
- OpenChannel: newChannel,
- },
- },
- }:
- case <-time.After(time.Second * 5):
- t.Fatalf("update swapper didn't read new channel: %v", err)
- }
-
- backupSet[newChannel.FundingOutpoint] = NewSingle(newChannel, nil)
-
- // At this point, the sub-swapper should now have packed a new multi,
- // and then sent it to the swapper so the back up can be updated.
- assertExpectedBackupSwap(t, swapper, subSwapper, keyRing, backupSet)
-
- // We'll now trigger an update to remove an existing channel.
- chanToDelete := initialChanSet[0].FundingOutpoint
- select {
- case chanNotifier.chanEvents <- ChannelEvent{
- ClosedChans: []wire.OutPoint{chanToDelete},
- }:
-
- case <-time.After(time.Second * 5):
- t.Fatalf("update swapper didn't read new channel: %v", err)
- }
-
- delete(backupSet, chanToDelete)
-
- // Verify that the new set of backups, now has one less after the
- // sub-swapper switches the new set with the old.
- assertExpectedBackupSwap(t, swapper, subSwapper, keyRing, backupSet)
-}
diff --git a/lnd/chanbackup/recover.go b/lnd/chanbackup/recover.go
deleted file mode 100644
index f92eab52..00000000
--- a/lnd/chanbackup/recover.go
+++ /dev/null
@@ -1,125 +0,0 @@
-package chanbackup
-
-import (
- "net"
-
- "github.com/davecgh/go-spew/spew"
- "github.com/pkt-cash/pktd/btcec"
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/lnd/channeldb"
- "github.com/pkt-cash/pktd/lnd/keychain"
- "github.com/pkt-cash/pktd/pktlog/log"
-)
-
-// ChannelRestorer is an interface that allows the Recover method to map the
-// set of single channel backups into a set of "channel shells" and store these
-// persistently on disk. The channel shell should contain all the information
-// needed to execute the data loss recovery protocol once the channel peer is
-// connected to.
-type ChannelRestorer interface {
- // RestoreChansFromSingles attempts to map the set of single channel
- // backups to channel shells that will be stored persistently. Once
- // these shells have been stored on disk, we'll be able to connect to
- // the channel peer an execute the data loss recovery protocol.
- RestoreChansFromSingles(...Single) er.R
-}
-
-// PeerConnector is an interface that allows the Recover method to connect to
-// the target node given the set of possible addresses.
-type PeerConnector interface {
- // ConnectPeer attempts to connect to the target node at the set of
- // available addresses. Once this method returns with a non-nil error,
- // the connector should attempt to persistently connect to the target
- // peer in the background as a persistent attempt.
- ConnectPeer(node *btcec.PublicKey, addrs []net.Addr) er.R
-}
-
-// Recover attempts to recover the static channel state from a set of static
-// channel backups. If successfully, the database will be populated with a
-// series of "shell" channels. These "shell" channels cannot be used to operate
-// the channel as normal, but instead are meant to be used to enter the data
-// loss recovery phase, and recover the settled funds within
-// the channel. In addition a LinkNode will be created for each new peer as
-// well, in order to expose the addressing information required to locate to
-// and connect to each peer in order to initiate the recovery protocol.
-func Recover(backups []Single, restorer ChannelRestorer,
- peerConnector PeerConnector) er.R {
-
- for i, backup := range backups {
- log.Infof("Restoring ChannelPoint(%v) to disk: ",
- backup.FundingOutpoint)
-
- err := restorer.RestoreChansFromSingles(backup)
-
- // If a channel is already present in the channel DB, we can
- // just continue. No reason to fail a whole set of multi backups
- // for example. This allows resume of a restore in case another
- // error happens.
- if channeldb.ErrChanAlreadyExists.Is(err) {
- continue
- }
- if err != nil {
- return err
- }
-
- log.Infof("Attempting to connect to node=%x (addrs=%v) to "+
- "restore ChannelPoint(%v)",
- backup.RemoteNodePub.SerializeCompressed(),
- log.C(func() string {
- return spew.Sdump(backups[i].Addresses)
- }), backup.FundingOutpoint)
-
- err = peerConnector.ConnectPeer(
- backup.RemoteNodePub, backup.Addresses,
- )
- if err != nil {
- return err
- }
-
- // TODO(roasbeef): to handle case where node has changed addrs,
- // need to subscribe to new updates for target node pub to
- // attempt to connect to other addrs
- //
- // * just to to fresh w/ call to node addrs and de-dup?
- }
-
- return nil
-}
-
-// TODO(roasbeef): more specific keychain interface?
-
-// UnpackAndRecoverSingles is a one-shot method, that given a set of packed
-// single channel backups, will restore the channel state to a channel shell,
-// and also reach out to connect to any of the known node addresses for that
-// channel. It is assumes that after this method exists, if a connection we
-// able to be established, then then PeerConnector will continue to attempt to
-// re-establish a persistent connection in the background.
-func UnpackAndRecoverSingles(singles PackedSingles,
- keyChain keychain.KeyRing, restorer ChannelRestorer,
- peerConnector PeerConnector) er.R {
-
- chanBackups, err := singles.Unpack(keyChain)
- if err != nil {
- return err
- }
-
- return Recover(chanBackups, restorer, peerConnector)
-}
-
-// UnpackAndRecoverMulti is a one-shot method, that given a set of packed
-// multi-channel backups, will restore the channel states to channel shells,
-// and also reach out to connect to any of the known node addresses for that
-// channel. It is assumes that after this method exists, if a connection we
-// able to be established, then then PeerConnector will continue to attempt to
-// re-establish a persistent connection in the background.
-func UnpackAndRecoverMulti(packedMulti PackedMulti,
- keyChain keychain.KeyRing, restorer ChannelRestorer,
- peerConnector PeerConnector) er.R {
-
- chanBackups, err := packedMulti.Unpack(keyChain)
- if err != nil {
- return err
- }
-
- return Recover(chanBackups.StaticBackups, restorer, peerConnector)
-}
diff --git a/lnd/chanbackup/recover_test.go b/lnd/chanbackup/recover_test.go
deleted file mode 100644
index c90a8d06..00000000
--- a/lnd/chanbackup/recover_test.go
+++ /dev/null
@@ -1,232 +0,0 @@
-package chanbackup
-
-import (
- "bytes"
- "net"
- "testing"
-
- "github.com/pkt-cash/pktd/btcec"
- "github.com/pkt-cash/pktd/btcutil/er"
-)
-
-type mockChannelRestorer struct {
- fail bool
-
- callCount int
-}
-
-func (m *mockChannelRestorer) RestoreChansFromSingles(...Single) er.R {
- if m.fail {
- return er.Errorf("fail")
- }
-
- m.callCount++
-
- return nil
-}
-
-type mockPeerConnector struct {
- fail bool
-
- callCount int
-}
-
-func (m *mockPeerConnector) ConnectPeer(node *btcec.PublicKey,
- addrs []net.Addr) er.R {
-
- if m.fail {
- return er.Errorf("fail")
- }
-
- m.callCount++
-
- return nil
-}
-
-// TestUnpackAndRecoverSingles tests that we're able to properly unpack and
-// recover a set of packed singles.
-func TestUnpackAndRecoverSingles(t *testing.T) {
- t.Parallel()
-
- keyRing := &mockKeyRing{}
-
- // First, we'll create a number of single chan backups that we'll
- // shortly back to so we can begin our recovery attempt.
- numSingles := 10
- backups := make([]Single, 0, numSingles)
- var packedBackups PackedSingles
- for i := 0; i < numSingles; i++ {
- channel, err := genRandomOpenChannelShell()
- if err != nil {
- t.Fatalf("unable make channel: %v", err)
- }
-
- single := NewSingle(channel, nil)
-
- var b bytes.Buffer
- if err := single.PackToWriter(&b, keyRing); err != nil {
- t.Fatalf("unable to pack single: %v", err)
- }
-
- backups = append(backups, single)
- packedBackups = append(packedBackups, b.Bytes())
- }
-
- chanRestorer := mockChannelRestorer{}
- peerConnector := mockPeerConnector{}
-
- // Now that we have our backups (packed and unpacked), we'll attempt to
- // restore them all in a single batch.
-
- // If we make the channel restore fail, then the entire method should
- // as well
- chanRestorer.fail = true
- err := UnpackAndRecoverSingles(
- packedBackups, keyRing, &chanRestorer, &peerConnector,
- )
- if err == nil {
- t.Fatalf("restoration should have failed")
- }
-
- chanRestorer.fail = false
-
- // If we make the peer connector fail, then the entire method should as
- // well
- peerConnector.fail = true
- err = UnpackAndRecoverSingles(
- packedBackups, keyRing, &chanRestorer, &peerConnector,
- )
- if err == nil {
- t.Fatalf("restoration should have failed")
- }
-
- chanRestorer.callCount--
- peerConnector.fail = false
-
- // Next, we'll ensure that if all the interfaces function as expected,
- // then the channels will properly be unpacked and restored.
- err = UnpackAndRecoverSingles(
- packedBackups, keyRing, &chanRestorer, &peerConnector,
- )
- if err != nil {
- t.Fatalf("unable to recover chans: %v", err)
- }
-
- // Both the restorer, and connector should have been called 10 times,
- // once for each backup.
- if chanRestorer.callCount != numSingles {
- t.Fatalf("expected %v calls, instead got %v",
- numSingles, chanRestorer.callCount)
- }
- if peerConnector.callCount != numSingles {
- t.Fatalf("expected %v calls, instead got %v",
- numSingles, peerConnector.callCount)
- }
-
- // If we modify the keyRing, then unpacking should fail.
- keyRing.fail = true
- err = UnpackAndRecoverSingles(
- packedBackups, keyRing, &chanRestorer, &peerConnector,
- )
- if err == nil {
- t.Fatalf("unpacking should have failed")
- }
-
- // TODO(roasbeef): verify proper call args
-}
-
-// TestUnpackAndRecoverMulti tests that we're able to properly unpack and
-// recover a packed multi.
-func TestUnpackAndRecoverMulti(t *testing.T) {
- t.Parallel()
-
- keyRing := &mockKeyRing{}
-
- // First, we'll create a number of single chan backups that we'll
- // shortly back to so we can begin our recovery attempt.
- numSingles := 10
- backups := make([]Single, 0, numSingles)
- for i := 0; i < numSingles; i++ {
- channel, err := genRandomOpenChannelShell()
- if err != nil {
- t.Fatalf("unable make channel: %v", err)
- }
-
- single := NewSingle(channel, nil)
-
- backups = append(backups, single)
- }
-
- multi := Multi{
- StaticBackups: backups,
- }
-
- var b bytes.Buffer
- if err := multi.PackToWriter(&b, keyRing); err != nil {
- t.Fatalf("unable to pack multi: %v", err)
- }
-
- // Next, we'll pack the set of singles into a packed multi, and also
- // create the set of interfaces we need to carry out the remainder of
- // the test.
- packedMulti := PackedMulti(b.Bytes())
-
- chanRestorer := mockChannelRestorer{}
- peerConnector := mockPeerConnector{}
-
- // If we make the channel restore fail, then the entire method should
- // as well
- chanRestorer.fail = true
- err := UnpackAndRecoverMulti(
- packedMulti, keyRing, &chanRestorer, &peerConnector,
- )
- if err == nil {
- t.Fatalf("restoration should have failed")
- }
-
- chanRestorer.fail = false
-
- // If we make the peer connector fail, then the entire method should as
- // well
- peerConnector.fail = true
- err = UnpackAndRecoverMulti(
- packedMulti, keyRing, &chanRestorer, &peerConnector,
- )
- if err == nil {
- t.Fatalf("restoration should have failed")
- }
-
- chanRestorer.callCount--
- peerConnector.fail = false
-
- // Next, we'll ensure that if all the interfaces function as expected,
- // then the channels will properly be unpacked and restored.
- err = UnpackAndRecoverMulti(
- packedMulti, keyRing, &chanRestorer, &peerConnector,
- )
- if err != nil {
- t.Fatalf("unable to recover chans: %v", err)
- }
-
- // Both the restorer, and connector should have been called 10 times,
- // once for each backup.
- if chanRestorer.callCount != numSingles {
- t.Fatalf("expected %v calls, instead got %v",
- numSingles, chanRestorer.callCount)
- }
- if peerConnector.callCount != numSingles {
- t.Fatalf("expected %v calls, instead got %v",
- numSingles, peerConnector.callCount)
- }
-
- // If we modify the keyRing, then unpacking should fail.
- keyRing.fail = true
- err = UnpackAndRecoverMulti(
- packedMulti, keyRing, &chanRestorer, &peerConnector,
- )
- if err == nil {
- t.Fatalf("unpacking should have failed")
- }
-
- // TODO(roasbeef): verify proper call args
-}
diff --git a/lnd/chanbackup/single.go b/lnd/chanbackup/single.go
deleted file mode 100644
index 8db98b44..00000000
--- a/lnd/chanbackup/single.go
+++ /dev/null
@@ -1,513 +0,0 @@
-package chanbackup
-
-import (
- "bytes"
- "io"
- "net"
-
- "github.com/pkt-cash/pktd/btcec"
- "github.com/pkt-cash/pktd/btcutil"
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/btcutil/util"
- "github.com/pkt-cash/pktd/chaincfg/chainhash"
- "github.com/pkt-cash/pktd/lnd/channeldb"
- "github.com/pkt-cash/pktd/lnd/keychain"
- "github.com/pkt-cash/pktd/lnd/lnwire"
- "github.com/pkt-cash/pktd/wire"
-)
-
-// SingleBackupVersion denotes the version of the single static channel backup.
-// Based on this version, we know how to pack/unpack serialized versions of the
-// backup.
-type SingleBackupVersion byte
-
-const (
- // DefaultSingleVersion is the default version of the single channel
- // backup. The serialized version of this static channel backup is
- // simply: version || SCB. Where SCB is the known format of the
- // version.
- DefaultSingleVersion = 0
-
- // TweaklessCommitVersion is the second SCB version. This version
- // implicitly denotes that this channel uses the new tweakless commit
- // format.
- TweaklessCommitVersion = 1
-
- // AnchorsCommitVersion is the third SCB version. This version
- // implicitly denotes that this channel uses the new anchor commitment
- // format.
- AnchorsCommitVersion = 2
-)
-
-// Single is a static description of an existing channel that can be used for
-// the purposes of backing up. The fields in this struct allow a node to
-// recover the settled funds within a channel in the case of partial or
-// complete data loss. We provide the network address that we last used to
-// connect to the peer as well, in case the node stops advertising the IP on
-// the network for whatever reason.
-//
-// TODO(roasbeef): suffix version into struct?
-type Single struct {
- // Version is the version that should be observed when attempting to
- // pack the single backup.
- Version SingleBackupVersion
-
- // IsInitiator is true if we were the initiator of the channel, and
- // false otherwise. We'll need to know this information in order to
- // properly re-derive the state hint information.
- IsInitiator bool
-
- // ChainHash is a hash which represents the blockchain that this
- // channel will be opened within. This value is typically the genesis
- // hash. In the case that the original chain went through a contentious
- // hard-fork, then this value will be tweaked using the unique fork
- // point on each branch.
- ChainHash chainhash.Hash
-
- // FundingOutpoint is the outpoint of the final funding transaction.
- // This value uniquely and globally identities the channel within the
- // target blockchain as specified by the chain hash parameter.
- FundingOutpoint wire.OutPoint
-
- // ShortChannelID encodes the exact location in the chain in which the
- // channel was initially confirmed. This includes: the block height,
- // transaction index, and the output within the target transaction.
- // Channels that were not confirmed at the time of backup creation will
- // have the funding TX broadcast height set as their block height in
- // the ShortChannelID.
- ShortChannelID lnwire.ShortChannelID
-
- // RemoteNodePub is the identity public key of the remote node this
- // channel has been established with.
- RemoteNodePub *btcec.PublicKey
-
- // Addresses is a list of IP address in which either we were able to
- // reach the node over in the past, OR we received an incoming
- // authenticated connection for the stored identity public key.
- Addresses []net.Addr
-
- // Capacity is the size of the original channel.
- Capacity btcutil.Amount
-
- // LocalChanCfg is our local channel configuration. It contains all the
- // information we need to re-derive the keys we used within the
- // channel. Most importantly, it allows to derive the base public
- // that's used to deriving the key used within the non-delayed
- // pay-to-self output on the commitment transaction for a node. With
- // this information, we can re-derive the private key needed to sweep
- // the funds on-chain.
- //
- // NOTE: Of the items in the ChannelConstraints, we only write the CSV
- // delay.
- LocalChanCfg channeldb.ChannelConfig
-
- // RemoteChanCfg is the remote channel confirmation. We store this as
- // well since we'll need some of their keys to re-derive things like
- // the state hint obfuscator which will allow us to recognize the state
- // their broadcast on chain.
- //
- // NOTE: Of the items in the ChannelConstraints, we only write the CSV
- // delay.
- RemoteChanCfg channeldb.ChannelConfig
-
- // ShaChainRootDesc describes how to derive the private key that was
- // used as the shachain root for this channel.
- ShaChainRootDesc keychain.KeyDescriptor
-}
-
-// NewSingle creates a new static channel backup based on an existing open
-// channel. We also pass in the set of addresses that we used in the past to
-// connect to the channel peer.
-func NewSingle(channel *channeldb.OpenChannel,
- nodeAddrs []net.Addr) Single {
-
- // TODO(roasbeef): update after we start to store the KeyLoc for
- // shachain root
-
- // We'll need to obtain the shachain root which is derived directly
- // from a private key in our keychain.
- var b bytes.Buffer
- channel.RevocationProducer.Encode(&b) // Can't return an error.
-
- // Once we have the root, we'll make a public key from it, such that
- // the backups plaintext don't carry any private information. When we
- // go to recover, we'll present this in order to derive the private
- // key.
- _, shaChainPoint := btcec.PrivKeyFromBytes(btcec.S256(), b.Bytes())
-
- // If a channel is unconfirmed, the block height of the ShortChannelID
- // is zero. This will lead to problems when trying to restore that
- // channel as the spend notifier would get a height hint of zero.
- // To work around that problem, we add the channel broadcast height
- // to the channel ID so we can use that as height hint on restore.
- chanID := channel.ShortChanID()
- if chanID.BlockHeight == 0 {
- chanID.BlockHeight = channel.FundingBroadcastHeight
- }
-
- single := Single{
- IsInitiator: channel.IsInitiator,
- ChainHash: channel.ChainHash,
- FundingOutpoint: channel.FundingOutpoint,
- ShortChannelID: chanID,
- RemoteNodePub: channel.IdentityPub,
- Addresses: nodeAddrs,
- Capacity: channel.Capacity,
- LocalChanCfg: channel.LocalChanCfg,
- RemoteChanCfg: channel.RemoteChanCfg,
- ShaChainRootDesc: keychain.KeyDescriptor{
- PubKey: shaChainPoint,
- KeyLocator: keychain.KeyLocator{
- Family: keychain.KeyFamilyRevocationRoot,
- },
- },
- }
-
- switch {
- case channel.ChanType.HasAnchors():
- single.Version = AnchorsCommitVersion
-
- case channel.ChanType.IsTweakless():
- single.Version = TweaklessCommitVersion
-
- default:
- single.Version = DefaultSingleVersion
- }
-
- return single
-}
-
-// Serialize attempts to write out the serialized version of the target
-// StaticChannelBackup into the passed io.Writer.
-func (s *Single) Serialize(w io.Writer) er.R {
- // Check to ensure that we'll only attempt to serialize a version that
- // we're aware of.
- switch s.Version {
- case DefaultSingleVersion:
- case TweaklessCommitVersion:
- case AnchorsCommitVersion:
- default:
- return er.Errorf("unable to serialize w/ unknown "+
- "version: %v", s.Version)
- }
-
- // If the sha chain root has specified a public key (which is
- // optional), then we'll encode it now.
- var shaChainPub [33]byte
- if s.ShaChainRootDesc.PubKey != nil {
- copy(
- shaChainPub[:],
- s.ShaChainRootDesc.PubKey.SerializeCompressed(),
- )
- }
-
- // First we gather the SCB as is into a temporary buffer so we can
- // determine the total length. Before we write out the serialized SCB,
- // we write the length which allows us to skip any Singles that we
- // don't know of when decoding a multi.
- var singleBytes bytes.Buffer
- if err := lnwire.WriteElements(
- &singleBytes,
- s.IsInitiator,
- s.ChainHash[:],
- s.FundingOutpoint,
- s.ShortChannelID,
- s.RemoteNodePub,
- s.Addresses,
- s.Capacity,
-
- s.LocalChanCfg.CsvDelay,
-
- // We only need to write out the KeyLocator portion of the
- // local channel config.
- uint32(s.LocalChanCfg.MultiSigKey.Family),
- s.LocalChanCfg.MultiSigKey.Index,
- uint32(s.LocalChanCfg.RevocationBasePoint.Family),
- s.LocalChanCfg.RevocationBasePoint.Index,
- uint32(s.LocalChanCfg.PaymentBasePoint.Family),
- s.LocalChanCfg.PaymentBasePoint.Index,
- uint32(s.LocalChanCfg.DelayBasePoint.Family),
- s.LocalChanCfg.DelayBasePoint.Index,
- uint32(s.LocalChanCfg.HtlcBasePoint.Family),
- s.LocalChanCfg.HtlcBasePoint.Index,
-
- s.RemoteChanCfg.CsvDelay,
-
- // We only need to write out the raw pubkey for the remote
- // channel config.
- s.RemoteChanCfg.MultiSigKey.PubKey,
- s.RemoteChanCfg.RevocationBasePoint.PubKey,
- s.RemoteChanCfg.PaymentBasePoint.PubKey,
- s.RemoteChanCfg.DelayBasePoint.PubKey,
- s.RemoteChanCfg.HtlcBasePoint.PubKey,
-
- shaChainPub[:],
- uint32(s.ShaChainRootDesc.KeyLocator.Family),
- s.ShaChainRootDesc.KeyLocator.Index,
- ); err != nil {
- return err
- }
-
- return lnwire.WriteElements(
- w,
- byte(s.Version),
- uint16(len(singleBytes.Bytes())),
- singleBytes.Bytes(),
- )
-}
-
-// PackToWriter is similar to the Serialize method, but takes the operation a
-// step further by encryption the raw bytes of the static channel back up. For
-// encryption we use the chacah20poly1305 AEAD cipher with a 24 byte nonce and
-// 32-byte key size. We use a 24-byte nonce, as we can't ensure that we have a
-// global counter to use as a sequence number for nonces, and want to ensure
-// that we're able to decrypt these blobs without any additional context. We
-// derive the key that we use for encryption via a SHA2 operation of the with
-// the golden keychain.KeyFamilyStaticBackup base encryption key. We then take
-// the serialized resulting shared secret point, and hash it using sha256 to
-// obtain the key that we'll use for encryption. When using the AEAD, we pass
-// the nonce as associated data such that we'll be able to package the two
-// together for storage. Before writing out the encrypted payload, we prepend
-// the nonce to the final blob.
-func (s *Single) PackToWriter(w io.Writer, keyRing keychain.KeyRing) er.R {
- // First, we'll serialize the SCB (StaticChannelBackup) into a
- // temporary buffer so we can store it in a temporary place before we
- // go to encrypt the entire thing.
- var rawBytes bytes.Buffer
- if err := s.Serialize(&rawBytes); err != nil {
- return err
- }
-
- // Finally, we'll encrypt the raw serialized SCB (using the nonce as
- // associated data), and write out the ciphertext prepend with the
- // nonce that we used to the passed io.Reader.
- return encryptPayloadToWriter(rawBytes, w, keyRing)
-}
-
-// readLocalKeyDesc reads a KeyDescriptor encoded within an unpacked Single.
-// For local KeyDescs, we only write out the KeyLocator information as we can
-// re-derive the pubkey from it.
-func readLocalKeyDesc(r io.Reader) (keychain.KeyDescriptor, er.R) {
- var keyDesc keychain.KeyDescriptor
-
- var keyFam uint32
- if err := lnwire.ReadElements(r, &keyFam); err != nil {
- return keyDesc, err
- }
- keyDesc.Family = keychain.KeyFamily(keyFam)
-
- if err := lnwire.ReadElements(r, &keyDesc.Index); err != nil {
- return keyDesc, err
- }
-
- return keyDesc, nil
-}
-
-// readRemoteKeyDesc reads a remote KeyDescriptor encoded within an unpacked
-// Single. For remote KeyDescs, we write out only the PubKey since we don't
-// actually have the KeyLocator data.
-func readRemoteKeyDesc(r io.Reader) (keychain.KeyDescriptor, er.R) {
- var (
- keyDesc keychain.KeyDescriptor
- pub [33]byte
- )
-
- _, err := util.ReadFull(r, pub[:])
- if err != nil {
- return keychain.KeyDescriptor{}, err
- }
-
- keyDesc.PubKey, err = btcec.ParsePubKey(pub[:], btcec.S256())
- if err != nil {
- return keychain.KeyDescriptor{}, err
- }
-
- keyDesc.PubKey.Curve = nil
-
- return keyDesc, nil
-}
-
-// Deserialize attempts to read the raw plaintext serialized SCB from the
-// passed io.Reader. If the method is successful, then the target
-// StaticChannelBackup will be fully populated.
-func (s *Single) Deserialize(r io.Reader) er.R {
- // First, we'll need to read the version of this single-back up so we
- // can know how to unpack each of the SCB.
- var version byte
- err := lnwire.ReadElements(r, &version)
- if err != nil {
- return err
- }
-
- s.Version = SingleBackupVersion(version)
-
- switch s.Version {
- case DefaultSingleVersion:
- case TweaklessCommitVersion:
- case AnchorsCommitVersion:
- default:
- return er.Errorf("unable to de-serialize w/ unknown "+
- "version: %v", s.Version)
- }
-
- var length uint16
- if err := lnwire.ReadElements(r, &length); err != nil {
- return err
- }
-
- err = lnwire.ReadElements(
- r, &s.IsInitiator, s.ChainHash[:], &s.FundingOutpoint,
- &s.ShortChannelID, &s.RemoteNodePub, &s.Addresses, &s.Capacity,
- )
- if err != nil {
- return err
- }
-
- err = lnwire.ReadElements(r, &s.LocalChanCfg.CsvDelay)
- if err != nil {
- return err
- }
- s.LocalChanCfg.MultiSigKey, err = readLocalKeyDesc(r)
- if err != nil {
- return err
- }
- s.LocalChanCfg.RevocationBasePoint, err = readLocalKeyDesc(r)
- if err != nil {
- return err
- }
- s.LocalChanCfg.PaymentBasePoint, err = readLocalKeyDesc(r)
- if err != nil {
- return err
- }
- s.LocalChanCfg.DelayBasePoint, err = readLocalKeyDesc(r)
- if err != nil {
- return err
- }
- s.LocalChanCfg.HtlcBasePoint, err = readLocalKeyDesc(r)
- if err != nil {
- return err
- }
-
- err = lnwire.ReadElements(r, &s.RemoteChanCfg.CsvDelay)
- if err != nil {
- return err
- }
- s.RemoteChanCfg.MultiSigKey, err = readRemoteKeyDesc(r)
- if err != nil {
- return err
- }
- s.RemoteChanCfg.RevocationBasePoint, err = readRemoteKeyDesc(r)
- if err != nil {
- return err
- }
- s.RemoteChanCfg.PaymentBasePoint, err = readRemoteKeyDesc(r)
- if err != nil {
- return err
- }
- s.RemoteChanCfg.DelayBasePoint, err = readRemoteKeyDesc(r)
- if err != nil {
- return err
- }
- s.RemoteChanCfg.HtlcBasePoint, err = readRemoteKeyDesc(r)
- if err != nil {
- return err
- }
-
- // Finally, we'll parse out the ShaChainRootDesc.
- var (
- shaChainPub [33]byte
- zeroPub [33]byte
- )
- if err := lnwire.ReadElements(r, shaChainPub[:]); err != nil {
- return err
- }
-
- // Since this field is optional, we'll check to see if the pubkey has
- // been specified or not.
- if !bytes.Equal(shaChainPub[:], zeroPub[:]) {
- s.ShaChainRootDesc.PubKey, err = btcec.ParsePubKey(
- shaChainPub[:], btcec.S256(),
- )
- if err != nil {
- return err
- }
- }
-
- var shaKeyFam uint32
- if err := lnwire.ReadElements(r, &shaKeyFam); err != nil {
- return err
- }
- s.ShaChainRootDesc.KeyLocator.Family = keychain.KeyFamily(shaKeyFam)
-
- return lnwire.ReadElements(r, &s.ShaChainRootDesc.KeyLocator.Index)
-}
-
-// UnpackFromReader is similar to Deserialize method, but it expects the passed
-// io.Reader to contain an encrypt SCB. Refer to the SerializeAndEncrypt method
-// for details w.r.t the encryption scheme used. If we're unable to decrypt the
-// payload for whatever reason (wrong key, wrong nonce, etc), then this method
-// will return an error.
-func (s *Single) UnpackFromReader(r io.Reader, keyRing keychain.KeyRing) er.R {
- plaintext, err := decryptPayloadFromReader(r, keyRing)
- if err != nil {
- return err
- }
-
- // Finally, we'll pack the bytes into a reader to we can deserialize
- // the plaintext bytes of the SCB.
- backupReader := bytes.NewReader(plaintext)
- return s.Deserialize(backupReader)
-}
-
-// PackStaticChanBackups accepts a set of existing open channels, and a
-// keychain.KeyRing, and returns a map of outpoints to the serialized+encrypted
-// static channel backups. The passed keyRing should be backed by the users
-// root HD seed in order to ensure full determinism.
-func PackStaticChanBackups(backups []Single,
- keyRing keychain.KeyRing) (map[wire.OutPoint][]byte, er.R) {
-
- packedBackups := make(map[wire.OutPoint][]byte)
- for _, chanBackup := range backups {
- chanPoint := chanBackup.FundingOutpoint
-
- var b bytes.Buffer
- err := chanBackup.PackToWriter(&b, keyRing)
- if err != nil {
- return nil, er.Errorf("unable to pack chan backup "+
- "for %v: %v", chanPoint, err)
- }
-
- packedBackups[chanPoint] = b.Bytes()
- }
-
- return packedBackups, nil
-}
-
-// PackedSingles represents a series of fully packed SCBs. This may be the
-// combination of a series of individual SCBs in order to batch their
-// unpacking.
-type PackedSingles [][]byte
-
-// Unpack attempts to decrypt the passed set of encrypted SCBs and deserialize
-// each one into a new SCB struct. The passed keyRing should be backed by the
-// same HD seed as was used to encrypt the set of backups in the first place.
-// If we're unable to decrypt any of the back ups, then we'll return an error.
-func (p PackedSingles) Unpack(keyRing keychain.KeyRing) ([]Single, er.R) {
-
- backups := make([]Single, len(p))
- for i, encryptedBackup := range p {
- var backup Single
-
- backupReader := bytes.NewReader(encryptedBackup)
- err := backup.UnpackFromReader(backupReader, keyRing)
- if err != nil {
- return nil, err
- }
-
- backups[i] = backup
- }
-
- return backups, nil
-}
-
-// TODO(roasbeef): make codec package?
diff --git a/lnd/chanbackup/single_test.go b/lnd/chanbackup/single_test.go
deleted file mode 100644
index def29f84..00000000
--- a/lnd/chanbackup/single_test.go
+++ /dev/null
@@ -1,462 +0,0 @@
-package chanbackup
-
-import (
- "bytes"
- "math"
- "math/rand"
- "net"
- "reflect"
- "testing"
-
- "github.com/davecgh/go-spew/spew"
- "github.com/pkt-cash/pktd/btcec"
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/chaincfg/chainhash"
- "github.com/pkt-cash/pktd/lnd/channeldb"
- "github.com/pkt-cash/pktd/lnd/keychain"
- "github.com/pkt-cash/pktd/lnd/lnwire"
- "github.com/pkt-cash/pktd/lnd/shachain"
- "github.com/pkt-cash/pktd/wire"
-)
-
-var (
- chainHash = chainhash.Hash{
- 0xb7, 0x94, 0x38, 0x5f, 0x2d, 0x1e, 0xf7, 0xab,
- 0x4d, 0x92, 0x73, 0xd1, 0x90, 0x63, 0x81, 0xb4,
- 0x4f, 0x2f, 0x6f, 0x25, 0x18, 0xa3, 0xef, 0xb9,
- 0x64, 0x49, 0x18, 0x83, 0x31, 0x98, 0x47, 0x53,
- }
-
- op = wire.OutPoint{
- Hash: chainHash,
- Index: 4,
- }
-
- addr1, _ = net.ResolveTCPAddr("tcp", "10.0.0.2:9000")
- addr2, _ = net.ResolveTCPAddr("tcp", "10.0.0.3:9000")
-)
-
-func assertSingleEqual(t *testing.T, a, b Single) {
- t.Helper()
-
- if a.Version != b.Version {
- t.Fatalf("versions don't match: %v vs %v", a.Version,
- b.Version)
- }
- if a.IsInitiator != b.IsInitiator {
- t.Fatalf("initiators don't match: %v vs %v", a.IsInitiator,
- b.IsInitiator)
- }
- if a.ChainHash != b.ChainHash {
- t.Fatalf("chainhash doesn't match: %v vs %v", a.ChainHash,
- b.ChainHash)
- }
- if a.FundingOutpoint != b.FundingOutpoint {
- t.Fatalf("chan point doesn't match: %v vs %v",
- a.FundingOutpoint, b.FundingOutpoint)
- }
- if a.ShortChannelID != b.ShortChannelID {
- t.Fatalf("chan id doesn't match: %v vs %v",
- a.ShortChannelID, b.ShortChannelID)
- }
- if a.Capacity != b.Capacity {
- t.Fatalf("capacity doesn't match: %v vs %v",
- a.Capacity, b.Capacity)
- }
- if !a.RemoteNodePub.IsEqual(b.RemoteNodePub) {
- t.Fatalf("node pubs don't match %x vs %x",
- a.RemoteNodePub.SerializeCompressed(),
- b.RemoteNodePub.SerializeCompressed())
- }
- if !reflect.DeepEqual(a.LocalChanCfg, b.LocalChanCfg) {
- t.Fatalf("local chan config doesn't match: %v vs %v",
- spew.Sdump(a.LocalChanCfg),
- spew.Sdump(b.LocalChanCfg))
- }
- if !reflect.DeepEqual(a.RemoteChanCfg, b.RemoteChanCfg) {
- t.Fatalf("remote chan config doesn't match: %v vs %v",
- spew.Sdump(a.RemoteChanCfg),
- spew.Sdump(b.RemoteChanCfg))
- }
- if !reflect.DeepEqual(a.ShaChainRootDesc, b.ShaChainRootDesc) {
- t.Fatalf("sha chain point doesn't match: %v vs %v",
- spew.Sdump(a.ShaChainRootDesc),
- spew.Sdump(b.ShaChainRootDesc))
- }
-
- if len(a.Addresses) != len(b.Addresses) {
- t.Fatalf("expected %v addrs got %v", len(a.Addresses),
- len(b.Addresses))
- }
- for i := 0; i < len(a.Addresses); i++ {
- if a.Addresses[i].String() != b.Addresses[i].String() {
- t.Fatalf("addr mismatch: %v vs %v",
- a.Addresses[i], b.Addresses[i])
- }
- }
-}
-
-func genRandomOpenChannelShell() (*channeldb.OpenChannel, er.R) {
- var testPriv [32]byte
- if _, err := rand.Read(testPriv[:]); err != nil {
- return nil, er.E(err)
- }
-
- _, pub := btcec.PrivKeyFromBytes(btcec.S256(), testPriv[:])
-
- var chanPoint wire.OutPoint
- if _, err := rand.Read(chanPoint.Hash[:]); err != nil {
- return nil, er.E(err)
- }
-
- pub.Curve = nil
-
- chanPoint.Index = uint32(rand.Intn(math.MaxUint16))
-
- var shaChainRoot [32]byte
- if _, err := rand.Read(shaChainRoot[:]); err != nil {
- return nil, er.E(err)
- }
-
- shaChainProducer := shachain.NewRevocationProducer(shaChainRoot)
-
- var isInitiator bool
- if rand.Int63()%2 == 0 {
- isInitiator = true
- }
-
- chanType := channeldb.SingleFunderBit
- if rand.Int63()%2 == 0 {
- chanType = channeldb.SingleFunderTweaklessBit
- }
-
- return &channeldb.OpenChannel{
- ChainHash: chainHash,
- ChanType: chanType,
- IsInitiator: isInitiator,
- FundingOutpoint: chanPoint,
- ShortChannelID: lnwire.NewShortChanIDFromInt(
- uint64(rand.Int63()),
- ),
- IdentityPub: pub,
- LocalChanCfg: channeldb.ChannelConfig{
- ChannelConstraints: channeldb.ChannelConstraints{
- CsvDelay: uint16(rand.Int63()),
- },
- MultiSigKey: keychain.KeyDescriptor{
- KeyLocator: keychain.KeyLocator{
- Family: keychain.KeyFamily(rand.Int63()),
- Index: uint32(rand.Int63()),
- },
- },
- RevocationBasePoint: keychain.KeyDescriptor{
- KeyLocator: keychain.KeyLocator{
- Family: keychain.KeyFamily(rand.Int63()),
- Index: uint32(rand.Int63()),
- },
- },
- PaymentBasePoint: keychain.KeyDescriptor{
- KeyLocator: keychain.KeyLocator{
- Family: keychain.KeyFamily(rand.Int63()),
- Index: uint32(rand.Int63()),
- },
- },
- DelayBasePoint: keychain.KeyDescriptor{
- KeyLocator: keychain.KeyLocator{
- Family: keychain.KeyFamily(rand.Int63()),
- Index: uint32(rand.Int63()),
- },
- },
- HtlcBasePoint: keychain.KeyDescriptor{
- KeyLocator: keychain.KeyLocator{
- Family: keychain.KeyFamily(rand.Int63()),
- Index: uint32(rand.Int63()),
- },
- },
- },
- RemoteChanCfg: channeldb.ChannelConfig{
- ChannelConstraints: channeldb.ChannelConstraints{
- CsvDelay: uint16(rand.Int63()),
- },
- MultiSigKey: keychain.KeyDescriptor{
- PubKey: pub,
- },
- RevocationBasePoint: keychain.KeyDescriptor{
- PubKey: pub,
- },
- PaymentBasePoint: keychain.KeyDescriptor{
- PubKey: pub,
- },
- DelayBasePoint: keychain.KeyDescriptor{
- PubKey: pub,
- },
- HtlcBasePoint: keychain.KeyDescriptor{
- PubKey: pub,
- },
- },
- RevocationProducer: shaChainProducer,
- }, nil
-}
-
-// TestSinglePackUnpack tests that we're able to unpack a previously packed
-// channel backup.
-func TestSinglePackUnpack(t *testing.T) {
- t.Parallel()
-
- // Given our test pub key, we'll create an open channel shell that
- // contains all the information we need to create a static channel
- // backup.
- channel, err := genRandomOpenChannelShell()
- if err != nil {
- t.Fatalf("unable to gen open channel: %v", err)
- }
-
- singleChanBackup := NewSingle(channel, []net.Addr{addr1, addr2})
- singleChanBackup.RemoteNodePub.Curve = nil
-
- keyRing := &mockKeyRing{}
-
- versionTestCases := []struct {
- // version is the pack/unpack version that we should use to
- // decode/encode the final SCB.
- version SingleBackupVersion
-
- // valid tests us if this test case should pass or not.
- valid bool
- }{
- // The default version, should pack/unpack with no problem.
- {
- version: DefaultSingleVersion,
- valid: true,
- },
-
- // The new tweakless version, should pack/unpack with no
- // problem.
- {
- version: TweaklessCommitVersion,
- valid: true,
- },
-
- // The new anchor version, should pack/unpack with no
- // problem.
- {
- version: AnchorsCommitVersion,
- valid: true,
- },
-
- // A non-default version, atm this should result in a failure.
- {
- version: 99,
- valid: false,
- },
- }
- for i, versionCase := range versionTestCases {
- // First, we'll re-assign SCB version to what was indicated in
- // the test case.
- singleChanBackup.Version = versionCase.version
-
- var b bytes.Buffer
-
- err := singleChanBackup.PackToWriter(&b, keyRing)
- switch {
- // If this is a valid test case, and we failed, then we'll
- // return an error.
- case err != nil && versionCase.valid:
- t.Fatalf("#%v, unable to pack single: %v", i, err)
-
- // If this is an invalid test case, and we passed it, then
- // we'll return an error.
- case err == nil && !versionCase.valid:
- t.Fatalf("#%v got nil error for invalid pack: %v",
- i, err)
- }
-
- // If this is a valid test case, then we'll continue to ensure
- // we can unpack it, and also that if we mutate the packed
- // version, then we trigger an error.
- if versionCase.valid {
- var unpackedSingle Single
- err = unpackedSingle.UnpackFromReader(&b, keyRing)
- if err != nil {
- t.Fatalf("#%v unable to unpack single: %v",
- i, err)
- }
- unpackedSingle.RemoteNodePub.Curve = nil
-
- assertSingleEqual(t, singleChanBackup, unpackedSingle)
-
- // If this was a valid packing attempt, then we'll test
- // to ensure that if we mutate the version prepended to
- // the serialization, then unpacking will fail as well.
- var rawSingle bytes.Buffer
- err := unpackedSingle.Serialize(&rawSingle)
- if err != nil {
- t.Fatalf("unable to serialize single: %v", err)
- }
-
- rawBytes := rawSingle.Bytes()
- rawBytes[0] ^= 5
-
- newReader := bytes.NewReader(rawBytes)
- err = unpackedSingle.Deserialize(newReader)
- if err == nil {
- t.Fatalf("#%v unpack with unknown version "+
- "should have failed", i)
- }
- }
- }
-}
-
-// TestPackedSinglesUnpack tests that we're able to properly unpack a series of
-// packed singles.
-func TestPackedSinglesUnpack(t *testing.T) {
- t.Parallel()
-
- keyRing := &mockKeyRing{}
-
- // To start, we'll create 10 new singles, and them assemble their
- // packed forms into a slice.
- numSingles := 10
- packedSingles := make([][]byte, 0, numSingles)
- unpackedSingles := make([]Single, 0, numSingles)
- for i := 0; i < numSingles; i++ {
- channel, err := genRandomOpenChannelShell()
- if err != nil {
- t.Fatalf("unable to gen channel: %v", err)
- }
-
- single := NewSingle(channel, nil)
-
- var b bytes.Buffer
- if err := single.PackToWriter(&b, keyRing); err != nil {
- t.Fatalf("unable to pack single: %v", err)
- }
-
- packedSingles = append(packedSingles, b.Bytes())
- unpackedSingles = append(unpackedSingles, single)
- }
-
- // With all singles packed, we'll create the grouped type and attempt
- // to Unpack all of them in a single go.
- freshSingles, err := PackedSingles(packedSingles).Unpack(keyRing)
- if err != nil {
- t.Fatalf("unable to unpack singles: %v", err)
- }
-
- // The set of freshly unpacked singles should exactly match the initial
- // set of singles that we packed before.
- for i := 0; i < len(unpackedSingles); i++ {
- assertSingleEqual(t, unpackedSingles[i], freshSingles[i])
- }
-
- // If we mutate one of the packed singles, then the entire method
- // should fail.
- packedSingles[0][0] ^= 1
- _, err = PackedSingles(packedSingles).Unpack(keyRing)
- if err == nil {
- t.Fatalf("unpack attempt should fail")
- }
-}
-
-// TestSinglePackStaticChanBackups tests that we're able to batch pack a set of
-// Singles, and then unpack them obtaining the same set of unpacked singles.
-func TestSinglePackStaticChanBackups(t *testing.T) {
- t.Parallel()
-
- keyRing := &mockKeyRing{}
-
- // First, we'll create a set of random single, and along the way,
- // create a map that will let us look up each single by its chan point.
- numSingles := 10
- singleMap := make(map[wire.OutPoint]Single, numSingles)
- unpackedSingles := make([]Single, 0, numSingles)
- for i := 0; i < numSingles; i++ {
- channel, err := genRandomOpenChannelShell()
- if err != nil {
- t.Fatalf("unable to gen channel: %v", err)
- }
-
- single := NewSingle(channel, nil)
-
- singleMap[channel.FundingOutpoint] = single
- unpackedSingles = append(unpackedSingles, single)
- }
-
- // Now that we have all of our singles are created, we'll attempt to
- // pack them all in a single batch.
- packedSingleMap, err := PackStaticChanBackups(unpackedSingles, keyRing)
- if err != nil {
- t.Fatalf("unable to pack backups: %v", err)
- }
-
- // With our packed singles obtained, we'll ensure that each of them
- // match their unpacked counterparts after they themselves have been
- // unpacked.
- for chanPoint, single := range singleMap {
- packedSingles, ok := packedSingleMap[chanPoint]
- if !ok {
- t.Fatalf("unable to find single %v", chanPoint)
- }
-
- var freshSingle Single
- err := freshSingle.UnpackFromReader(
- bytes.NewReader(packedSingles), keyRing,
- )
- if err != nil {
- t.Fatalf("unable to unpack single: %v", err)
- }
-
- assertSingleEqual(t, single, freshSingle)
- }
-
- // If we attempt to pack again, but force the key ring to fail, then
- // the entire method should fail.
- _, err = PackStaticChanBackups(
- unpackedSingles, &mockKeyRing{true},
- )
- if err == nil {
- t.Fatalf("pack attempt should fail")
- }
-}
-
-// TestSingleUnconfirmedChannel tests that unconfirmed channels get serialized
-// correctly by encoding the funding broadcast height as block height of the
-// short channel ID.
-func TestSingleUnconfirmedChannel(t *testing.T) {
- t.Parallel()
-
- var fundingBroadcastHeight = uint32(1234)
-
- // Let's create an open channel shell that contains all the information
- // we need to create a static channel backup but simulate an
- // unconfirmed channel by setting the block height to 0.
- channel, err := genRandomOpenChannelShell()
- if err != nil {
- t.Fatalf("unable to gen open channel: %v", err)
- }
- channel.ShortChannelID.BlockHeight = 0
- channel.FundingBroadcastHeight = fundingBroadcastHeight
-
- singleChanBackup := NewSingle(channel, []net.Addr{addr1, addr2})
- keyRing := &mockKeyRing{}
-
- // Pack it and then unpack it again to make sure everything is written
- // correctly, then check that the block height of the unpacked
- // is the funding broadcast height we set before.
- var b bytes.Buffer
- if err := singleChanBackup.PackToWriter(&b, keyRing); err != nil {
- t.Fatalf("unable to pack single: %v", err)
- }
- var unpackedSingle Single
- err = unpackedSingle.UnpackFromReader(&b, keyRing)
- if err != nil {
- t.Fatalf("unable to unpack single: %v", err)
- }
- if unpackedSingle.ShortChannelID.BlockHeight != fundingBroadcastHeight {
- t.Fatalf("invalid block height. got %d expected %d.",
- unpackedSingle.ShortChannelID.BlockHeight,
- fundingBroadcastHeight)
- }
-}
-
-// TODO(roasbsef): fuzz parsing
diff --git a/lnd/chanfitness/chanevent.go b/lnd/chanfitness/chanevent.go
deleted file mode 100644
index bd78c3d4..00000000
--- a/lnd/chanfitness/chanevent.go
+++ /dev/null
@@ -1,418 +0,0 @@
-package chanfitness
-
-import (
- "time"
-
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/lnd/clock"
- "github.com/pkt-cash/pktd/wire"
-)
-
-type eventType int
-
-const (
- peerOnlineEvent eventType = iota
- peerOfflineEvent
-)
-
-// String provides string representations of channel events.
-func (e eventType) String() string {
- switch e {
- case peerOnlineEvent:
- return "peer_online"
-
- case peerOfflineEvent:
- return "peer_offline"
- }
-
- return "unknown"
-}
-
-type event struct {
- timestamp time.Time
- eventType eventType
-}
-
-// peerLog tracks events for a peer and its channels. If we currently have no
-// channels with the peer, it will simply track its current online state. If we
-// do have channels open with the peer, it will track the peer's online and
-// offline events so that we can calculate uptime for our channels. A single
-// event log is used for these online and offline events, and uptime for a
-// channel is calculated by examining a subsection of this log.
-type peerLog struct {
- // online stores whether the peer is currently online.
- online bool
-
- // onlineEvents is a log of timestamped events observed for the peer
- // that we have committed to allocating memory to.
- onlineEvents []*event
-
- // stagedEvent represents an event that is pending addition to the
- // events list. It has not yet been added because we rate limit the
- // frequency that we store events at. We need to store this value
- // in the log (rather than just ignore events) so that we can flush the
- // aggregate outcome to our event log once the rate limiting period has
- // ended.
- //
- // Take the following example:
- // - Peer online event recorded
- // - Peer offline event, not recorded due to rate limit
- // - No more events, we incorrectly believe our peer to be online
- // Instead of skipping events, we stage the most recent event during the
- // rate limited period so that we know what happened (on aggregate)
- // while we were rate limiting events.
- //
- // Note that we currently only store offline/online events so we can
- // use this field to track our online state. With the addition of other
- // event types, we need to only stage online/offline events, or split
- // them out.
- stagedEvent *event
-
- // flapCount is the number of times this peer has been observed as
- // going offline.
- flapCount int
-
- // lastFlap is the timestamp of the last flap we recorded for the peer.
- // This value will be nil if we have never recorded a flap for the peer.
- lastFlap *time.Time
-
- // clock allows creation of deterministic unit tests.
- clock clock.Clock
-
- // channels contains a set of currently open channels. Channels will be
- // added and removed from this map as they are opened and closed.
- channels map[wire.OutPoint]*channelInfo
-}
-
-// newPeerLog creates a log for a peer, taking its historical flap count and
-// last flap time as parameters. These values may be zero/nil if we have no
-// record of historical flap count for the peer.
-func newPeerLog(clock clock.Clock, flapCount int,
- lastFlap *time.Time) *peerLog {
-
- return &peerLog{
- clock: clock,
- flapCount: flapCount,
- lastFlap: lastFlap,
- channels: make(map[wire.OutPoint]*channelInfo),
- }
-}
-
-// channelInfo contains information about a channel.
-type channelInfo struct {
- // openedAt tracks the first time this channel was seen. This is not
- // necessarily the time that it confirmed on chain because channel
- // events are not persisted at present.
- openedAt time.Time
-}
-
-func newChannelInfo(openedAt time.Time) *channelInfo {
- return &channelInfo{
- openedAt: openedAt,
- }
-}
-
-// onlineEvent records a peer online or offline event in the log and increments
-// the peer's flap count.
-func (p *peerLog) onlineEvent(online bool) {
- eventTime := p.clock.Now()
-
- // If we have a non-nil last flap time, potentially apply a cooldown
- // factor to the peer's flap count before we rate limit it. This allows
- // us to decrease the penalty for historical flaps over time, provided
- // the peer has not flapped for a while.
- if p.lastFlap != nil {
- p.flapCount = cooldownFlapCount(
- p.clock.Now(), p.flapCount, *p.lastFlap,
- )
- }
-
- // Record flap count information and online state regardless of whether
- // we have any channels open with this peer.
- p.flapCount++
- p.lastFlap = &eventTime
- p.online = online
-
- // If we have no channels currently open with the peer, we do not want
- // to commit resources to tracking their online state beyond a simple
- // online boolean, so we exit early.
- if p.channelCount() == 0 {
- return
- }
-
- p.addEvent(online, eventTime)
-}
-
-// addEvent records an online or offline event in our event log. and increments
-// the peer's flap count.
-func (p *peerLog) addEvent(online bool, time time.Time) {
- eventType := peerOnlineEvent
- if !online {
- eventType = peerOfflineEvent
- }
-
- event := &event{
- timestamp: time,
- eventType: eventType,
- }
-
- // If we have no staged events, we can just stage this event and return.
- if p.stagedEvent == nil {
- p.stagedEvent = event
- return
- }
-
- // We get the amount of time we require between events according to
- // peer flap count.
- aggregation := getRateLimit(p.flapCount)
- nextRecordTime := p.stagedEvent.timestamp.Add(aggregation)
- flushEvent := nextRecordTime.Before(event.timestamp)
-
- // If enough time has passed since our last staged event, we add our
- // event to our in-memory list.
- if flushEvent {
- p.onlineEvents = append(p.onlineEvents, p.stagedEvent)
- }
-
- // Finally, we replace our staged event with the new event we received.
- p.stagedEvent = event
-}
-
-// addChannel adds a channel to our log. If we have not tracked any online
-// events for our peer yet, we create one with our peer's current online state
-// so that we know the state that the peer had at channel start, which is
-// required to calculate uptime over the channel's lifetime.
-func (p *peerLog) addChannel(channelPoint wire.OutPoint) er.R {
- _, ok := p.channels[channelPoint]
- if ok {
- return er.Errorf("channel: %v already present", channelPoint)
- }
-
- openTime := p.clock.Now()
- p.channels[channelPoint] = newChannelInfo(openTime)
-
- // If we do not have any online events tracked for our peer (which is
- // the case when we have no other channels open with the peer), we add
- // an event with the peer's current online state so that we know that
- // starting state for this peer when a channel was connected (which
- // allows us to calculate uptime over the lifetime of the channel).
- if len(p.onlineEvents) == 0 {
- p.addEvent(p.online, openTime)
- }
-
- return nil
-}
-
-// removeChannel removes a channel from our log. If we have no more channels
-// with the peer after removing this one, we clear our list of events.
-func (p *peerLog) removeChannel(channelPoint wire.OutPoint) er.R {
- _, ok := p.channels[channelPoint]
- if !ok {
- return er.Errorf("channel: %v not present", channelPoint)
- }
-
- delete(p.channels, channelPoint)
-
- // If we have no more channels in our event log, we can discard all of
- // our online events in memory, since we don't need them anymore.
- // TODO(carla): this could be done on a per channel basis.
- if p.channelCount() == 0 {
- p.onlineEvents = nil
- p.stagedEvent = nil
- }
-
- return nil
-}
-
-// channelCount returns the number of channels that we currently have
-// with the peer.
-func (p *peerLog) channelCount() int {
- return len(p.channels)
-}
-
-// channelUptime looks up a channel and returns the amount of time that the
-// channel has been monitored for and its uptime over this period.
-func (p *peerLog) channelUptime(channelPoint wire.OutPoint) (time.Duration,
- time.Duration, er.R) {
-
- channel, ok := p.channels[channelPoint]
- if !ok {
- return 0, 0, ErrChannelNotFound.Default()
- }
-
- now := p.clock.Now()
-
- uptime, err := p.uptime(channel.openedAt, now)
- if err != nil {
- return 0, 0, err
- }
-
- return now.Sub(channel.openedAt), uptime, nil
-}
-
-// getFlapCount returns the peer's flap count and the timestamp that we last
-// recorded a flap.
-func (p *peerLog) getFlapCount() (int, *time.Time) {
- return p.flapCount, p.lastFlap
-}
-
-// listEvents returns all of the events that our event log has tracked,
-// including events that are staged for addition to our set of events but have
-// not yet been committed to (because we rate limit and store only the aggregate
-// outcome over a period).
-func (p *peerLog) listEvents() []*event {
- if p.stagedEvent == nil {
- return p.onlineEvents
- }
-
- return append(p.onlineEvents, p.stagedEvent)
-}
-
-// onlinePeriod represents a period of time over which a peer was online.
-type onlinePeriod struct {
- start, end time.Time
-}
-
-// getOnlinePeriods returns a list of all the periods that the event log has
-// recorded the remote peer as being online. In the unexpected case where there
-// are no events, the function returns early. Online periods are defined as a
-// peer online event which is terminated by a peer offline event. If the event
-// log ends on a peer online event, it appends a final period which is
-// calculated until the present. This function expects the event log provided
-// to be ordered by ascending timestamp, and can tolerate multiple consecutive
-// online or offline events.
-func (p *peerLog) getOnlinePeriods() []*onlinePeriod {
- events := p.listEvents()
-
- // Return early if there are no events, there are no online periods.
- if len(events) == 0 {
- return nil
- }
-
- var (
- // lastEvent tracks the last event that we had that was of
- // a different type to our own. It is used to determine the
- // start time of our online periods when we experience an
- // offline event, and to track our last recorded state.
- lastEvent *event
- onlinePeriods []*onlinePeriod
- )
-
- // Loop through all events to build a list of periods that the peer was
- // online. Online periods are added when they are terminated with a peer
- // offline event. If the log ends on an online event, the period between
- // the online event and the present is not tracked. The type of the most
- // recent event is tracked using the offline bool so that we can add a
- // final online period if necessary.
- for _, event := range events {
- switch event.eventType {
- case peerOnlineEvent:
- // If our previous event is nil, we just set it and
- // break out of the switch.
- if lastEvent == nil {
- lastEvent = event
- break
- }
-
- // If our previous event was an offline event, we update
- // it to this event. We do not do this if it was an
- // online event because duplicate online events would
- // progress our online timestamp forward (rather than
- // keep it at our earliest online event timestamp).
- if lastEvent.eventType == peerOfflineEvent {
- lastEvent = event
- }
-
- case peerOfflineEvent:
- // If our previous event is nil, we just set it and
- // break out of the switch since we cannot record an
- // online period from this single event.
- if lastEvent == nil {
- lastEvent = event
- break
- }
-
- // If the last event we saw was an online event, we
- // add an online period to our set and progress our
- // previous event to this offline event. We do not
- // do this if we have had duplicate offline events
- // because we would be tracking the most recent offline
- // event (rather than keep it at our earliest offline
- // event timestamp).
- if lastEvent.eventType == peerOnlineEvent {
- onlinePeriods = append(
- onlinePeriods, &onlinePeriod{
- start: lastEvent.timestamp,
- end: event.timestamp,
- },
- )
-
- lastEvent = event
- }
- }
- }
-
- // If the last event was an peer offline event, we do not need to
- // calculate a final online period and can return online periods as is.
- if lastEvent.eventType == peerOfflineEvent {
- return onlinePeriods
- }
-
- // The log ended on an online event, so we need to add a final online
- // period which terminates at the present.
- finalEvent := &onlinePeriod{
- start: lastEvent.timestamp,
- end: p.clock.Now(),
- }
-
- // Add the final online period to the set and return.
- return append(onlinePeriods, finalEvent)
-}
-
-// uptime calculates the total uptime we have recorded for a peer over the
-// inclusive range specified. An error is returned if the end of the range is
-// before the start or a zero end time is returned.
-func (p *peerLog) uptime(start, end time.Time) (time.Duration, er.R) {
- // Error if we are provided with an invalid range to calculate uptime
- // for.
- if end.Before(start) {
- return 0, er.Errorf("end time: %v before start time: %v",
- end, start)
- }
- if end.IsZero() {
- return 0, er.Errorf("zero end time")
- }
-
- var uptime time.Duration
-
- for _, p := range p.getOnlinePeriods() {
- // The online period ends before the range we're looking at, so
- // we can skip over it.
- if p.end.Before(start) {
- continue
- }
- // The online period starts after the range we're looking at, so
- // can stop calculating uptime.
- if p.start.After(end) {
- break
- }
-
- // If the online period starts before our range, shift the start
- // time up so that we only calculate uptime from the start of
- // our range.
- if p.start.Before(start) {
- p.start = start
- }
-
- // If the online period ends before our range, shift the end
- // time forward so that we only calculate uptime until the end
- // of the range.
- if p.end.After(end) {
- p.end = end
- }
-
- uptime += p.end.Sub(p.start)
- }
-
- return uptime, nil
-}
diff --git a/lnd/chanfitness/chanevent_test.go b/lnd/chanfitness/chanevent_test.go
deleted file mode 100644
index a0a6a131..00000000
--- a/lnd/chanfitness/chanevent_test.go
+++ /dev/null
@@ -1,565 +0,0 @@
-package chanfitness
-
-import (
- "testing"
- "time"
-
- "github.com/pkt-cash/pktd/btcutil/util"
- "github.com/pkt-cash/pktd/lnd/clock"
- "github.com/pkt-cash/pktd/wire"
- "github.com/stretchr/testify/require"
-)
-
-// TestPeerLog tests the functionality of the peer log struct.
-func TestPeerLog(t *testing.T) {
- clock := clock.NewTestClock(testNow)
- peerLog := newPeerLog(clock, 0, nil)
-
- // assertFlapCount is a helper that asserts that our peer's flap count
- // and timestamp is set to expected values.
- assertFlapCount := func(expectedCount int, expectedTs *time.Time) {
- flapCount, flapTs := peerLog.getFlapCount()
- require.Equal(t, expectedCount, flapCount)
- require.Equal(t, expectedTs, flapTs)
- }
-
- require.Zero(t, peerLog.channelCount())
- require.False(t, peerLog.online)
- assertFlapCount(0, nil)
-
- // Test that looking up an unknown channel fails.
- _, _, err := peerLog.channelUptime(wire.OutPoint{Index: 1})
- util.RequireErr(t, err)
-
- lastFlap := clock.Now()
-
- // Add an offline event, since we have no channels, we do not expect
- // to have any online periods recorded for our peer. However, we should
- // increment our flap count for the peer.
- peerLog.onlineEvent(false)
- require.Len(t, peerLog.getOnlinePeriods(), 0)
- assertFlapCount(1, &lastFlap)
-
- // Bump our test clock's time by an hour so that we can create an online
- // event with a distinct time.
- lastFlap = testNow.Add(time.Hour)
- clock.SetTime(lastFlap)
-
- // Likewise, if we have an online event, nothing beyond the online state
- // of our peer log should change, but our flap count should change.
- peerLog.onlineEvent(true)
- require.Len(t, peerLog.getOnlinePeriods(), 0)
- assertFlapCount(2, &lastFlap)
-
- // Add a channel and assert that we have one channel listed. Since this
- // is the first channel we track for the peer, we expect an online
- // event to be added, however, our flap count should not change because
- // this is not a new online event, we are just copying one into our log
- // for our purposes.
- chan1 := wire.OutPoint{
- Index: 1,
- }
- util.RequireNoErr(t, peerLog.addChannel(chan1))
- require.Equal(t, 1, peerLog.channelCount())
- assertFlapCount(2, &lastFlap)
-
- // Assert that we can now successfully get our added channel.
- _, _, err = peerLog.channelUptime(chan1)
- util.RequireNoErr(t, err)
-
- // Bump our test clock's time so that our current time is different to
- // channel open time.
- lastFlap = clock.Now().Add(time.Hour)
- clock.SetTime(lastFlap)
-
- // Now that we have added a channel and an hour has passed, we expect
- // our uptime and lifetime to both equal an hour.
- lifetime, uptime, err := peerLog.channelUptime(chan1)
- util.RequireNoErr(t, err)
- require.Equal(t, time.Hour, lifetime)
- require.Equal(t, time.Hour, uptime)
-
- // Add an offline event for our peer and assert that our flap count is
- // incremented.
- peerLog.onlineEvent(false)
- assertFlapCount(3, &lastFlap)
-
- // Now we add another channel to our store and assert that we now report
- // two channels for this peer.
- chan2 := wire.OutPoint{
- Index: 2,
- }
- util.RequireNoErr(t, peerLog.addChannel(chan2))
- require.Equal(t, 2, peerLog.channelCount())
-
- // Progress our time again, so that our peer has now been offline for
- // two hours.
- now := lastFlap.Add(time.Hour * 2)
- clock.SetTime(now)
-
- // Our first channel should report as having been monitored for three
- // hours, but only online for one of those hours.
- lifetime, uptime, err = peerLog.channelUptime(chan1)
- util.RequireNoErr(t, err)
- require.Equal(t, time.Hour*3, lifetime)
- require.Equal(t, time.Hour, uptime)
-
- // Remove our first channel and check that we can still correctly query
- // uptime for the second channel.
- util.RequireNoErr(t, peerLog.removeChannel(chan1))
- require.Equal(t, 1, peerLog.channelCount())
-
- // Our second channel, which was created when our peer was offline,
- // should report as having been monitored for two hours, but have zero
- // uptime.
- lifetime, uptime, err = peerLog.channelUptime(chan2)
- util.RequireNoErr(t, err)
- require.Equal(t, time.Hour*2, lifetime)
- require.Equal(t, time.Duration(0), uptime)
-
- // Finally, remove our second channel and assert that our peer cleans
- // up its in memory set of events but keeps its flap count record.
- util.RequireNoErr(t, peerLog.removeChannel(chan2))
- require.Equal(t, 0, peerLog.channelCount())
- require.Len(t, peerLog.onlineEvents, 0)
- assertFlapCount(3, &lastFlap)
-
- require.Len(t, peerLog.listEvents(), 0)
- require.Nil(t, peerLog.stagedEvent)
-}
-
-// TestRateLimitAdd tests the addition of events to the event log with rate
-// limiting in place.
-func TestRateLimitAdd(t *testing.T) {
- // Create a mock clock specifically for this test so that we can
- // progress time without affecting the other tests.
- mockedClock := clock.NewTestClock(testNow)
-
- // Create a new peer log.
- peerLog := newPeerLog(mockedClock, 0, nil)
- require.Nil(t, peerLog.stagedEvent)
-
- // Create a channel for our peer log, otherwise it will not track online
- // events.
- util.RequireNoErr(t, peerLog.addChannel(wire.OutPoint{}))
-
- // First, we add an event to the event log. Since we have no previous
- // events, we expect this event to staged immediately.
- peerEvent := &event{
- timestamp: testNow,
- eventType: peerOfflineEvent,
- }
-
- peerLog.onlineEvent(false)
- require.Equal(t, peerEvent, peerLog.stagedEvent)
-
- // We immediately add another event to our event log. We expect our
- // staged event to be replaced with this new event, because insufficient
- // time has passed since our last event.
- peerEvent = &event{
- timestamp: testNow,
- eventType: peerOnlineEvent,
- }
-
- peerLog.onlineEvent(true)
- require.Equal(t, peerEvent, peerLog.stagedEvent)
-
- // We get the amount of time that we need to pass before we record an
- // event from our rate limiting tiers. We then progress our test clock
- // to just after this point.
- delta := getRateLimit(peerLog.flapCount)
- newNow := testNow.Add(delta + 1)
- mockedClock.SetTime(newNow)
-
- // Now, when we add an event, we expect our staged event to be added
- // to our events list and for our new event to be staged.
- newEvent := &event{
- timestamp: newNow,
- eventType: peerOfflineEvent,
- }
- peerLog.onlineEvent(false)
-
- require.Equal(t, []*event{peerEvent}, peerLog.onlineEvents)
- require.Equal(t, newEvent, peerLog.stagedEvent)
-
- // Now, we test the case where we add many events to our log. We expect
- // our set of events to be untouched, but for our staged event to be
- // updated.
- nextEvent := &event{
- timestamp: newNow,
- eventType: peerOnlineEvent,
- }
-
- for i := 0; i < 5; i++ {
- // We flip the kind of event for each type so that we can check
- // that our staged event is definitely changing each time.
- if i%2 == 0 {
- nextEvent.eventType = peerOfflineEvent
- } else {
- nextEvent.eventType = peerOnlineEvent
- }
-
- online := nextEvent.eventType == peerOnlineEvent
-
- peerLog.onlineEvent(online)
- require.Equal(t, []*event{peerEvent}, peerLog.onlineEvents)
- require.Equal(t, nextEvent, peerLog.stagedEvent)
- }
-
- // Now, we test the case where a peer's flap count is cooled down
- // because it has not flapped for a while. Set our peer's flap count so
- // that we fall within our second rate limiting tier and assert that we
- // are at this level.
- peerLog.flapCount = rateLimitScale + 1
- rateLimit := getRateLimit(peerLog.flapCount)
- require.Equal(t, rateLimits[1], rateLimit)
-
- // Progress our clock to the point where we will have our flap count
- // cooled.
- newNow = mockedClock.Now().Add(flapCountCooldownPeriod)
- mockedClock.SetTime(newNow)
-
- // Add an online event, and expect it to be staged.
- onlineEvent := &event{
- timestamp: newNow,
- eventType: peerOnlineEvent,
- }
- peerLog.onlineEvent(true)
- require.Equal(t, onlineEvent, peerLog.stagedEvent)
-
- // Progress our clock by the rate limit level that we will be on if
- // our flap rate is cooled down to a lower level.
- newNow = mockedClock.Now().Add(rateLimits[0] + 1)
- mockedClock.SetTime(newNow)
-
- // Add another event. We expect this event to be staged and our previous
- // event to be flushed to the event log (because our cooldown has been
- // applied).
- offlineEvent := &event{
- timestamp: newNow,
- eventType: peerOfflineEvent,
- }
- peerLog.onlineEvent(false)
- require.Equal(t, offlineEvent, peerLog.stagedEvent)
-
- flushedEventIdx := len(peerLog.onlineEvents) - 1
- require.Equal(
- t, onlineEvent, peerLog.onlineEvents[flushedEventIdx],
- )
-}
-
-// TestGetOnlinePeriod tests the getOnlinePeriod function. It tests the case
-// where no events present, and the case where an additional online period
-// must be added because the event log ends on an online event.
-func TestGetOnlinePeriod(t *testing.T) {
- fourHoursAgo := testNow.Add(time.Hour * -4)
- threeHoursAgo := testNow.Add(time.Hour * -3)
- twoHoursAgo := testNow.Add(time.Hour * -2)
-
- tests := []struct {
- name string
- events []*event
- expectedOnline []*onlinePeriod
- }{
- {
- name: "no events",
- },
- {
- name: "start on online period",
- events: []*event{
- {
- timestamp: threeHoursAgo,
- eventType: peerOnlineEvent,
- },
- {
- timestamp: twoHoursAgo,
- eventType: peerOfflineEvent,
- },
- },
- expectedOnline: []*onlinePeriod{
- {
- start: threeHoursAgo,
- end: twoHoursAgo,
- },
- },
- },
- {
- name: "start on offline period",
- events: []*event{
- {
- timestamp: fourHoursAgo,
- eventType: peerOfflineEvent,
- },
- },
- },
- {
- name: "end on an online period",
- events: []*event{
- {
- timestamp: fourHoursAgo,
- eventType: peerOnlineEvent,
- },
- },
- expectedOnline: []*onlinePeriod{
- {
- start: fourHoursAgo,
- end: testNow,
- },
- },
- },
- {
- name: "duplicate online events",
- events: []*event{
- {
- timestamp: fourHoursAgo,
- eventType: peerOnlineEvent,
- },
- {
- timestamp: threeHoursAgo,
- eventType: peerOnlineEvent,
- },
- },
- expectedOnline: []*onlinePeriod{
- {
- start: fourHoursAgo,
- end: testNow,
- },
- },
- },
- {
- name: "duplicate offline events",
- events: []*event{
- {
- timestamp: fourHoursAgo,
- eventType: peerOfflineEvent,
- },
- {
- timestamp: threeHoursAgo,
- eventType: peerOfflineEvent,
- },
- },
- expectedOnline: nil,
- },
- {
- name: "duplicate online then offline",
- events: []*event{
- {
- timestamp: fourHoursAgo,
- eventType: peerOnlineEvent,
- },
- {
- timestamp: threeHoursAgo,
- eventType: peerOnlineEvent,
- },
- {
- timestamp: twoHoursAgo,
- eventType: peerOfflineEvent,
- },
- },
- expectedOnline: []*onlinePeriod{
- {
- start: fourHoursAgo,
- end: twoHoursAgo,
- },
- },
- },
- {
- name: "duplicate offline then online",
- events: []*event{
- {
- timestamp: fourHoursAgo,
- eventType: peerOfflineEvent,
- },
- {
- timestamp: threeHoursAgo,
- eventType: peerOfflineEvent,
- },
- {
- timestamp: twoHoursAgo,
- eventType: peerOnlineEvent,
- },
- },
- expectedOnline: []*onlinePeriod{
- {
- start: twoHoursAgo,
- end: testNow,
- },
- },
- },
- }
-
- for _, test := range tests {
- test := test
-
- t.Run(test.name, func(t *testing.T) {
- t.Parallel()
-
- score := &peerLog{
- onlineEvents: test.events,
- clock: clock.NewTestClock(testNow),
- }
-
- online := score.getOnlinePeriods()
-
- require.Equal(t, test.expectedOnline, online)
- })
-
- }
-}
-
-// TestUptime tests channel uptime calculation based on its event log.
-func TestUptime(t *testing.T) {
- fourHoursAgo := testNow.Add(time.Hour * -4)
- threeHoursAgo := testNow.Add(time.Hour * -3)
- twoHoursAgo := testNow.Add(time.Hour * -2)
- oneHourAgo := testNow.Add(time.Hour * -1)
-
- tests := []struct {
- name string
-
- // events is the set of event log that we are calculating uptime
- // for.
- events []*event
-
- // startTime is the beginning of the period that we are
- // calculating uptime for, it cannot have a zero value.
- startTime time.Time
-
- // endTime is the end of the period that we are calculating
- // uptime for, it cannot have a zero value.
- endTime time.Time
-
- // expectedUptime is the amount of uptime we expect to be
- // calculated over the period specified by startTime and
- // endTime.
- expectedUptime time.Duration
-
- // expectErr is set to true if we expect an error to be returned
- // when calling the uptime function.
- expectErr bool
- }{
- {
- name: "End before start",
- endTime: threeHoursAgo,
- startTime: testNow,
- expectErr: true,
- },
- {
- name: "Zero end time",
- expectErr: true,
- },
- {
- name: "online event and no offline",
- events: []*event{
- {
- timestamp: fourHoursAgo,
- eventType: peerOnlineEvent,
- },
- },
- startTime: fourHoursAgo,
- endTime: testNow,
- expectedUptime: time.Hour * 4,
- },
- {
- name: "online then offline event",
- events: []*event{
- {
- timestamp: threeHoursAgo,
- eventType: peerOnlineEvent,
- },
- {
- timestamp: twoHoursAgo,
- eventType: peerOfflineEvent,
- },
- },
- startTime: fourHoursAgo,
- endTime: testNow,
- expectedUptime: time.Hour,
- },
- {
- name: "online event before uptime period",
- events: []*event{
- {
- timestamp: threeHoursAgo,
- eventType: peerOnlineEvent,
- },
- },
- startTime: twoHoursAgo,
- endTime: testNow,
- expectedUptime: time.Hour * 2,
- },
- {
- name: "offline event after uptime period",
- events: []*event{
- {
- timestamp: fourHoursAgo,
- eventType: peerOnlineEvent,
- },
- {
- timestamp: testNow.Add(time.Hour),
- eventType: peerOfflineEvent,
- },
- },
- startTime: twoHoursAgo,
- endTime: testNow,
- expectedUptime: time.Hour * 2,
- },
- {
- name: "all events within period",
- events: []*event{
- {
- timestamp: twoHoursAgo,
- eventType: peerOnlineEvent,
- },
- },
- startTime: threeHoursAgo,
- endTime: oneHourAgo,
- expectedUptime: time.Hour,
- },
- {
- name: "multiple online and offline",
- events: []*event{
- {
- timestamp: testNow.Add(time.Hour * -7),
- eventType: peerOnlineEvent,
- },
- {
- timestamp: testNow.Add(time.Hour * -6),
- eventType: peerOfflineEvent,
- },
- {
- timestamp: testNow.Add(time.Hour * -5),
- eventType: peerOnlineEvent,
- },
- {
- timestamp: testNow.Add(time.Hour * -4),
- eventType: peerOfflineEvent,
- },
- {
- timestamp: testNow.Add(time.Hour * -3),
- eventType: peerOnlineEvent,
- },
- },
- startTime: testNow.Add(time.Hour * -8),
- endTime: oneHourAgo,
- expectedUptime: time.Hour * 4,
- },
- }
-
- for _, test := range tests {
- test := test
-
- t.Run(test.name, func(t *testing.T) {
- score := &peerLog{
- onlineEvents: test.events,
- clock: clock.NewTestClock(testNow),
- }
-
- uptime, err := score.uptime(
- test.startTime, test.endTime,
- )
- require.Equal(t, test.expectErr, err != nil)
- require.Equal(t, test.expectedUptime, uptime)
- })
- }
-}
diff --git a/lnd/chanfitness/chaneventstore.go b/lnd/chanfitness/chaneventstore.go
deleted file mode 100644
index 6aa460f4..00000000
--- a/lnd/chanfitness/chaneventstore.go
+++ /dev/null
@@ -1,563 +0,0 @@
-// Package chanfitness monitors the behaviour of channels to provide insight
-// into the health and performance of a channel. This is achieved by maintaining
-// an event store which tracks events for each channel.
-//
-// Lifespan: the period that the channel has been known to the scoring system.
-// Note that lifespan may not equal the channel's full lifetime because data is
-// not currently persisted.
-//
-// Uptime: the total time within a given period that the channel's remote peer
-// has been online.
-package chanfitness
-
-import (
- "sync"
- "time"
-
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/lnd/channeldb"
- "github.com/pkt-cash/pktd/lnd/channelnotifier"
- "github.com/pkt-cash/pktd/lnd/clock"
- "github.com/pkt-cash/pktd/lnd/peernotifier"
- "github.com/pkt-cash/pktd/lnd/routing/route"
- "github.com/pkt-cash/pktd/lnd/subscribe"
- "github.com/pkt-cash/pktd/lnd/ticker"
- "github.com/pkt-cash/pktd/pktlog/log"
- "github.com/pkt-cash/pktd/wire"
-)
-
-const (
- // FlapCountFlushRate determines how often we write peer total flap
- // count to disk.
- FlapCountFlushRate = time.Hour
-)
-
-var (
- Err = er.NewErrorType("lnd.chanfitness")
- // errShuttingDown is returned when the store cannot respond to a query
- // because it has received the shutdown signal.
- errShuttingDown = Err.CodeWithDetail("errShuttingDown", "channel event store shutting down")
-
- // ErrChannelNotFound is returned when a query is made for a channel
- // that the event store does not have knowledge of.
- ErrChannelNotFound = Err.CodeWithDetail("ErrChannelNotFound", "channel not found in event store")
-
- // ErrPeerNotFound is returned when a query is made for a channel
- // that has a peer that the event store is not currently tracking.
- ErrPeerNotFound = Err.CodeWithDetail("ErrPeerNotFound", "peer not found in event store")
-)
-
-// ChannelEventStore maintains a set of event logs for the node's channels to
-// provide insight into the performance and health of channels.
-type ChannelEventStore struct {
- cfg *Config
-
- // peers tracks all of our currently monitored peers and their channels.
- peers map[route.Vertex]peerMonitor
-
- // chanInfoRequests serves requests for information about our channel.
- chanInfoRequests chan channelInfoRequest
-
- // peerRequests serves requests for information about a peer.
- peerRequests chan peerRequest
-
- quit chan struct{}
-
- wg sync.WaitGroup
-}
-
-// Config provides the event store with functions required to monitor channel
-// activity. All elements of the config must be non-nil for the event store to
-// operate.
-type Config struct {
- // SubscribeChannelEvents provides a subscription client which provides
- // a stream of channel events.
- SubscribeChannelEvents func() (subscribe.Subscription, er.R)
-
- // SubscribePeerEvents provides a subscription client which provides a
- // stream of peer online/offline events.
- SubscribePeerEvents func() (subscribe.Subscription, er.R)
-
- // GetOpenChannels provides a list of existing open channels which is
- // used to populate the ChannelEventStore with a set of channels on
- // startup.
- GetOpenChannels func() ([]*channeldb.OpenChannel, er.R)
-
- // Clock is the time source that the subsystem uses, provided here
- // for ease of testing.
- Clock clock.Clock
-
- // WriteFlapCounts records the flap count for a set of peers on disk.
- WriteFlapCount func(map[route.Vertex]*channeldb.FlapCount) er.R
-
- // ReadFlapCount gets the flap count for a peer on disk.
- ReadFlapCount func(route.Vertex) (*channeldb.FlapCount, er.R)
-
- // FlapCountTicker is a ticker which controls how often we flush our
- // peer's flap count to disk.
- FlapCountTicker ticker.Ticker
-}
-
-// peerFlapCountMap is the map used to map peers to flap counts, declared here
-// to allow shorter function signatures.
-type peerFlapCountMap map[route.Vertex]*channeldb.FlapCount
-
-type channelInfoRequest struct {
- peer route.Vertex
- channelPoint wire.OutPoint
- responseChan chan channelInfoResponse
-}
-
-type channelInfoResponse struct {
- info *ChannelInfo
- err er.R
-}
-
-type peerRequest struct {
- peer route.Vertex
- responseChan chan peerResponse
-}
-
-type peerResponse struct {
- flapCount int
- ts *time.Time
- err er.R
-}
-
-// NewChannelEventStore initializes an event store with the config provided.
-// Note that this function does not start the main event loop, Start() must be
-// called.
-func NewChannelEventStore(config *Config) *ChannelEventStore {
- store := &ChannelEventStore{
- cfg: config,
- peers: make(map[route.Vertex]peerMonitor),
- chanInfoRequests: make(chan channelInfoRequest),
- peerRequests: make(chan peerRequest),
- quit: make(chan struct{}),
- }
-
- return store
-}
-
-// Start adds all existing open channels to the event store and starts the main
-// loop which records channel and peer events, and serves requests for
-// information from the store. If this function fails, it cancels its existing
-// subscriptions and returns an error.
-func (c *ChannelEventStore) Start() er.R {
- // Create a subscription to channel events.
- channelClient, err := c.cfg.SubscribeChannelEvents()
- if err != nil {
- return err
- }
-
- // Create a subscription to peer events. If an error occurs, cancel the
- // existing subscription to channel events and return.
- peerClient, err := c.cfg.SubscribePeerEvents()
- if err != nil {
- channelClient.Cancel()
- return err
- }
-
- // cancel should be called to cancel all subscriptions if an error
- // occurs.
- cancel := func() {
- channelClient.Cancel()
- peerClient.Cancel()
- }
-
- // Add the existing set of channels to the event store. This is required
- // because channel events will not be triggered for channels that exist
- // at startup time.
- channels, err := c.cfg.GetOpenChannels()
- if err != nil {
- cancel()
- return err
- }
-
- log.Infof("Adding %v channels to event store", len(channels))
-
- for _, ch := range channels {
- peerKey, err := route.NewVertexFromBytes(
- ch.IdentityPub.SerializeCompressed(),
- )
- if err != nil {
- cancel()
- return err
- }
-
- // Add existing channels to the channel store with an initial
- // peer online or offline event.
- c.addChannel(ch.FundingOutpoint, peerKey)
- }
-
- // Start a goroutine that consumes events from all subscriptions.
- c.wg.Add(1)
- go c.consume(&subscriptions{
- channelUpdates: channelClient.Updates(),
- peerUpdates: peerClient.Updates(),
- cancel: cancel,
- })
-
- return nil
-}
-
-// Stop terminates all goroutines started by the event store.
-func (c *ChannelEventStore) Stop() {
- log.Info("Stopping event store")
-
- // Stop the consume goroutine.
- close(c.quit)
- c.wg.Wait()
-
- // Stop the ticker after the goroutine reading from it has exited, to
- // avoid a race.
- c.cfg.FlapCountTicker.Stop()
-}
-
-// addChannel checks whether we are already tracking a channel's peer, creates a
-// new peer log to track it if we are not yet monitoring it, and adds the
-// channel.
-func (c *ChannelEventStore) addChannel(channelPoint wire.OutPoint,
- peer route.Vertex) {
-
- peerMonitor, err := c.getPeerMonitor(peer)
- if err != nil {
- log.Errorf("could not create monitor: %v", err)
- return
- }
-
- if err := peerMonitor.addChannel(channelPoint); err != nil {
- log.Errorf("could not add channel: %v", err)
- }
-}
-
-// getPeerMonitor tries to get an existing peer monitor from our in memory list,
-// and falls back to creating a new monitor if it is not currently known.
-func (c *ChannelEventStore) getPeerMonitor(peer route.Vertex) (peerMonitor,
- er.R) {
-
- peerMonitor, ok := c.peers[peer]
- if ok {
- return peerMonitor, nil
- }
-
- var (
- flapCount int
- lastFlap *time.Time
- )
-
- historicalFlap, err := c.cfg.ReadFlapCount(peer)
- switch {
- // If we do not have any records for this peer we set a 0 flap count
- // and timestamp.
- case channeldb.ErrNoPeerBucket.Is(err):
-
- case err == nil:
- flapCount = int(historicalFlap.Count)
- lastFlap = &historicalFlap.LastFlap
-
- // Return if we get an unexpected error.
- default:
- return nil, err
- }
-
- peerMonitor = newPeerLog(c.cfg.Clock, flapCount, lastFlap)
- c.peers[peer] = peerMonitor
-
- return peerMonitor, nil
-}
-
-// closeChannel records a closed time for a channel, and returns early is the
-// channel is not known to the event store. We log warnings (rather than errors)
-// when we cannot find a peer/channel because channels that we restore from a
-// static channel backup do not have their open notified, so the event store
-// never learns about them, but they are closed using the regular flow so we
-// will try to remove them on close. At present, we cannot easily distinguish
-// between these closes and others.
-func (c *ChannelEventStore) closeChannel(channelPoint wire.OutPoint,
- peer route.Vertex) {
-
- peerMonitor, ok := c.peers[peer]
- if !ok {
- log.Warnf("peer not known to store: %v", peer)
- return
- }
-
- if err := peerMonitor.removeChannel(channelPoint); err != nil {
- log.Warnf("could not remove channel: %v", err)
- }
-}
-
-// peerEvent creates a peer monitor for a peer if we do not currently have
-// one, and adds an online event to it.
-func (c *ChannelEventStore) peerEvent(peer route.Vertex, online bool) {
- peerMonitor, err := c.getPeerMonitor(peer)
- if err != nil {
- log.Errorf("could not create monitor: %v", err)
- return
- }
-
- peerMonitor.onlineEvent(online)
-}
-
-// subscriptions abstracts away from subscription clients to allow for mocking.
-type subscriptions struct {
- channelUpdates <-chan interface{}
- peerUpdates <-chan interface{}
- cancel func()
-}
-
-// consume is the event store's main loop. It consumes subscriptions to update
-// the event store with channel and peer events, and serves requests for channel
-// uptime and lifespan.
-func (c *ChannelEventStore) consume(subscriptions *subscriptions) {
- // Start our flap count ticker.
- c.cfg.FlapCountTicker.Resume()
-
- // On exit, we will cancel our subscriptions and write our most recent
- // flap counts to disk. This ensures that we have consistent data in
- // the case of a graceful shutdown. If we do not shutdown gracefully,
- // our worst case is data from our last flap count tick (1H).
- defer func() {
- subscriptions.cancel()
-
- if err := c.recordFlapCount(); err != nil {
- log.Errorf("error recording flap on shutdown: %v", err)
- }
-
- c.wg.Done()
- }()
-
- // Consume events until the channel is closed.
- for {
- select {
- // Process channel opened and closed events.
- case e := <-subscriptions.channelUpdates:
- switch event := e.(type) {
- // A new channel has been opened, we must add the
- // channel to the store and record a channel open event.
- case channelnotifier.OpenChannelEvent:
- compressed := event.Channel.IdentityPub.SerializeCompressed()
- peerKey, err := route.NewVertexFromBytes(
- compressed,
- )
- if err != nil {
- log.Errorf("Could not get vertex "+
- "from: %v", compressed)
- }
-
- c.addChannel(
- event.Channel.FundingOutpoint, peerKey,
- )
-
- // A channel has been closed, we must remove the channel
- // from the store and record a channel closed event.
- case channelnotifier.ClosedChannelEvent:
- compressed := event.CloseSummary.RemotePub.SerializeCompressed()
- peerKey, err := route.NewVertexFromBytes(
- compressed,
- )
- if err != nil {
- log.Errorf("Could not get vertex "+
- "from: %v", compressed)
- continue
- }
-
- c.closeChannel(
- event.CloseSummary.ChanPoint, peerKey,
- )
- }
-
- // Process peer online and offline events.
- case e := <-subscriptions.peerUpdates:
- switch event := e.(type) {
- // We have reestablished a connection with our peer,
- // and should record an online event for any channels
- // with that peer.
- case peernotifier.PeerOnlineEvent:
- c.peerEvent(event.PubKey, true)
-
- // We have lost a connection with our peer, and should
- // record an offline event for any channels with that
- // peer.
- case peernotifier.PeerOfflineEvent:
- c.peerEvent(event.PubKey, false)
- }
-
- // Serve all requests for channel lifetime.
- case req := <-c.chanInfoRequests:
- var resp channelInfoResponse
-
- resp.info, resp.err = c.getChanInfo(req)
- req.responseChan <- resp
-
- // Serve all requests for information about our peer.
- case req := <-c.peerRequests:
- var resp peerResponse
-
- resp.flapCount, resp.ts, resp.err = c.flapCount(
- req.peer,
- )
- req.responseChan <- resp
-
- case <-c.cfg.FlapCountTicker.Ticks():
- if err := c.recordFlapCount(); err != nil {
- log.Errorf("could not record flap "+
- "count: %v", err)
- }
-
- // Exit if the store receives the signal to shutdown.
- case <-c.quit:
- return
- }
- }
-}
-
-// ChannelInfo provides the set of information that the event store has recorded
-// for a channel.
-type ChannelInfo struct {
- // Lifetime is the total amount of time we have monitored the channel
- // for.
- Lifetime time.Duration
-
- // Uptime is the total amount of time that the channel peer has been
- // observed as online during the monitored lifespan.
- Uptime time.Duration
-}
-
-// GetChanInfo gets all the information we have on a channel in the event store.
-func (c *ChannelEventStore) GetChanInfo(channelPoint wire.OutPoint,
- peer route.Vertex) (*ChannelInfo, er.R) {
-
- request := channelInfoRequest{
- peer: peer,
- channelPoint: channelPoint,
- responseChan: make(chan channelInfoResponse),
- }
-
- // Send a request for the channel's information to the main event loop,
- // or return early with an error if the store has already received a
- // shutdown signal.
- select {
- case c.chanInfoRequests <- request:
- case <-c.quit:
- return nil, errShuttingDown.Default()
- }
-
- // Return the response we receive on the response channel or exit early
- // if the store is instructed to exit.
- select {
- case resp := <-request.responseChan:
- return resp.info, resp.err
-
- case <-c.quit:
- return nil, errShuttingDown.Default()
- }
-}
-
-// getChanInfo collects channel information for a channel. It gets uptime over
-// the full lifetime of the channel.
-func (c *ChannelEventStore) getChanInfo(req channelInfoRequest) (*ChannelInfo,
- er.R) {
-
- peerMonitor, ok := c.peers[req.peer]
- if !ok {
- return nil, ErrPeerNotFound.Default()
- }
-
- lifetime, uptime, err := peerMonitor.channelUptime(req.channelPoint)
- if err != nil {
- return nil, err
- }
-
- return &ChannelInfo{
- Lifetime: lifetime,
- Uptime: uptime,
- }, nil
-}
-
-// FlapCount returns the flap count we have for a peer and the timestamp of its
-// last flap. If we do not have any flaps recorded for the peer, the last flap
-// timestamp will be nil.
-func (c *ChannelEventStore) FlapCount(peer route.Vertex) (int, *time.Time,
- er.R) {
-
- request := peerRequest{
- peer: peer,
- responseChan: make(chan peerResponse),
- }
-
- // Send a request for the peer's information to the main event loop,
- // or return early with an error if the store has already received a
- // shutdown signal.
- select {
- case c.peerRequests <- request:
- case <-c.quit:
- return 0, nil, errShuttingDown.Default()
- }
-
- // Return the response we receive on the response channel or exit early
- // if the store is instructed to exit.
- select {
- case resp := <-request.responseChan:
- return resp.flapCount, resp.ts, resp.err
-
- case <-c.quit:
- return 0, nil, errShuttingDown.Default()
- }
-}
-
-// flapCount gets our peer flap count and last flap timestamp from our in memory
-// record of a peer, falling back to on disk if we are not currently tracking
-// the peer. If we have no flap count recorded for the peer, a nil last flap
-// time will be returned.
-func (c *ChannelEventStore) flapCount(peer route.Vertex) (int, *time.Time,
- er.R) {
-
- // First check whether we are tracking this peer in memory, because this
- // record will have the most accurate flap count. We do not fail if we
- // can't find the peer in memory, because we may have previously
- // recorded its flap count on disk.
- peerMonitor, ok := c.peers[peer]
- if ok {
- count, ts := peerMonitor.getFlapCount()
- return count, ts, nil
- }
-
- // Try to get our flap count from the database. If this value is not
- // recorded, we return a nil last flap time to indicate that we have no
- // record of the peer's flap count.
- flapCount, err := c.cfg.ReadFlapCount(peer)
- switch {
- case channeldb.ErrNoPeerBucket.Is(err):
- return 0, nil, nil
-
- case nil == err:
- return int(flapCount.Count), &flapCount.LastFlap, nil
-
- default:
- return 0, nil, err
- }
-}
-
-// recordFlapCount will record our flap count for each peer that we are
-// currently tracking, skipping peers that have a 0 flap count.
-func (c *ChannelEventStore) recordFlapCount() er.R {
- updates := make(peerFlapCountMap)
-
- for peer, monitor := range c.peers {
- flapCount, lastFlap := monitor.getFlapCount()
- if lastFlap == nil {
- continue
- }
-
- updates[peer] = &channeldb.FlapCount{
- Count: uint32(flapCount),
- LastFlap: *lastFlap,
- }
- }
-
- log.Debugf("recording flap count for: %v peers", len(updates))
-
- return c.cfg.WriteFlapCount(updates)
-}
diff --git a/lnd/chanfitness/chaneventstore_test.go b/lnd/chanfitness/chaneventstore_test.go
deleted file mode 100644
index 320cdd01..00000000
--- a/lnd/chanfitness/chaneventstore_test.go
+++ /dev/null
@@ -1,344 +0,0 @@
-package chanfitness
-
-import (
- "math/big"
- "testing"
- "time"
-
- "github.com/pkt-cash/pktd/btcec"
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/btcutil/util"
- "github.com/pkt-cash/pktd/lnd/channeldb"
- "github.com/pkt-cash/pktd/lnd/clock"
- "github.com/pkt-cash/pktd/lnd/routing/route"
- "github.com/pkt-cash/pktd/lnd/subscribe"
- "github.com/pkt-cash/pktd/wire"
- "github.com/stretchr/testify/require"
-)
-
-// testNow is the current time tests will use.
-var testNow = time.Unix(1592465134, 0)
-
-// TestStartStoreError tests the starting of the store in cases where the setup
-// functions fail. It does not test the mechanics of consuming events because
-// these are covered in a separate set of tests.
-func TestStartStoreError(t *testing.T) {
- // Ok and erroring subscribe functions are defined here to de-clutter
- // tests.
- okSubscribeFunc := func() (subscribe.Subscription, er.R) {
- return newMockSubscription(t), nil
- }
-
- errSubscribeFunc := func() (subscribe.Subscription, er.R) {
- return nil, er.New("intentional test err")
- }
-
- tests := []struct {
- name string
- ChannelEvents func() (subscribe.Subscription, er.R)
- PeerEvents func() (subscribe.Subscription, er.R)
- GetChannels func() ([]*channeldb.OpenChannel, er.R)
- }{
- {
- name: "Channel events fail",
- ChannelEvents: errSubscribeFunc,
- },
- {
- name: "Peer events fail",
- ChannelEvents: okSubscribeFunc,
- PeerEvents: errSubscribeFunc,
- },
- {
- name: "Get open channels fails",
- ChannelEvents: okSubscribeFunc,
- PeerEvents: okSubscribeFunc,
- GetChannels: func() ([]*channeldb.OpenChannel, er.R) {
- return nil, er.New("intentional test err")
- },
- },
- }
-
- for _, test := range tests {
- test := test
-
- t.Run(test.name, func(t *testing.T) {
- clock := clock.NewTestClock(testNow)
-
- store := NewChannelEventStore(&Config{
- SubscribeChannelEvents: test.ChannelEvents,
- SubscribePeerEvents: test.PeerEvents,
- GetOpenChannels: test.GetChannels,
- Clock: clock,
- })
-
- err := store.Start()
- // Check that we receive an error, because the test only
- // checks for error cases.
- if err == nil {
- t.Fatalf("Expected error on startup, got: nil")
- }
- })
- }
-}
-
-// TestMonitorChannelEvents tests the store's handling of channel and peer
-// events. It tests for the unexpected cases where we receive a channel open for
-// an already known channel and but does not test for closing an unknown channel
-// because it would require custom logic in the test to prevent iterating
-// through an eventLog which does not exist. This test does not test handling
-// of uptime and lifespan requests, as they are tested in their own tests.
-func TestMonitorChannelEvents(t *testing.T) {
- var (
- pubKey = &btcec.PublicKey{
- X: big.NewInt(0),
- Y: big.NewInt(1),
- Curve: btcec.S256(),
- }
-
- chan1 = wire.OutPoint{Index: 1}
- chan2 = wire.OutPoint{Index: 2}
- )
-
- peer1, err := route.NewVertexFromBytes(pubKey.SerializeCompressed())
- util.RequireNoErr(t, err)
-
- t.Run("peer comes online after channel open", func(t *testing.T) {
- gen := func(ctx *chanEventStoreTestCtx) {
- ctx.sendChannelOpenedUpdate(pubKey, chan1)
- ctx.peerEvent(peer1, true)
- }
-
- testEventStore(t, gen, peer1, 1)
- })
-
- t.Run("duplicate channel open events", func(t *testing.T) {
- gen := func(ctx *chanEventStoreTestCtx) {
- ctx.sendChannelOpenedUpdate(pubKey, chan1)
- ctx.sendChannelOpenedUpdate(pubKey, chan1)
- ctx.peerEvent(peer1, true)
- }
-
- testEventStore(t, gen, peer1, 1)
- })
-
- t.Run("peer online before channel created", func(t *testing.T) {
- gen := func(ctx *chanEventStoreTestCtx) {
- ctx.peerEvent(peer1, true)
- ctx.sendChannelOpenedUpdate(pubKey, chan1)
- }
-
- testEventStore(t, gen, peer1, 1)
- })
-
- t.Run("multiple channels for peer", func(t *testing.T) {
- gen := func(ctx *chanEventStoreTestCtx) {
- ctx.peerEvent(peer1, true)
- ctx.sendChannelOpenedUpdate(pubKey, chan1)
-
- ctx.peerEvent(peer1, false)
- ctx.sendChannelOpenedUpdate(pubKey, chan2)
- }
-
- testEventStore(t, gen, peer1, 2)
- })
-
- t.Run("multiple channels for peer, one closed", func(t *testing.T) {
- gen := func(ctx *chanEventStoreTestCtx) {
- ctx.peerEvent(peer1, true)
- ctx.sendChannelOpenedUpdate(pubKey, chan1)
-
- ctx.peerEvent(peer1, false)
- ctx.sendChannelOpenedUpdate(pubKey, chan2)
-
- ctx.closeChannel(chan1, pubKey)
- ctx.peerEvent(peer1, true)
- }
-
- testEventStore(t, gen, peer1, 1)
- })
-
-}
-
-// testEventStore creates a new test contexts, generates a set of events for it
-// and tests that it has the number of channels we expect.
-func testEventStore(t *testing.T, generateEvents func(*chanEventStoreTestCtx),
- peer route.Vertex, expectedChannels int) {
-
- testCtx := newChanEventStoreTestCtx(t)
- testCtx.start()
-
- generateEvents(testCtx)
-
- // Shutdown the store so that we can safely access the maps in our event
- // store.
- testCtx.stop()
-
- // Get our peer and check that it has the channels we expect.
- monitor, ok := testCtx.store.peers[peer]
- require.True(t, ok)
-
- require.Equal(t, expectedChannels, monitor.channelCount())
-}
-
-// TestStoreFlapCount tests flushing of flap counts to disk on timer ticks and
-// on store shutdown.
-func TestStoreFlapCount(t *testing.T) {
- testCtx := newChanEventStoreTestCtx(t)
- testCtx.start()
-
- pubkey, _, _ := testCtx.createChannel()
- testCtx.peerEvent(pubkey, false)
-
- // Now, we tick our flap count ticker. We expect our main goroutine to
- // flush our tick count to disk.
- testCtx.tickFlapCount()
-
- // Since we just tracked a offline event, we expect a single flap for
- // our peer.
- expectedUpdate := peerFlapCountMap{
- pubkey: {
- Count: 1,
- LastFlap: testCtx.clock.Now(),
- },
- }
-
- testCtx.assertFlapCountUpdated()
- testCtx.assertFlapCountUpdates(expectedUpdate)
-
- // Create three events for out peer, online/offline/online.
- testCtx.peerEvent(pubkey, true)
- testCtx.peerEvent(pubkey, false)
- testCtx.peerEvent(pubkey, true)
-
- // Trigger another write.
- testCtx.tickFlapCount()
-
- // Since we have processed 3 more events for our peer, we update our
- // expected online map to have a flap count of 4 for this peer.
- expectedUpdate[pubkey] = &channeldb.FlapCount{
- Count: 4,
- LastFlap: testCtx.clock.Now(),
- }
- testCtx.assertFlapCountUpdated()
- testCtx.assertFlapCountUpdates(expectedUpdate)
-
- testCtx.stop()
-}
-
-// TestGetChanInfo tests the GetChanInfo function for the cases where a channel
-// is known and unknown to the store.
-func TestGetChanInfo(t *testing.T) {
- ctx := newChanEventStoreTestCtx(t)
- ctx.start()
-
- // Make a note of the time that our mocked clock starts on.
- now := ctx.clock.Now()
-
- // Create mock vars for a channel but do not add them to our store yet.
- peer, pk, channel := ctx.newChannel()
-
- // Send an online event for our peer, although we do not yet have an
- // open channel.
- ctx.peerEvent(peer, true)
-
- // Try to get info for a channel that has not been opened yet, we
- // expect to get an error.
- _, err := ctx.store.GetChanInfo(channel, peer)
- require.True(t, ErrChannelNotFound.Is(err))
-
- // Now we send our store a notification that a channel has been opened.
- ctx.sendChannelOpenedUpdate(pk, channel)
-
- // Wait for our channel to be recognized by our store. We need to wait
- // for the channel to be created so that we do not update our time
- // before the channel open is processed.
- require.Eventually(t, func() bool {
- _, err = ctx.store.GetChanInfo(channel, peer)
- return err == nil
- }, timeout, time.Millisecond*20)
-
- // Increment our test clock by an hour.
- now = now.Add(time.Hour)
- ctx.clock.SetTime(now)
-
- // At this stage our channel has been open and online for an hour.
- info, err := ctx.store.GetChanInfo(channel, peer)
- util.RequireNoErr(t, err)
- require.Equal(t, time.Hour, info.Lifetime)
- require.Equal(t, time.Hour, info.Uptime)
-
- // Now we send a peer offline event for our channel.
- ctx.peerEvent(peer, false)
-
- // Since we have not bumped our mocked time, our uptime calculations
- // should be the same, even though we've just processed an offline
- // event.
- info, err = ctx.store.GetChanInfo(channel, peer)
- util.RequireNoErr(t, err)
- require.Equal(t, time.Hour, info.Lifetime)
- require.Equal(t, time.Hour, info.Uptime)
-
- // Progress our time again. This time, our peer is currently tracked as
- // being offline, so we expect our channel info to reflect that the peer
- // has been offline for this period.
- now = now.Add(time.Hour)
- ctx.clock.SetTime(now)
-
- info, err = ctx.store.GetChanInfo(channel, peer)
- util.RequireNoErr(t, err)
- require.Equal(t, time.Hour*2, info.Lifetime)
- require.Equal(t, time.Hour, info.Uptime)
-
- ctx.stop()
-}
-
-// TestFlapCount tests querying the store for peer flap counts, covering the
-// case where the peer is tracked in memory, and the case where we need to
-// lookup the peer on disk.
-func TestFlapCount(t *testing.T) {
- clock := clock.NewTestClock(testNow)
-
- var (
- peer = route.Vertex{9, 9, 9}
- peerFlapCount = 3
- lastFlap = clock.Now()
- )
-
- // Create a test context with one peer's flap count already recorded,
- // which mocks it already having its flap count stored on disk.
- ctx := newChanEventStoreTestCtx(t)
- ctx.flapUpdates[peer] = &channeldb.FlapCount{
- Count: uint32(peerFlapCount),
- LastFlap: lastFlap,
- }
-
- ctx.start()
-
- // Create test variables for a peer and channel, but do not add it to
- // our store yet.
- peer1 := route.Vertex{1, 2, 3}
-
- // First, query for a peer that we have no record of in memory or on
- // disk and confirm that we indicate that the peer was not found.
- _, ts, err := ctx.store.FlapCount(peer1)
- util.RequireNoErr(t, err)
- require.Nil(t, ts)
-
- // Send an online event for our peer.
- ctx.peerEvent(peer1, true)
-
- // Assert that we now find a record of the peer with flap count = 1.
- count, ts, err := ctx.store.FlapCount(peer1)
- util.RequireNoErr(t, err)
- require.Equal(t, lastFlap, *ts)
- require.Equal(t, 1, count)
-
- // Make a request for our peer that not tracked in memory, but does
- // have its flap count stored on disk.
- count, ts, err = ctx.store.FlapCount(peer)
- util.RequireNoErr(t, err)
- require.Equal(t, lastFlap, *ts)
- require.Equal(t, peerFlapCount, count)
-
- ctx.stop()
-}
diff --git a/lnd/chanfitness/chaneventstore_testctx_test.go b/lnd/chanfitness/chaneventstore_testctx_test.go
deleted file mode 100644
index b58d0b78..00000000
--- a/lnd/chanfitness/chaneventstore_testctx_test.go
+++ /dev/null
@@ -1,308 +0,0 @@
-package chanfitness
-
-import (
- "math/big"
- "testing"
- "time"
-
- "github.com/pkt-cash/pktd/btcec"
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/btcutil/util"
- "github.com/pkt-cash/pktd/chaincfg/chainhash"
- "github.com/pkt-cash/pktd/lnd/channeldb"
- "github.com/pkt-cash/pktd/lnd/channelnotifier"
- "github.com/pkt-cash/pktd/lnd/clock"
- "github.com/pkt-cash/pktd/lnd/peernotifier"
- "github.com/pkt-cash/pktd/lnd/routing/route"
- "github.com/pkt-cash/pktd/lnd/subscribe"
- "github.com/pkt-cash/pktd/lnd/ticker"
- "github.com/pkt-cash/pktd/wire"
- "github.com/stretchr/testify/require"
-)
-
-// timeout is the amount of time we allow our blocking test calls.
-var timeout = time.Second
-
-// chanEventStoreTestCtx is a helper struct which can be used to test the
-// channel event store.
-type chanEventStoreTestCtx struct {
- t *testing.T
-
- store *ChannelEventStore
-
- channelSubscription *mockSubscription
- peerSubscription *mockSubscription
-
- // testVarIdx is an index which will be used to deterministically add
- // channels and public keys to our test context. We use a single value
- // for a single pubkey + channel combination because its actual value
- // does not matter.
- testVarIdx int
-
- // clock is the clock that our test store will use.
- clock *clock.TestClock
-
- // flapUpdates stores our most recent set of updates flap counts.
- flapUpdates peerFlapCountMap
-
- // flapCountUpdates is a channel which receives new flap counts.
- flapCountUpdates chan peerFlapCountMap
-
- // stopped is closed when our test context is fully shutdown. It is
- // used to prevent calling of functions which can only be called after
- // shutdown.
- stopped chan struct{}
-}
-
-// newChanEventStoreTestCtx creates a test context which can be used to test
-// the event store.
-func newChanEventStoreTestCtx(t *testing.T) *chanEventStoreTestCtx {
- testCtx := &chanEventStoreTestCtx{
- t: t,
- channelSubscription: newMockSubscription(t),
- peerSubscription: newMockSubscription(t),
- clock: clock.NewTestClock(testNow),
- flapUpdates: make(peerFlapCountMap),
- flapCountUpdates: make(chan peerFlapCountMap),
- stopped: make(chan struct{}),
- }
-
- cfg := &Config{
- Clock: testCtx.clock,
- SubscribeChannelEvents: func() (subscribe.Subscription, er.R) {
- return testCtx.channelSubscription, nil
- },
- SubscribePeerEvents: func() (subscribe.Subscription, er.R) {
- return testCtx.peerSubscription, nil
- },
- GetOpenChannels: func() ([]*channeldb.OpenChannel, er.R) {
- return nil, nil
- },
- WriteFlapCount: func(updates map[route.Vertex]*channeldb.FlapCount) er.R {
- // Send our whole update map into the test context's
- // updates channel. The test will need to assert flap
- // count updated or this send will timeout.
- select {
- case testCtx.flapCountUpdates <- updates:
-
- case <-time.After(timeout):
- t.Fatalf("WriteFlapCount timeout")
- }
-
- return nil
- },
- ReadFlapCount: func(peer route.Vertex) (*channeldb.FlapCount, er.R) {
- count, ok := testCtx.flapUpdates[peer]
- if !ok {
- return nil, channeldb.ErrNoPeerBucket.Default()
- }
-
- return count, nil
- },
- FlapCountTicker: ticker.NewForce(FlapCountFlushRate),
- }
-
- testCtx.store = NewChannelEventStore(cfg)
-
- return testCtx
-}
-
-// start starts the test context's event store.
-func (c *chanEventStoreTestCtx) start() {
- util.RequireNoErr(c.t, c.store.Start())
-}
-
-// stop stops the channel event store's subscribe servers and the store itself.
-func (c *chanEventStoreTestCtx) stop() {
- // On shutdown of our event store, we write flap counts to disk. In our
- // test context, this write function is blocked on asserting that the
- // update has occurred. We stop our store in a goroutine so that we
- // can shut it down and assert that it performs these on-shutdown
- // updates. The stopped channel is used to ensure that we do not finish
- // our test before this shutdown has completed.
- go func() {
- c.store.Stop()
- close(c.stopped)
- }()
-
- // We write our flap count to disk on shutdown, assert that the most
- // recent record that the server has is written on shutdown. Calling
- // this assert unblocks the stop function above. We don't check values
- // here, so that our tests don't all require providing an expected swap
- // count, but at least assert that the write occurred.
- c.assertFlapCountUpdated()
-
- <-c.stopped
-
- // Make sure that the cancel function was called for both of our
- // subscription mocks.
- c.channelSubscription.assertCancelled()
- c.peerSubscription.assertCancelled()
-}
-
-// newChannel creates a new, unique test channel. Note that this function
-// does not add it to the test event store, it just creates mocked values.
-func (c *chanEventStoreTestCtx) newChannel() (route.Vertex, *btcec.PublicKey,
- wire.OutPoint) {
-
- // Create a pubkey for our channel peer.
- pubKey := &btcec.PublicKey{
- X: big.NewInt(int64(c.testVarIdx)),
- Y: big.NewInt(int64(c.testVarIdx)),
- Curve: btcec.S256(),
- }
-
- // Create vertex from our pubkey.
- vertex, err := route.NewVertexFromBytes(pubKey.SerializeCompressed())
- util.RequireNoErr(c.t, err)
-
- // Create a channel point using our channel index, then increment it.
- chanPoint := wire.OutPoint{
- Hash: [chainhash.HashSize]byte{1, 2, 3},
- Index: uint32(c.testVarIdx),
- }
-
- // Increment the index we use so that the next channel and pubkey we
- // create will be unique.
- c.testVarIdx++
-
- return vertex, pubKey, chanPoint
-}
-
-// createChannel creates a new channel, notifies the event store that it has
-// been created and returns the peer vertex, pubkey and channel point.
-func (c *chanEventStoreTestCtx) createChannel() (route.Vertex, *btcec.PublicKey,
- wire.OutPoint) {
-
- vertex, pubKey, chanPoint := c.newChannel()
- c.sendChannelOpenedUpdate(pubKey, chanPoint)
-
- return vertex, pubKey, chanPoint
-}
-
-// closeChannel sends a close channel event to our subscribe server.
-func (c *chanEventStoreTestCtx) closeChannel(channel wire.OutPoint,
- peer *btcec.PublicKey) {
-
- update := channelnotifier.ClosedChannelEvent{
- CloseSummary: &channeldb.ChannelCloseSummary{
- ChanPoint: channel,
- RemotePub: peer,
- },
- }
-
- c.channelSubscription.sendUpdate(update)
-}
-
-// tickFlapCount forces a tick for our flap count ticker with the current time.
-func (c *chanEventStoreTestCtx) tickFlapCount() {
- testTicker := c.store.cfg.FlapCountTicker.(*ticker.Force)
-
- select {
- case testTicker.Force <- c.store.cfg.Clock.Now():
-
- case <-time.After(timeout):
- c.t.Fatalf("could not tick flap count ticker")
- }
-}
-
-// peerEvent sends a peer online or offline event to the store for the peer
-// provided.
-func (c *chanEventStoreTestCtx) peerEvent(peer route.Vertex, online bool) {
- var update interface{}
- if online {
- update = peernotifier.PeerOnlineEvent{PubKey: peer}
- } else {
- update = peernotifier.PeerOfflineEvent{PubKey: peer}
- }
-
- c.peerSubscription.sendUpdate(update)
-}
-
-// sendChannelOpenedUpdate notifies the test event store that a channel has
-// been opened.
-func (c *chanEventStoreTestCtx) sendChannelOpenedUpdate(pubkey *btcec.PublicKey,
- channel wire.OutPoint) {
-
- update := channelnotifier.OpenChannelEvent{
- Channel: &channeldb.OpenChannel{
- FundingOutpoint: channel,
- IdentityPub: pubkey,
- },
- }
-
- c.channelSubscription.sendUpdate(update)
-}
-
-// assertFlapCountUpdated asserts that our store has made an attempt to write
-// our current set of flap counts to disk and sets this value in our test ctx.
-// Note that it does not check the values of the update.
-func (c *chanEventStoreTestCtx) assertFlapCountUpdated() {
- select {
- case c.flapUpdates = <-c.flapCountUpdates:
-
- case <-time.After(timeout):
- c.t.Fatalf("assertFlapCountUpdated timeout")
- }
-}
-
-// assertFlapCountUpdates asserts that out current record of flap counts is
-// as expected.
-func (c *chanEventStoreTestCtx) assertFlapCountUpdates(expected peerFlapCountMap) {
- require.Equal(c.t, expected, c.flapUpdates)
-}
-
-// mockSubscription is a mock subscription client that blocks on sends into the
-// updates channel. We use this mock rather than an actual subscribe client
-// because they do not block, which makes tests race (because we have no way
-// to guarantee that the test client consumes the update before shutdown).
-type mockSubscription struct {
- t *testing.T
- updates chan interface{}
-
- // Embed the subscription interface in this mock so that we satisfy it.
- subscribe.Subscription
-}
-
-// newMockSubscription creates a mock subscription.
-func newMockSubscription(t *testing.T) *mockSubscription {
- return &mockSubscription{
- t: t,
- updates: make(chan interface{}),
- }
-}
-
-// sendUpdate sends an update into our updates channel, mocking the dispatch of
-// an update from a subscription server. This call will fail the test if the
-// update is not consumed within our timeout.
-func (m *mockSubscription) sendUpdate(update interface{}) {
- select {
- case m.updates <- update:
-
- case <-time.After(timeout):
- m.t.Fatalf("update: %v timeout", update)
- }
-}
-
-// Updates returns the updates channel for the mock.
-func (m *mockSubscription) Updates() <-chan interface{} {
- return m.updates
-}
-
-// Cancel should be called in case the client no longer wants to subscribe for
-// updates from the server.
-func (m *mockSubscription) Cancel() {
- close(m.updates)
-}
-
-// assertCancelled asserts that the cancel function has been called for this
-// mock.
-func (m *mockSubscription) assertCancelled() {
- select {
- case _, open := <-m.updates:
- require.False(m.t, open, "subscription not cancelled")
-
- case <-time.After(timeout):
- m.t.Fatalf("assert cancelled timeout")
- }
-}
diff --git a/lnd/chanfitness/interface.go b/lnd/chanfitness/interface.go
deleted file mode 100644
index 307fe6ee..00000000
--- a/lnd/chanfitness/interface.go
+++ /dev/null
@@ -1,35 +0,0 @@
-package chanfitness
-
-import (
- "time"
-
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/wire"
-)
-
-// peerMonitor is an interface implemented by entities that monitor our peers
-// online events and the channels we currently have open with them.
-type peerMonitor interface {
- // event adds an online or offline event.
- onlineEvent(online bool)
-
- // addChannel adds a new channel.
- addChannel(channelPoint wire.OutPoint) er.R
-
- // removeChannel removes a channel.
- removeChannel(channelPoint wire.OutPoint) er.R
-
- // channelCount returns the number of channels that we currently have
- // with the peer.
- channelCount() int
-
- // channelUptime looks up a channel and returns the amount of time that
- // the channel has been monitored for and its uptime over this period.
- channelUptime(channelPoint wire.OutPoint) (time.Duration,
- time.Duration, er.R)
-
- // getFlapCount returns the peer's flap count and the timestamp that we
- // last recorded a flap, which may be nil if we have never recorded a
- // flap for this peer.
- getFlapCount() (int, *time.Time)
-}
diff --git a/lnd/chanfitness/rate_limit.go b/lnd/chanfitness/rate_limit.go
deleted file mode 100644
index b070a445..00000000
--- a/lnd/chanfitness/rate_limit.go
+++ /dev/null
@@ -1,82 +0,0 @@
-package chanfitness
-
-import (
- "math"
- "time"
-)
-
-const (
- // rateLimitScale is the number of events we allow per rate limited
- // tier. Increasing this value makes our rate limiting more lenient,
- // decreasing it makes us less lenient.
- rateLimitScale = 200
-
- // flapCountCooldownFactor is the factor by which we decrease a peer's
- // flap count if they have not flapped for the cooldown period.
- flapCountCooldownFactor = 0.95
-
- // flapCountCooldownPeriod is the amount of time that we require a peer
- // has not flapped for before we reduce their all time flap count using
- // our cooldown factor.
- flapCountCooldownPeriod = time.Hour * 8
-)
-
-// rateLimits is the set of rate limit tiers we apply to our peers based on
-// their flap count. A peer can be placed in their tier by dividing their flap
-// count by the rateLimitScale and returning the value at that index.
-var rateLimits = []time.Duration{
- time.Second,
- time.Second * 5,
- time.Second * 30,
- time.Minute,
- time.Minute * 30,
- time.Hour,
-}
-
-// getRateLimit returns the value of the rate limited tier that we are on based
-// on current flap count. If a peer's flap count exceeds the top tier, we just
-// return our highest tier.
-func getRateLimit(flapCount int) time.Duration {
- // Figure out the tier we fall into based on our current flap count.
- tier := flapCount / rateLimitScale
-
- // If we have more events than our number of tiers, we just use the
- // last tier
- tierLen := len(rateLimits)
- if tier >= tierLen {
- tier = tierLen - 1
- }
-
- return rateLimits[tier]
-}
-
-// cooldownFlapCount takes a timestamped flap count, and returns its value
-// scaled down by our cooldown factor if at least our cooldown period has
-// elapsed since the peer last flapped. We do this because we store all-time
-// flap count for peers, and want to allow downgrading of peers that have not
-// flapped for a long time.
-func cooldownFlapCount(now time.Time, flapCount int,
- lastFlap time.Time) int {
-
- // Calculate time since our last flap, and the number of times we need
- // to apply our cooldown factor.
- timeSinceFlap := now.Sub(lastFlap)
-
- // If our cooldown period has not elapsed yet, we just return our flap
- // count. We allow fractional cooldown periods once this period has
- // elapsed, so we do not want to apply a fractional cooldown before the
- // full cooldown period has elapsed.
- if timeSinceFlap < flapCountCooldownPeriod {
- return flapCount
- }
-
- // Get the factor by which we need to cooldown our flap count. If
- // insufficient time has passed to cooldown our flap count. Use use a
- // float so that we allow fractional cooldown periods.
- cooldownPeriods := float64(timeSinceFlap) /
- float64(flapCountCooldownPeriod)
-
- effectiveFactor := math.Pow(flapCountCooldownFactor, cooldownPeriods)
-
- return int(float64(flapCount) * effectiveFactor)
-}
diff --git a/lnd/chanfitness/rate_limit_test.go b/lnd/chanfitness/rate_limit_test.go
deleted file mode 100644
index b9bca808..00000000
--- a/lnd/chanfitness/rate_limit_test.go
+++ /dev/null
@@ -1,105 +0,0 @@
-package chanfitness
-
-import (
- "testing"
- "time"
-
- "github.com/stretchr/testify/require"
-)
-
-// TestGetRateLimit tests getting of our rate limit using the current constants.
-// It creates test cases that are relative to our constants so that they
-// can be adjusted without breaking the unit test.
-func TestGetRateLimit(t *testing.T) {
- tests := []struct {
- name string
- flapCount int
- rateLimit time.Duration
- }{
- {
- name: "zero flaps",
- flapCount: 0,
- rateLimit: rateLimits[0],
- },
- {
- name: "middle tier",
- flapCount: rateLimitScale * (len(rateLimits) / 2),
- rateLimit: rateLimits[len(rateLimits)/2],
- },
- {
- name: "last tier",
- flapCount: rateLimitScale * (len(rateLimits) - 1),
- rateLimit: rateLimits[len(rateLimits)-1],
- },
- {
- name: "beyond last tier",
- flapCount: rateLimitScale * (len(rateLimits) * 2),
- rateLimit: rateLimits[len(rateLimits)-1],
- },
- }
-
- for _, test := range tests {
- test := test
-
- t.Run(test.name, func(t *testing.T) {
- t.Parallel()
-
- limit := getRateLimit(test.flapCount)
- require.Equal(t, test.rateLimit, limit)
- })
- }
-}
-
-// TestCooldownFlapCount tests cooldown of all time flap counts.
-func TestCooldownFlapCount(t *testing.T) {
- tests := []struct {
- name string
- flapCount int
- lastFlap time.Time
- expected int
- }{
- {
- name: "just flapped, do not cooldown",
- flapCount: 1,
- lastFlap: testNow,
- expected: 1,
- },
- {
- name: "period not elapsed, do not cooldown",
- flapCount: 1,
- lastFlap: testNow.Add(flapCountCooldownPeriod / 2 * -1),
- expected: 1,
- },
- {
- name: "rounded to 0",
- flapCount: 1,
- lastFlap: testNow.Add(flapCountCooldownPeriod * -1),
- expected: 0,
- },
- {
- name: "decreased to integer value",
- flapCount: 10,
- lastFlap: testNow.Add(flapCountCooldownPeriod * -1),
- expected: 9,
- },
- {
- name: "multiple cooldown periods",
- flapCount: 10,
- lastFlap: testNow.Add(flapCountCooldownPeriod * -3),
- expected: 8,
- },
- }
-
- for _, test := range tests {
- test := test
-
- t.Run(test.name, func(t *testing.T) {
- t.Parallel()
-
- flapCount := cooldownFlapCount(
- testNow, test.flapCount, test.lastFlap,
- )
- require.Equal(t, test.expected, flapCount)
- })
- }
-}
diff --git a/lnd/channel_notifier.go b/lnd/channel_notifier.go
deleted file mode 100644
index 11d1ff02..00000000
--- a/lnd/channel_notifier.go
+++ /dev/null
@@ -1,158 +0,0 @@
-package lnd
-
-import (
- "fmt"
- "net"
-
- "github.com/pkt-cash/pktd/btcec"
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/lnd/chanbackup"
- "github.com/pkt-cash/pktd/lnd/channeldb"
- "github.com/pkt-cash/pktd/lnd/channelnotifier"
- "github.com/pkt-cash/pktd/pktlog/log"
- "github.com/pkt-cash/pktd/wire"
-)
-
-// addrSource is an interface that allow us to get the addresses for a target
-// node. We'll need this in order to be able to properly proxy the
-// notifications to create SCBs.
-type addrSource interface {
- // AddrsForNode returns all known addresses for the target node public
- // key.
- AddrsForNode(nodePub *btcec.PublicKey) ([]net.Addr, er.R)
-}
-
-// channelNotifier is an implementation of the chanbackup.ChannelNotifier
-// interface using the existing channelnotifier.ChannelNotifier struct. This
-// implementation allows us to satisfy all the dependencies of the
-// chanbackup.SubSwapper struct.
-type channelNotifier struct {
- // chanNotifier is the based channel notifier that we'll proxy requests
- // from.
- chanNotifier *channelnotifier.ChannelNotifier
-
- // addrs is an implementation of the addrSource interface that allows
- // us to get the latest set of addresses for a given node. We'll need
- // this to be able to create an SCB for new channels.
- addrs addrSource
-}
-
-// SubscribeChans requests a new channel subscription relative to the initial
-// set of known channels. We use the knownChans as a synchronization point to
-// ensure that the chanbackup.SubSwapper does not miss any channel open or
-// close events in the period between when it's created, and when it requests
-// the channel subscription.
-//
-// NOTE: This is part of the chanbackup.ChannelNotifier interface.
-func (c *channelNotifier) SubscribeChans(startingChans map[wire.OutPoint]struct{}) (
- *chanbackup.ChannelSubscription, er.R) {
-
- log.Infof("Channel backup proxy channel notifier starting")
-
- // TODO(roasbeef): read existing set of chans and diff
-
- quit := make(chan struct{})
- chanUpdates := make(chan chanbackup.ChannelEvent, 1)
-
- // sendChanOpenUpdate is a closure that sends a ChannelEvent to the
- // chanUpdates channel to inform subscribers about new pending or
- // confirmed channels.
- sendChanOpenUpdate := func(newOrPendingChan *channeldb.OpenChannel) {
- nodeAddrs, err := c.addrs.AddrsForNode(
- newOrPendingChan.IdentityPub,
- )
- if err != nil {
- pub := newOrPendingChan.IdentityPub
- log.Errorf("unable to fetch addrs for %x: %v",
- pub.SerializeCompressed(), err)
- }
-
- chanEvent := chanbackup.ChannelEvent{
- NewChans: []chanbackup.ChannelWithAddrs{
- {
- OpenChannel: newOrPendingChan,
- Addrs: nodeAddrs,
- },
- },
- }
-
- select {
- case chanUpdates <- chanEvent:
- case <-quit:
- return
- }
- }
-
- // In order to adhere to the interface, we'll proxy the events from the
- // channel notifier to the sub-swapper in a format it understands.
- go func() {
- // First, we'll subscribe to the primary channel notifier so we can
- // obtain events for new opened/closed channels.
- chanSubscription, err := c.chanNotifier.SubscribeChannelEvents()
- if err != nil {
- panic(fmt.Sprintf("unable to subscribe to chans: %v",
- err))
- }
-
- defer chanSubscription.Cancel()
-
- for {
- select {
-
- // A new event has been sent by the chanNotifier, we'll
- // filter out the events we actually care about and
- // send them to the sub-swapper.
- case e := <-chanSubscription.Updates():
- // TODO(roasbeef): batch dispatch ntnfs
-
- switch event := e.(type) {
- // A new channel has been opened and is still
- // pending. We can still create a backup, even
- // if the final channel ID is not yet available.
- case channelnotifier.PendingOpenChannelEvent:
- pendingChan := event.PendingChannel
- sendChanOpenUpdate(pendingChan)
-
- // A new channel has been confirmed, we'll
- // obtain the node address, then send to the
- // sub-swapper.
- case channelnotifier.OpenChannelEvent:
- sendChanOpenUpdate(event.Channel)
-
- // An existing channel has been closed, we'll
- // send only the chanPoint of the closed
- // channel to the sub-swapper.
- case channelnotifier.ClosedChannelEvent:
- chanPoint := event.CloseSummary.ChanPoint
- chanEvent := chanbackup.ChannelEvent{
- ClosedChans: []wire.OutPoint{
- chanPoint,
- },
- }
-
- select {
- case chanUpdates <- chanEvent:
- case <-quit:
- return
- }
- }
-
- // The cancel method has been called, signalling us to
- // exit
- case <-quit:
- return
- }
- }
- }()
-
- return &chanbackup.ChannelSubscription{
- ChanUpdates: chanUpdates,
- Cancel: func() {
- close(quit)
- },
- }, nil
-}
-
-// A compile-time constraint to ensure channelNotifier implements
-// chanbackup.ChannelNotifier.
-var _ chanbackup.ChannelNotifier = (*channelNotifier)(nil)
diff --git a/lnd/channeldb/README.md b/lnd/channeldb/README.md
deleted file mode 100644
index 7e3a81ef..00000000
--- a/lnd/channeldb/README.md
+++ /dev/null
@@ -1,24 +0,0 @@
-channeldb
-==========
-
-[![Build Status](http://img.shields.io/travis/lightningnetwork/lnd.svg)](https://travis-ci.org/lightningnetwork/lnd)
-[![MIT licensed](https://img.shields.io/badge/license-MIT-blue.svg)](https://github.com/lightningnetwork/lnd/blob/master/LICENSE)
-[![GoDoc](https://img.shields.io/badge/godoc-reference-blue.svg)](http://godoc.org/github.com/lightningnetwork/lnd/channeldb)
-
-The channeldb implements the persistent storage engine for `lnd` and
-generically a data storage layer for the required state within the Lightning
-Network. The backing storage engine is
-[boltdb](https://github.com/coreos/bbolt), an embedded pure-go key-value store
-based off of LMDB.
-
-The package implements an object-oriented storage model with queries and
-mutations flowing through a particular object instance rather than the database
-itself. The storage implemented by the objects includes: open channels, past
-commitment revocation states, the channel graph which includes authenticated
-node and channel announcements, outgoing payments, and invoices
-
-## Installation and Updating
-
-```bash
-$ go get -u github.com/lightningnetwork/lnd/channeldb
-```
diff --git a/lnd/channeldb/addr.go b/lnd/channeldb/addr.go
deleted file mode 100644
index 843ee963..00000000
--- a/lnd/channeldb/addr.go
+++ /dev/null
@@ -1,221 +0,0 @@
-package channeldb
-
-import (
- "encoding/binary"
- "io"
- "net"
-
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/btcutil/util"
- "github.com/pkt-cash/pktd/lnd/tor"
-)
-
-// addressType specifies the network protocol and version that should be used
-// when connecting to a node at a particular address.
-type addressType uint8
-
-const (
- // tcp4Addr denotes an IPv4 TCP address.
- tcp4Addr addressType = 0
-
- // tcp6Addr denotes an IPv6 TCP address.
- tcp6Addr addressType = 1
-
- // v2OnionAddr denotes a version 2 Tor onion service address.
- v2OnionAddr addressType = 2
-
- // v3OnionAddr denotes a version 3 Tor (prop224) onion service address.
- v3OnionAddr addressType = 3
-)
-
-// encodeTCPAddr serializes a TCP address into its compact raw bytes
-// representation.
-func encodeTCPAddr(w io.Writer, addr *net.TCPAddr) er.R {
- var (
- addrType byte
- ip []byte
- )
-
- if addr.IP.To4() != nil {
- addrType = byte(tcp4Addr)
- ip = addr.IP.To4()
- } else {
- addrType = byte(tcp6Addr)
- ip = addr.IP.To16()
- }
-
- if ip == nil {
- return er.Errorf("unable to encode IP %v", addr.IP)
- }
-
- if _, err := util.Write(w, []byte{addrType}); err != nil {
- return err
- }
-
- if _, err := util.Write(w, ip); err != nil {
- return err
- }
-
- var port [2]byte
- byteOrder.PutUint16(port[:], uint16(addr.Port))
- if _, err := util.Write(w, port[:]); err != nil {
- return err
- }
-
- return nil
-}
-
-// encodeOnionAddr serializes an onion address into its compact raw bytes
-// representation.
-func encodeOnionAddr(w io.Writer, addr *tor.OnionAddr) er.R {
- var suffixIndex int
- hostLen := len(addr.OnionService)
- switch hostLen {
- case tor.V2Len:
- if _, err := util.Write(w, []byte{byte(v2OnionAddr)}); err != nil {
- return err
- }
- suffixIndex = tor.V2Len - tor.OnionSuffixLen
- case tor.V3Len:
- if _, err := util.Write(w, []byte{byte(v3OnionAddr)}); err != nil {
- return err
- }
- suffixIndex = tor.V3Len - tor.OnionSuffixLen
- default:
- return er.New("unknown onion service length")
- }
-
- suffix := addr.OnionService[suffixIndex:]
- if suffix != tor.OnionSuffix {
- return er.Errorf("invalid suffix \"%v\"", suffix)
- }
-
- host, errr := tor.Base32Encoding.DecodeString(
- addr.OnionService[:suffixIndex],
- )
- if errr != nil {
- return er.E(errr)
- }
-
- // Sanity check the decoded length.
- switch {
- case hostLen == tor.V2Len && len(host) != tor.V2DecodedLen:
- return er.Errorf("onion service %v decoded to invalid host %x",
- addr.OnionService, host)
-
- case hostLen == tor.V3Len && len(host) != tor.V3DecodedLen:
- return er.Errorf("onion service %v decoded to invalid host %x",
- addr.OnionService, host)
- }
-
- if _, err := util.Write(w, host); err != nil {
- return err
- }
-
- var port [2]byte
- byteOrder.PutUint16(port[:], uint16(addr.Port))
- if _, err := util.Write(w, port[:]); err != nil {
- return err
- }
-
- return nil
-}
-
-// deserializeAddr reads the serialized raw representation of an address and
-// deserializes it into the actual address. This allows us to avoid address
-// resolution within the channeldb package.
-func deserializeAddr(r io.Reader) (net.Addr, er.R) {
- var addrType [1]byte
- if _, err := r.Read(addrType[:]); err != nil {
- return nil, er.E(err)
- }
-
- var address net.Addr
- switch addressType(addrType[0]) {
- case tcp4Addr:
- var ip [4]byte
- if _, err := r.Read(ip[:]); err != nil {
- return nil, er.E(err)
- }
-
- var port [2]byte
- if _, err := r.Read(port[:]); err != nil {
- return nil, er.E(err)
- }
-
- address = &net.TCPAddr{
- IP: net.IP(ip[:]),
- Port: int(binary.BigEndian.Uint16(port[:])),
- }
- case tcp6Addr:
- var ip [16]byte
- if _, err := r.Read(ip[:]); err != nil {
- return nil, er.E(err)
- }
-
- var port [2]byte
- if _, err := r.Read(port[:]); err != nil {
- return nil, er.E(err)
- }
-
- address = &net.TCPAddr{
- IP: net.IP(ip[:]),
- Port: int(binary.BigEndian.Uint16(port[:])),
- }
- case v2OnionAddr:
- var h [tor.V2DecodedLen]byte
- if _, err := r.Read(h[:]); err != nil {
- return nil, er.E(err)
- }
-
- var p [2]byte
- if _, err := r.Read(p[:]); err != nil {
- return nil, er.E(err)
- }
-
- onionService := tor.Base32Encoding.EncodeToString(h[:])
- onionService += tor.OnionSuffix
- port := int(binary.BigEndian.Uint16(p[:]))
-
- address = &tor.OnionAddr{
- OnionService: onionService,
- Port: port,
- }
- case v3OnionAddr:
- var h [tor.V3DecodedLen]byte
- if _, err := r.Read(h[:]); err != nil {
- return nil, er.E(err)
- }
-
- var p [2]byte
- if _, err := r.Read(p[:]); err != nil {
- return nil, er.E(err)
- }
-
- onionService := tor.Base32Encoding.EncodeToString(h[:])
- onionService += tor.OnionSuffix
- port := int(binary.BigEndian.Uint16(p[:]))
-
- address = &tor.OnionAddr{
- OnionService: onionService,
- Port: port,
- }
- default:
- return nil, ErrUnknownAddressType.Default()
- }
-
- return address, nil
-}
-
-// serializeAddr serializes an address into its raw bytes representation so that
-// it can be deserialized without requiring address resolution.
-func serializeAddr(w io.Writer, address net.Addr) er.R {
- switch addr := address.(type) {
- case *net.TCPAddr:
- return encodeTCPAddr(w, addr)
- case *tor.OnionAddr:
- return encodeOnionAddr(w, addr)
- default:
- return ErrUnknownAddressType.Default()
- }
-}
diff --git a/lnd/channeldb/addr_test.go b/lnd/channeldb/addr_test.go
deleted file mode 100644
index 460e2837..00000000
--- a/lnd/channeldb/addr_test.go
+++ /dev/null
@@ -1,149 +0,0 @@
-package channeldb
-
-import (
- "bytes"
- "net"
- "strings"
- "testing"
-
- "github.com/pkt-cash/pktd/lnd/tor"
-)
-
-type unknownAddrType struct{}
-
-func (t unknownAddrType) Network() string { return "unknown" }
-func (t unknownAddrType) String() string { return "unknown" }
-
-var testIP4 = net.ParseIP("192.168.1.1")
-var testIP6 = net.ParseIP("2001:0db8:0000:0000:0000:ff00:0042:8329")
-
-var addrTests = []struct {
- expAddr net.Addr
- serErr string
-}{
- // Valid addresses.
- {
- expAddr: &net.TCPAddr{
- IP: testIP4,
- Port: 12345,
- },
- },
- {
- expAddr: &net.TCPAddr{
- IP: testIP6,
- Port: 65535,
- },
- },
- {
- expAddr: &tor.OnionAddr{
- OnionService: "3g2upl4pq6kufc4m.onion",
- Port: 9735,
- },
- },
- {
- expAddr: &tor.OnionAddr{
- OnionService: "vww6ybal4bd7szmgncyruucpgfkqahzddi37ktceo3ah7ngmcopnpyyd.onion",
- Port: 80,
- },
- },
-
- // Invalid addresses.
- {
- expAddr: unknownAddrType{},
- serErr: "ErrUnknownAddressType",
- },
- {
- expAddr: &net.TCPAddr{
- // Remove last byte of IPv4 address.
- IP: testIP4[:len(testIP4)-1],
- Port: 12345,
- },
- serErr: "unable to encode",
- },
- {
- expAddr: &net.TCPAddr{
- // Add an extra byte of IPv4 address.
- IP: append(testIP4, 0xff),
- Port: 12345,
- },
- serErr: "unable to encode",
- },
- {
- expAddr: &net.TCPAddr{
- // Remove last byte of IPv6 address.
- IP: testIP6[:len(testIP6)-1],
- Port: 65535,
- },
- serErr: "unable to encode",
- },
- {
- expAddr: &net.TCPAddr{
- // Add an extra byte to the IPv6 address.
- IP: append(testIP6, 0xff),
- Port: 65535,
- },
- serErr: "unable to encode",
- },
- {
- expAddr: &tor.OnionAddr{
- // Invalid suffix.
- OnionService: "vww6ybal4bd7szmgncyruucpgfkqahzddi37ktceo3ah7ngmcopnpyyd.inion",
- Port: 80,
- },
- serErr: "invalid suffix",
- },
- {
- expAddr: &tor.OnionAddr{
- // Invalid length.
- OnionService: "vww6ybal4bd7szmgncyruucpgfkqahzddi37ktceo3ah7ngmcopnpyy.onion",
- Port: 80,
- },
- serErr: "unknown onion service length",
- },
- {
- expAddr: &tor.OnionAddr{
- // Invalid encoding.
- OnionService: "vww6ybal4bd7szmgncyruucpgfkqahzddi37ktceo3ah7ngmcopnpyyA.onion",
- Port: 80,
- },
- serErr: "illegal base32",
- },
-}
-
-// TestAddrSerialization tests that the serialization method used by channeldb
-// for net.Addr's works as intended.
-func TestAddrSerialization(t *testing.T) {
- t.Parallel()
-
- var b bytes.Buffer
- for _, test := range addrTests {
- err := serializeAddr(&b, test.expAddr)
- switch {
- case err == nil && test.serErr != "":
- t.Fatalf("expected serialization err for addr %v",
- test.expAddr)
-
- case err != nil && test.serErr == "":
- t.Fatalf("unexpected serialization err for addr %v: %v",
- test.expAddr, err)
-
- case err != nil && !strings.Contains(err.String(), test.serErr):
- t.Fatalf("unexpected serialization err for addr %v, "+
- "want: %v, got %v", test.expAddr, test.serErr,
- err)
-
- case err != nil:
- continue
- }
-
- addr, err := deserializeAddr(&b)
- if err != nil {
- t.Fatalf("unable to deserialize address: %v", err)
- }
-
- if addr.String() != test.expAddr.String() {
- t.Fatalf("expected address %v after serialization, "+
- "got %v", addr, test.expAddr)
- }
- }
-}
diff --git a/lnd/channeldb/channel.go b/lnd/channeldb/channel.go
deleted file mode 100644
index aa7f4f3a..00000000
--- a/lnd/channeldb/channel.go
+++ /dev/null
@@ -1,3506 +0,0 @@
-package channeldb
-
-import (
- "bytes"
- "crypto/sha256"
- "encoding/binary"
- "fmt"
- "io"
- "net"
- "strconv"
- "strings"
- "sync"
-
- "github.com/pkt-cash/pktd/btcec"
- "github.com/pkt-cash/pktd/btcutil"
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/btcutil/util"
- "github.com/pkt-cash/pktd/chaincfg/chainhash"
- "github.com/pkt-cash/pktd/lnd/channeldb/kvdb"
- "github.com/pkt-cash/pktd/lnd/input"
- "github.com/pkt-cash/pktd/lnd/keychain"
- "github.com/pkt-cash/pktd/lnd/lnwire"
- "github.com/pkt-cash/pktd/lnd/shachain"
- "github.com/pkt-cash/pktd/wire"
- "github.com/pkt-cash/pktd/wire/protocol"
-)
-
-const (
- // AbsoluteThawHeightThreshold is the threshold at which a thaw height
- // begins to be interpreted as an absolute block height, rather than a
- // relative one.
- AbsoluteThawHeightThreshold uint32 = 500000
-)
-
-var (
- // closedChannelBucket stores summarization information concerning
- // previously open, but now closed channels.
- closedChannelBucket = []byte("closed-chan-bucket")
-
- // openChanBucket stores all the currently open channels. This bucket
- // has a second, nested bucket which is keyed by a node's ID. Within
- // that node ID bucket, all attributes required to track, update, and
- // close a channel are stored.
- //
- // openChan -> nodeID -> chanPoint
- //
- // TODO(roasbeef): flesh out comment
- openChannelBucket = []byte("open-chan-bucket")
-
- // historicalChannelBucket stores all channels that have seen their
- // commitment tx confirm. All information from their previous open state
- // is retained.
- historicalChannelBucket = []byte("historical-chan-bucket")
-
- // chanInfoKey can be accessed within the bucket for a channel
- // (identified by its chanPoint). This key stores all the static
- // information for a channel which is decided at the end of the
- // funding flow.
- chanInfoKey = []byte("chan-info-key")
-
- // localUpfrontShutdownKey can be accessed within the bucket for a channel
- // (identified by its chanPoint). This key stores an optional upfront
- // shutdown script for the local peer.
- localUpfrontShutdownKey = []byte("local-upfront-shutdown-key")
-
- // remoteUpfrontShutdownKey can be accessed within the bucket for a channel
- // (identified by its chanPoint). This key stores an optional upfront
- // shutdown script for the remote peer.
- remoteUpfrontShutdownKey = []byte("remote-upfront-shutdown-key")
-
- // chanCommitmentKey can be accessed within the sub-bucket for a
- // particular channel. This key stores the up to date commitment state
- // for a particular channel party. Appending a 0 to the end of this key
- // indicates it's the commitment for the local party, and appending a 1
- // to the end of this key indicates it's the commitment for the remote
- // party.
- chanCommitmentKey = []byte("chan-commitment-key")
-
- // unsignedAckedUpdatesKey is an entry in the channel bucket that
- // contains the remote updates that we have acked, but not yet signed
- // for in one of our remote commits.
- unsignedAckedUpdatesKey = []byte("unsigned-acked-updates-key")
-
- // remoteUnsignedLocalUpdatesKey is an entry in the channel bucket that
- // contains the local updates that the remote party has acked, but
- // has not yet signed for in one of their local commits.
- remoteUnsignedLocalUpdatesKey = []byte("remote-unsigned-local-updates-key")
-
- // revocationStateKey stores their current revocation hash, our
- // preimage producer and their preimage store.
- revocationStateKey = []byte("revocation-state-key")
-
- // dataLossCommitPointKey stores the commitment point received from the
- // remote peer during a channel sync in case we have lost channel state.
- dataLossCommitPointKey = []byte("data-loss-commit-point-key")
-
- // forceCloseTxKey points to a the unilateral closing tx that we
- // broadcasted when moving the channel to state CommitBroadcasted.
- forceCloseTxKey = []byte("closing-tx-key")
-
- // coopCloseTxKey points to a the cooperative closing tx that we
- // broadcasted when moving the channel to state CoopBroadcasted.
- coopCloseTxKey = []byte("coop-closing-tx-key")
-
- // commitDiffKey stores the current pending commitment state we've
- // extended to the remote party (if any). Each time we propose a new
- // state, we store the information necessary to reconstruct this state
- // from the prior commitment. This allows us to resync the remote party
- // to their expected state in the case of message loss.
- //
- // TODO(roasbeef): rename to commit chain?
- commitDiffKey = []byte("commit-diff-key")
-
- // revocationLogBucket is dedicated for storing the necessary delta
- // state between channel updates required to re-construct a past state
- // in order to punish a counterparty attempting a non-cooperative
- // channel closure. This key should be accessed from within the
- // sub-bucket of a target channel, identified by its channel point.
- revocationLogBucket = []byte("revocation-log-key")
-
- // frozenChanKey is the key where we store the information for any
- // active "frozen" channels. This key is present only in the leaf
- // bucket for a given channel.
- frozenChanKey = []byte("frozen-chans")
-)
-
-var (
- // ErrNoCommitmentsFound is returned when a channel has not set
- // commitment states.
- ErrNoCommitmentsFound = Err.CodeWithDetail("ErrNoCommitmentsFound",
- "no commitments found")
-
- // ErrNoChanInfoFound is returned when a particular channel does not
- // have any channels state.
- ErrNoChanInfoFound = Err.CodeWithDetail("ErrNoChanInfoFound",
- "no chan info found")
-
- // ErrNoRevocationsFound is returned when revocation state for a
- // particular channel cannot be found.
- ErrNoRevocationsFound = Err.CodeWithDetail("ErrNoRevocationsFound",
- "no revocations found")
-
- // ErrNoPendingCommit is returned when there is not a pending
- // commitment for a remote party. A new commitment is written to disk
- // each time we write a new state in order to be properly fault
- // tolerant.
- ErrNoPendingCommit = Err.CodeWithDetail("ErrNoPendingCommit",
- "no pending commits found")
-
- // ErrInvalidCircuitKeyLen signals that a circuit key could not be
- // decoded because the byte slice is of an invalid length.
- ErrInvalidCircuitKeyLen = Err.CodeWithDetail("ErrInvalidCircuitKeyLen",
- "length of serialized circuit key must be 16 bytes")
-
- // ErrNoCommitPoint is returned when no data loss commit point is found
- // in the database.
- ErrNoCommitPoint = Err.CodeWithDetail("ErrNoCommitPoint",
- "no commit point found")
-
- // ErrNoCloseTx is returned when no closing tx is found for a channel
- // in the state CommitBroadcasted.
- ErrNoCloseTx = Err.CodeWithDetail("ErrNoCloseTx",
- "no closing tx found")
-
- // ErrNoRestoredChannelMutation is returned when a caller attempts to
- // mutate a channel that's been recovered.
- ErrNoRestoredChannelMutation = Err.CodeWithDetail("ErrNoRestoredChannelMutation",
- "cannot mutate restored channel state")
-
- // ErrChanBorked is returned when a caller attempts to mutate a borked
- // channel.
- ErrChanBorked = Err.CodeWithDetail("ErrChanBorked",
- "cannot mutate borked channel")
-
- // errLogEntryNotFound is returned when we cannot find a log entry at
- // the height requested in the revocation log.
- errLogEntryNotFound = Err.CodeWithDetail("errLogEntryNotFound",
- "log entry not found")
-
- // errHeightNotFound is returned when a query for channel balances at
- // a height that we have not reached yet is made.
- errHeightNotReached = Err.CodeWithDetail("errHeightNotReached",
- "height requested greater than current commit height")
-)
-
-// ChannelType is an enum-like type that describes one of several possible
-// channel types. Each open channel is associated with a particular type as the
-// channel type may determine how higher level operations are conducted such as
-// fee negotiation, channel closing, the format of HTLCs, etc. Structure-wise,
-// a ChannelType is a bit field, with each bit denoting a modification from the
-// base channel type of single funder.
-type ChannelType uint8
-
-const (
- // NOTE: iota isn't used here for this enum needs to be stable
- // long-term as it will be persisted to the database.
-
- // SingleFunderBit represents a channel wherein one party solely funds
- // the entire capacity of the channel.
- SingleFunderBit ChannelType = 0
-
- // DualFunderBit represents a channel wherein both parties contribute
- // funds towards the total capacity of the channel. The channel may be
- // funded symmetrically or asymmetrically.
- DualFunderBit ChannelType = 1 << 0
-
- // SingleFunderTweakless is similar to the basic SingleFunder channel
- // type, but it omits the tweak for one's key in the commitment
- // transaction of the remote party.
- SingleFunderTweaklessBit ChannelType = 1 << 1
-
- // NoFundingTxBit denotes if we have the funding transaction locally on
- // disk. This bit may be on if the funding transaction was crafted by a
- // wallet external to the primary daemon.
- NoFundingTxBit ChannelType = 1 << 2
-
- // AnchorOutputsBit indicates that the channel makes use of anchor
- // outputs to bump the commitment transaction's effective feerate. This
- // channel type also uses a delayed to_remote output script.
- AnchorOutputsBit ChannelType = 1 << 3
-
- // FrozenBit indicates that the channel is a frozen channel, meaning
- // that only the responder can decide to cooperatively close the
- // channel.
- FrozenBit ChannelType = 1 << 4
-)
-
-// IsSingleFunder returns true if the channel type if one of the known single
-// funder variants.
-func (c ChannelType) IsSingleFunder() bool {
- return c&DualFunderBit == 0
-}
-
-// IsDualFunder returns true if the ChannelType has the DualFunderBit set.
-func (c ChannelType) IsDualFunder() bool {
- return c&DualFunderBit == DualFunderBit
-}
-
-// IsTweakless returns true if the target channel uses a commitment that
-// doesn't tweak the key for the remote party.
-func (c ChannelType) IsTweakless() bool {
- return c&SingleFunderTweaklessBit == SingleFunderTweaklessBit
-}
-
-// HasFundingTx returns true if this channel type is one that has a funding
-// transaction stored locally.
-func (c ChannelType) HasFundingTx() bool {
- return c&NoFundingTxBit == 0
-}
-
-// HasAnchors returns true if this channel type has anchor ouputs on its
-// commitment.
-func (c ChannelType) HasAnchors() bool {
- return c&AnchorOutputsBit == AnchorOutputsBit
-}
-
-// IsFrozen returns true if the channel is considered to be "frozen". A frozen
-// channel means that only the responder can initiate a cooperative channel
-// closure.
-func (c ChannelType) IsFrozen() bool {
- return c&FrozenBit == FrozenBit
-}
-
-// ChannelConstraints represents a set of constraints meant to allow a node to
-// limit their exposure, enact flow control and ensure that all HTLCs are
-// economically relevant. This struct will be mirrored for both sides of the
-// channel, as each side will enforce various constraints that MUST be adhered
-// to for the life time of the channel. The parameters for each of these
-// constraints are static for the duration of the channel, meaning the channel
-// must be torn down for them to change.
-type ChannelConstraints struct {
- // DustLimit is the threshold (in satoshis) below which any outputs
- // should be trimmed. When an output is trimmed, it isn't materialized
- // as an actual output, but is instead burned to miner's fees.
- DustLimit btcutil.Amount
-
- // ChanReserve is an absolute reservation on the channel for the
- // owner of this set of constraints. This means that the current
- // settled balance for this node CANNOT dip below the reservation
- // amount. This acts as a defense against costless attacks when
- // either side no longer has any skin in the game.
- ChanReserve btcutil.Amount
-
- // MaxPendingAmount is the maximum pending HTLC value that the
- // owner of these constraints can offer the remote node at a
- // particular time.
- MaxPendingAmount lnwire.MilliSatoshi
-
- // MinHTLC is the minimum HTLC value that the owner of these
- // constraints can offer the remote node. If any HTLCs below this
- // amount are offered, then the HTLC will be rejected. This, in
- // tandem with the dust limit allows a node to regulate the
- // smallest HTLC that it deems economically relevant.
- MinHTLC lnwire.MilliSatoshi
-
- // MaxAcceptedHtlcs is the maximum number of HTLCs that the owner of
- // this set of constraints can offer the remote node. This allows each
- // node to limit their over all exposure to HTLCs that may need to be
- // acted upon in the case of a unilateral channel closure or a contract
- // breach.
- MaxAcceptedHtlcs uint16
-
- // CsvDelay is the relative time lock delay expressed in blocks. Any
- // settled outputs that pay to the owner of this channel configuration
- // MUST ensure that the delay branch uses this value as the relative
- // time lock. Similarly, any HTLC's offered by this node should use
- // this value as well.
- CsvDelay uint16
-}
-
-// ChannelConfig is a struct that houses the various configuration opens for
-// channels. Each side maintains an instance of this configuration file as it
-// governs: how the funding and commitment transaction to be created, the
-// nature of HTLC's allotted, the keys to be used for delivery, and relative
-// time lock parameters.
-type ChannelConfig struct {
- // ChannelConstraints is the set of constraints that must be upheld for
- // the duration of the channel for the owner of this channel
- // configuration. Constraints govern a number of flow control related
- // parameters, also including the smallest HTLC that will be accepted
- // by a participant.
- ChannelConstraints
-
- // MultiSigKey is the key to be used within the 2-of-2 output script
- // for the owner of this channel config.
- MultiSigKey keychain.KeyDescriptor
-
- // RevocationBasePoint is the base public key to be used when deriving
- // revocation keys for the remote node's commitment transaction. This
- // will be combined along with a per commitment secret to derive a
- // unique revocation key for each state.
- RevocationBasePoint keychain.KeyDescriptor
-
- // PaymentBasePoint is the base public key to be used when deriving
- // the key used within the non-delayed pay-to-self output on the
- // commitment transaction for a node. This will be combined with a
- // tweak derived from the per-commitment point to ensure unique keys
- // for each commitment transaction.
- PaymentBasePoint keychain.KeyDescriptor
-
- // DelayBasePoint is the base public key to be used when deriving the
- // key used within the delayed pay-to-self output on the commitment
- // transaction for a node. This will be combined with a tweak derived
- // from the per-commitment point to ensure unique keys for each
- // commitment transaction.
- DelayBasePoint keychain.KeyDescriptor
-
- // HtlcBasePoint is the base public key to be used when deriving the
- // local HTLC key. The derived key (combined with the tweak derived
- // from the per-commitment point) is used within the "to self" clause
- // within any HTLC output scripts.
- HtlcBasePoint keychain.KeyDescriptor
-}
-
-// ChannelCommitment is a snapshot of the commitment state at a particular
-// point in the commitment chain. With each state transition, a snapshot of the
-// current state along with all non-settled HTLCs are recorded. These snapshots
-// detail the state of the _remote_ party's commitment at a particular state
-// number. For ourselves (the local node) we ONLY store our most recent
-// (unrevoked) state for safety purposes.
-type ChannelCommitment struct {
- // CommitHeight is the update number that this ChannelDelta represents
- // the total number of commitment updates to this point. This can be
- // viewed as sort of a "commitment height" as this number is
- // monotonically increasing.
- CommitHeight uint64
-
- // LocalLogIndex is the cumulative log index index of the local node at
- // this point in the commitment chain. This value will be incremented
- // for each _update_ added to the local update log.
- LocalLogIndex uint64
-
- // LocalHtlcIndex is the current local running HTLC index. This value
- // will be incremented for each outgoing HTLC the local node offers.
- LocalHtlcIndex uint64
-
- // RemoteLogIndex is the cumulative log index index of the remote node
- // at this point in the commitment chain. This value will be
- // incremented for each _update_ added to the remote update log.
- RemoteLogIndex uint64
-
- // RemoteHtlcIndex is the current remote running HTLC index. This value
- // will be incremented for each outgoing HTLC the remote node offers.
- RemoteHtlcIndex uint64
-
- // LocalBalance is the current available settled balance within the
- // channel directly spendable by us.
- //
- // NOTE: This is the balance *after* subtracting any commitment fee,
- // AND anchor output values.
- LocalBalance lnwire.MilliSatoshi
-
- // RemoteBalance is the current available settled balance within the
- // channel directly spendable by the remote node.
- //
- // NOTE: This is the balance *after* subtracting any commitment fee,
- // AND anchor output values.
- RemoteBalance lnwire.MilliSatoshi
-
- // CommitFee is the amount calculated to be paid in fees for the
- // current set of commitment transactions. The fee amount is persisted
- // with the channel in order to allow the fee amount to be removed and
- // recalculated with each channel state update, including updates that
- // happen after a system restart.
- CommitFee btcutil.Amount
-
- // FeePerKw is the min satoshis/kilo-weight that should be paid within
- // the commitment transaction for the entire duration of the channel's
- // lifetime. This field may be updated during normal operation of the
- // channel as on-chain conditions change.
- //
- // TODO(halseth): make this SatPerKWeight. Cannot be done atm because
- // this will cause the import cycle lnwallet<->channeldb. Fee
- // estimation stuff should be in its own package.
- FeePerKw btcutil.Amount
-
- // CommitTx is the latest version of the commitment state, broadcast
- // able by us.
- CommitTx *wire.MsgTx
-
- // CommitSig is one half of the signature required to fully complete
- // the script for the commitment transaction above. This is the
- // signature signed by the remote party for our version of the
- // commitment transactions.
- CommitSig []byte
-
- // Htlcs is the set of HTLC's that are pending at this particular
- // commitment height.
- Htlcs []HTLC
-
- // TODO(roasbeef): pending commit pointer?
- // * lets just walk through
-}
-
-// ChannelStatus is a bit vector used to indicate whether an OpenChannel is in
-// the default usable state, or a state where it shouldn't be used.
-type ChannelStatus uint8
-
-var (
- // ChanStatusDefault is the normal state of an open channel.
- ChanStatusDefault ChannelStatus
-
- // ChanStatusBorked indicates that the channel has entered an
- // irreconcilable state, triggered by a state desynchronization or
- // channel breach. Channels in this state should never be added to the
- // htlc switch.
- ChanStatusBorked ChannelStatus = 1
-
- // ChanStatusCommitBroadcasted indicates that a commitment for this
- // channel has been broadcasted.
- ChanStatusCommitBroadcasted ChannelStatus = 1 << 1
-
- // ChanStatusLocalDataLoss indicates that we have lost channel state
- // for this channel, and broadcasting our latest commitment might be
- // considered a breach.
- //
- // TODO(halseh): actually enforce that we are not force closing such a
- // channel.
- ChanStatusLocalDataLoss ChannelStatus = 1 << 2
-
- // ChanStatusRestored is a status flag that signals that the channel
- // has been restored, and doesn't have all the fields a typical channel
- // will have.
- ChanStatusRestored ChannelStatus = 1 << 3
-
- // ChanStatusCoopBroadcasted indicates that a cooperative close for
- // this channel has been broadcasted. Older cooperatively closed
- // channels will only have this status set. Newer ones will also have
- // close initiator information stored using the local/remote initiator
- // status. This status is set in conjunction with the initiator status
- // so that we do not need to check multiple channel statues for
- // cooperative closes.
- ChanStatusCoopBroadcasted ChannelStatus = 1 << 4
-
- // ChanStatusLocalCloseInitiator indicates that we initiated closing
- // the channel.
- ChanStatusLocalCloseInitiator ChannelStatus = 1 << 5
-
- // ChanStatusRemoteCloseInitiator indicates that the remote node
- // initiated closing the channel.
- ChanStatusRemoteCloseInitiator ChannelStatus = 1 << 6
-)
-
-// chanStatusStrings maps a ChannelStatus to a human friendly string that
-// describes that status.
-var chanStatusStrings = map[ChannelStatus]string{
- ChanStatusDefault: "ChanStatusDefault",
- ChanStatusBorked: "ChanStatusBorked",
- ChanStatusCommitBroadcasted: "ChanStatusCommitBroadcasted",
- ChanStatusLocalDataLoss: "ChanStatusLocalDataLoss",
- ChanStatusRestored: "ChanStatusRestored",
- ChanStatusCoopBroadcasted: "ChanStatusCoopBroadcasted",
- ChanStatusLocalCloseInitiator: "ChanStatusLocalCloseInitiator",
- ChanStatusRemoteCloseInitiator: "ChanStatusRemoteCloseInitiator",
-}
-
-// orderedChanStatusFlags is an in-order list of all that channel status flags.
-var orderedChanStatusFlags = []ChannelStatus{
- ChanStatusBorked,
- ChanStatusCommitBroadcasted,
- ChanStatusLocalDataLoss,
- ChanStatusRestored,
- ChanStatusCoopBroadcasted,
- ChanStatusLocalCloseInitiator,
- ChanStatusRemoteCloseInitiator,
-}
-
-// String returns a human-readable representation of the ChannelStatus.
-func (c ChannelStatus) String() string {
- // If no flags are set, then this is the default case.
- if c == ChanStatusDefault {
- return chanStatusStrings[ChanStatusDefault]
- }
-
- // Add individual bit flags.
- statusStr := ""
- for _, flag := range orderedChanStatusFlags {
- if c&flag == flag {
- statusStr += chanStatusStrings[flag] + "|"
- c -= flag
- }
- }
-
- // Remove anything to the right of the final bar, including it as well.
- statusStr = strings.TrimRight(statusStr, "|")
-
- // Add any remaining flags which aren't accounted for as hex.
- if c != 0 {
- statusStr += "|0x" + strconv.FormatUint(uint64(c), 16)
- }
-
- // If this was purely an unknown flag, then remove the extra bar at the
- // start of the string.
- statusStr = strings.TrimLeft(statusStr, "|")
-
- return statusStr
-}
-
-// OpenChannel encapsulates the persistent and dynamic state of an open channel
-// with a remote node. An open channel supports several options for on-disk
-// serialization depending on the exact context. Full (upon channel creation)
-// state commitments, and partial (due to a commitment update) writes are
-// supported. Each partial write due to a state update appends the new update
-// to an on-disk log, which can then subsequently be queried in order to
-// "time-travel" to a prior state.
-type OpenChannel struct {
- // ChanType denotes which type of channel this is.
- ChanType ChannelType
-
- // ChainHash is a hash which represents the blockchain that this
- // channel will be opened within. This value is typically the genesis
- // hash. In the case that the original chain went through a contentious
- // hard-fork, then this value will be tweaked using the unique fork
- // point on each branch.
- ChainHash chainhash.Hash
-
- // FundingOutpoint is the outpoint of the final funding transaction.
- // This value uniquely and globally identifies the channel within the
- // target blockchain as specified by the chain hash parameter.
- FundingOutpoint wire.OutPoint
-
- // ShortChannelID encodes the exact location in the chain in which the
- // channel was initially confirmed. This includes: the block height,
- // transaction index, and the output within the target transaction.
- ShortChannelID lnwire.ShortChannelID
-
- // IsPending indicates whether a channel's funding transaction has been
- // confirmed.
- IsPending bool
-
- // IsInitiator is a bool which indicates if we were the original
- // initiator for the channel. This value may affect how higher levels
- // negotiate fees, or close the channel.
- IsInitiator bool
-
- // chanStatus is the current status of this channel. If it is not in
- // the state Default, it should not be used for forwarding payments.
- chanStatus ChannelStatus
-
- // FundingBroadcastHeight is the height in which the funding
- // transaction was broadcast. This value can be used by higher level
- // sub-systems to determine if a channel is stale and/or should have
- // been confirmed before a certain height.
- FundingBroadcastHeight uint32
-
- // NumConfsRequired is the number of confirmations a channel's funding
- // transaction must have received in order to be considered available
- // for normal transactional use.
- NumConfsRequired uint16
-
- // ChannelFlags holds the flags that were sent as part of the
- // open_channel message.
- ChannelFlags lnwire.FundingFlag
-
- // IdentityPub is the identity public key of the remote node this
- // channel has been established with.
- IdentityPub *btcec.PublicKey
-
- // Capacity is the total capacity of this channel.
- Capacity btcutil.Amount
-
- // TotalMSatSent is the total number of milli-satoshis we've sent
- // within this channel.
- TotalMSatSent lnwire.MilliSatoshi
-
- // TotalMSatReceived is the total number of milli-satoshis we've
- // received within this channel.
- TotalMSatReceived lnwire.MilliSatoshi
-
- // LocalChanCfg is the channel configuration for the local node.
- LocalChanCfg ChannelConfig
-
- // RemoteChanCfg is the channel configuration for the remote node.
- RemoteChanCfg ChannelConfig
-
- // LocalCommitment is the current local commitment state for the local
- // party. This is stored distinct from the state of the remote party
- // as there are certain asymmetric parameters which affect the
- // structure of each commitment.
- LocalCommitment ChannelCommitment
-
- // RemoteCommitment is the current remote commitment state for the
- // remote party. This is stored distinct from the state of the local
- // party as there are certain asymmetric parameters which affect the
- // structure of each commitment.
- RemoteCommitment ChannelCommitment
-
- // RemoteCurrentRevocation is the current revocation for their
- // commitment transaction. However, since this the derived public key,
- // we don't yet have the private key so we aren't yet able to verify
- // that it's actually in the hash chain.
- RemoteCurrentRevocation *btcec.PublicKey
-
- // RemoteNextRevocation is the revocation key to be used for the *next*
- // commitment transaction we create for the local node. Within the
- // specification, this value is referred to as the
- // per-commitment-point.
- RemoteNextRevocation *btcec.PublicKey
-
- // RevocationProducer is used to generate the revocation in such a way
- // that remote side might store it efficiently and have the ability to
- // restore the revocation by index if needed. Current implementation of
- // secret producer is shachain producer.
- RevocationProducer shachain.Producer
-
- // RevocationStore is used to efficiently store the revocations for
- // previous channels states sent to us by remote side. Current
- // implementation of secret store is shachain store.
- RevocationStore shachain.Store
-
- // Packager is used to create and update forwarding packages for this
- // channel, which encodes all necessary information to recover from
- // failures and reforward HTLCs that were not fully processed.
- Packager FwdPackager
-
- // FundingTxn is the transaction containing this channel's funding
- // outpoint. Upon restarts, this txn will be rebroadcast if the channel
- // is found to be pending.
- //
- // NOTE: This value will only be populated for single-funder channels
- // for which we are the initiator, and that we also have the funding
- // transaction for. One can check this by using the HasFundingTx()
- // method on the ChanType field.
- FundingTxn *wire.MsgTx
-
- // LocalShutdownScript is set to a pre-set script if the channel was opened
- // by the local node with option_upfront_shutdown_script set. If the option
- // was not set, the field is empty.
- LocalShutdownScript lnwire.DeliveryAddress
-
- // RemoteShutdownScript is set to a pre-set script if the channel was opened
- // by the remote node with option_upfront_shutdown_script set. If the option
- // was not set, the field is empty.
- RemoteShutdownScript lnwire.DeliveryAddress
-
- // ThawHeight is the height when a frozen channel once again becomes a
- // normal channel. If this is zero, then there're no restrictions on
- // this channel. If the value is lower than 500,000, then it's
- // interpreted as a relative height, or an absolute height otherwise.
- ThawHeight uint32
-
- // TODO(roasbeef): eww
- Db *DB
-
- // TODO(roasbeef): just need to store local and remote HTLC's?
-
- sync.RWMutex
-}
-
-// ShortChanID returns the current ShortChannelID of this channel.
-func (c *OpenChannel) ShortChanID() lnwire.ShortChannelID {
- c.RLock()
- defer c.RUnlock()
-
- return c.ShortChannelID
-}
-
-// ChanStatus returns the current ChannelStatus of this channel.
-func (c *OpenChannel) ChanStatus() ChannelStatus {
- c.RLock()
- defer c.RUnlock()
-
- return c.chanStatus
-}
-
-// ApplyChanStatus allows the caller to modify the internal channel state in a
-// thead-safe manner.
-func (c *OpenChannel) ApplyChanStatus(status ChannelStatus) er.R {
- c.Lock()
- defer c.Unlock()
-
- return c.putChanStatus(status)
-}
-
-// ClearChanStatus allows the caller to clear a particular channel status from
-// the primary channel status bit field. After this method returns, a call to
-// HasChanStatus(status) should return false.
-func (c *OpenChannel) ClearChanStatus(status ChannelStatus) er.R {
- c.Lock()
- defer c.Unlock()
-
- return c.clearChanStatus(status)
-}
-
-// HasChanStatus returns true if the internal bitfield channel status of the
-// target channel has the specified status bit set.
-func (c *OpenChannel) HasChanStatus(status ChannelStatus) bool {
- c.RLock()
- defer c.RUnlock()
-
- return c.hasChanStatus(status)
-}
-
-func (c *OpenChannel) hasChanStatus(status ChannelStatus) bool {
- // Special case ChanStatusDefualt since it isn't actually flag, but a
- // particular combination (or lack-there-of) of flags.
- if status == ChanStatusDefault {
- return c.chanStatus == ChanStatusDefault
- }
-
- return c.chanStatus&status == status
-}
-
-// RefreshShortChanID updates the in-memory channel state using the latest
-// value observed on disk.
-//
-// TODO: the name of this function should be changed to reflect the fact that
-// it is not only refreshing the short channel id but all the channel state.
-// maybe Refresh/Reload?
-func (c *OpenChannel) RefreshShortChanID() er.R {
- c.Lock()
- defer c.Unlock()
-
- err := kvdb.View(c.Db, func(tx kvdb.RTx) er.R {
- chanBucket, err := fetchChanBucket(
- tx, c.IdentityPub, &c.FundingOutpoint, c.ChainHash,
- )
- if err != nil {
- return err
- }
-
- // We'll re-populating the in-memory channel with the info
- // fetched from disk.
- if err := fetchChanInfo(chanBucket, c); err != nil {
- return er.Errorf("unable to fetch chan info: %v", err)
- }
-
- return nil
- }, func() {})
- if err != nil {
- return err
- }
-
- return nil
-}
-
-// fetchChanBucket is a helper function that returns the bucket where a
-// channel's data resides in given: the public key for the node, the outpoint,
-// and the chainhash that the channel resides on.
-func fetchChanBucket(tx kvdb.RTx, nodeKey *btcec.PublicKey,
- outPoint *wire.OutPoint, chainHash chainhash.Hash) (kvdb.RBucket, er.R) {
-
- // First fetch the top level bucket which stores all data related to
- // current, active channels.
- openChanBucket := tx.ReadBucket(openChannelBucket)
- if openChanBucket == nil {
- return nil, ErrNoChanDBExists.Default()
- }
-
- // TODO(roasbeef): CreateTopLevelBucket on the interface isn't like
- // CreateIfNotExists, will return error
-
- // Within this top level bucket, fetch the bucket dedicated to storing
- // open channel data specific to the remote node.
- nodePub := nodeKey.SerializeCompressed()
- nodeChanBucket := openChanBucket.NestedReadBucket(nodePub)
- if nodeChanBucket == nil {
- return nil, ErrNoActiveChannels.Default()
- }
-
- // We'll then recurse down an additional layer in order to fetch the
- // bucket for this particular chain.
- chainBucket := nodeChanBucket.NestedReadBucket(chainHash[:])
- if chainBucket == nil {
- return nil, ErrNoActiveChannels.Default()
- }
-
- // With the bucket for the node and chain fetched, we can now go down
- // another level, for this channel itself.
- var chanPointBuf bytes.Buffer
- if err := writeOutpoint(&chanPointBuf, outPoint); err != nil {
- return nil, err
- }
- chanBucket := chainBucket.NestedReadBucket(chanPointBuf.Bytes())
- if chanBucket == nil {
- return nil, ErrChannelNotFound.Default()
- }
-
- return chanBucket, nil
-}
-
-// fetchChanBucketRw is a helper function that returns the bucket where a
-// channel's data resides in given: the public key for the node, the outpoint,
-// and the chainhash that the channel resides on. This differs from
-// fetchChanBucket in that it returns a writeable bucket.
-func fetchChanBucketRw(tx kvdb.RwTx, nodeKey *btcec.PublicKey, // nolint:interfacer
- outPoint *wire.OutPoint, chainHash chainhash.Hash) (kvdb.RwBucket, er.R) {
-
- readBucket, err := fetchChanBucket(tx, nodeKey, outPoint, chainHash)
- if err != nil {
- return nil, err
- }
-
- return readBucket.(kvdb.RwBucket), nil
-}
-
-// fullSync syncs the contents of an OpenChannel while re-using an existing
-// database transaction.
-func (c *OpenChannel) fullSync(tx kvdb.RwTx) er.R {
- // First fetch the top level bucket which stores all data related to
- // current, active channels.
- openChanBucket, err := tx.CreateTopLevelBucket(openChannelBucket)
- if err != nil {
- return err
- }
-
- // Within this top level bucket, fetch the bucket dedicated to storing
- // open channel data specific to the remote node.
- nodePub := c.IdentityPub.SerializeCompressed()
- nodeChanBucket, err := openChanBucket.CreateBucketIfNotExists(nodePub)
- if err != nil {
- return err
- }
-
- // We'll then recurse down an additional layer in order to fetch the
- // bucket for this particular chain.
- chainBucket, err := nodeChanBucket.CreateBucketIfNotExists(c.ChainHash[:])
- if err != nil {
- return err
- }
-
- // With the bucket for the node fetched, we can now go down another
- // level, creating the bucket for this channel itself.
- var chanPointBuf bytes.Buffer
- if err := writeOutpoint(&chanPointBuf, &c.FundingOutpoint); err != nil {
- return err
- }
- chanBucket, err := chainBucket.CreateBucket(
- chanPointBuf.Bytes(),
- )
- switch {
- case kvdb.ErrBucketExists.Is(err):
- // If this channel already exists, then in order to avoid
- // overriding it, we'll return an error back up to the caller.
- return ErrChanAlreadyExists.Default()
- case err != nil:
- return err
- }
-
- return putOpenChannel(chanBucket, c)
-}
-
-// MarkAsOpen marks a channel as fully open given a locator that uniquely
-// describes its location within the chain.
-func (c *OpenChannel) MarkAsOpen(openLoc lnwire.ShortChannelID) er.R {
- c.Lock()
- defer c.Unlock()
-
- if err := kvdb.Update(c.Db, func(tx kvdb.RwTx) er.R {
- chanBucket, err := fetchChanBucket(
- tx, c.IdentityPub, &c.FundingOutpoint, c.ChainHash,
- )
- if err != nil {
- return err
- }
-
- channel, err := fetchOpenChannel(chanBucket, &c.FundingOutpoint)
- if err != nil {
- return err
- }
-
- channel.IsPending = false
- channel.ShortChannelID = openLoc
-
- return putOpenChannel(chanBucket.(kvdb.RwBucket), channel)
- }, func() {}); err != nil {
- return err
- }
-
- c.IsPending = false
- c.ShortChannelID = openLoc
- c.Packager = NewChannelPackager(openLoc)
-
- return nil
-}
-
-// MarkDataLoss marks sets the channel status to LocalDataLoss and stores the
-// passed commitPoint for use to retrieve funds in case the remote force closes
-// the channel.
-func (c *OpenChannel) MarkDataLoss(commitPoint *btcec.PublicKey) er.R {
- c.Lock()
- defer c.Unlock()
-
- var b bytes.Buffer
- if err := WriteElement(&b, commitPoint); err != nil {
- return err
- }
-
- putCommitPoint := func(chanBucket kvdb.RwBucket) er.R {
- return chanBucket.Put(dataLossCommitPointKey, b.Bytes())
- }
-
- return c.putChanStatus(ChanStatusLocalDataLoss, putCommitPoint)
-}
-
-func mapErr(err er.R, code *er.ErrorCode) (er.R, bool) {
- switch {
- case err == nil:
- return nil, false
- case ErrNoChanDBExists.Is(err), ErrNoActiveChannels.Is(err), ErrChannelNotFound.Is(err):
- if code != nil {
- return code.New("", err), true
- }
- return nil, true
- default:
- return err, true
- }
-}
-
-// DataLossCommitPoint retrieves the stored commit point set during
-// MarkDataLoss. If not found ErrNoCommitPoint is returned.
-func (c *OpenChannel) DataLossCommitPoint() (*btcec.PublicKey, er.R) {
- var commitPoint *btcec.PublicKey
-
- err := kvdb.View(c.Db, func(tx kvdb.RTx) er.R {
- chanBucket, err := fetchChanBucket(
- tx, c.IdentityPub, &c.FundingOutpoint, c.ChainHash,
- )
- if err, stop := mapErr(err, ErrNoCommitPoint); stop {
- return err
- }
-
- bs := chanBucket.Get(dataLossCommitPointKey)
- if bs == nil {
- return ErrNoCommitPoint.Default()
- }
- r := bytes.NewReader(bs)
- if err := ReadElements(r, &commitPoint); err != nil {
- return err
- }
-
- return nil
- }, func() {
- commitPoint = nil
- })
- if err != nil {
- return nil, err
- }
-
- return commitPoint, nil
-}
-
-// MarkBorked marks the event when the channel as reached an irreconcilable
-// state, such as a channel breach or state desynchronization. Borked channels
-// should never be added to the switch.
-func (c *OpenChannel) MarkBorked() er.R {
- c.Lock()
- defer c.Unlock()
-
- return c.putChanStatus(ChanStatusBorked)
-}
-
-// ChanSyncMsg returns the ChannelReestablish message that should be sent upon
-// reconnection with the remote peer that we're maintaining this channel with.
-// The information contained within this message is necessary to re-sync our
-// commitment chains in the case of a last or only partially processed message.
-// When the remote party receiver this message one of three things may happen:
-//
-// 1. We're fully synced and no messages need to be sent.
-// 2. We didn't get the last CommitSig message they sent, to they'll re-send
-// it.
-// 3. We didn't get the last RevokeAndAck message they sent, so they'll
-// re-send it.
-//
-// If this is a restored channel, having status ChanStatusRestored, then we'll
-// modify our typical chan sync message to ensure they force close even if
-// we're on the very first state.
-func (c *OpenChannel) ChanSyncMsg() (*lnwire.ChannelReestablish, er.R) {
- c.Lock()
- defer c.Unlock()
-
- // The remote commitment height that we'll send in the
- // ChannelReestablish message is our current commitment height plus
- // one. If the receiver thinks that our commitment height is actually
- // *equal* to this value, then they'll re-send the last commitment that
- // they sent but we never fully processed.
- localHeight := c.LocalCommitment.CommitHeight
- nextLocalCommitHeight := localHeight + 1
-
- // The second value we'll send is the height of the remote commitment
- // from our PoV. If the receiver thinks that their height is actually
- // *one plus* this value, then they'll re-send their last revocation.
- remoteChainTipHeight := c.RemoteCommitment.CommitHeight
-
- // If this channel has undergone a commitment update, then in order to
- // prove to the remote party our knowledge of their prior commitment
- // state, we'll also send over the last commitment secret that the
- // remote party sent.
- var lastCommitSecret [32]byte
- if remoteChainTipHeight != 0 {
- remoteSecret, err := c.RevocationStore.LookUp(
- remoteChainTipHeight - 1,
- )
- if err != nil {
- return nil, err
- }
- lastCommitSecret = [32]byte(*remoteSecret)
- }
-
- // Additionally, we'll send over the current unrevoked commitment on
- // our local commitment transaction.
- currentCommitSecret, err := c.RevocationProducer.AtIndex(
- localHeight,
- )
- if err != nil {
- return nil, err
- }
-
- // If we've restored this channel, then we'll purposefully give them an
- // invalid LocalUnrevokedCommitPoint so they'll force close the channel
- // allowing us to sweep our funds.
- if c.hasChanStatus(ChanStatusRestored) {
- currentCommitSecret[0] ^= 1
-
- // If this is a tweakless channel, then we'll purposefully send
- // a next local height taht's invalid to trigger a force close
- // on their end. We do this as tweakless channels don't require
- // that the commitment point is valid, only that it's present.
- if c.ChanType.IsTweakless() {
- nextLocalCommitHeight = 0
- }
- }
-
- return &lnwire.ChannelReestablish{
- ChanID: lnwire.NewChanIDFromOutPoint(
- &c.FundingOutpoint,
- ),
- NextLocalCommitHeight: nextLocalCommitHeight,
- RemoteCommitTailHeight: remoteChainTipHeight,
- LastRemoteCommitSecret: lastCommitSecret,
- LocalUnrevokedCommitPoint: input.ComputeCommitmentPoint(
- currentCommitSecret[:],
- ),
- }, nil
-}
-
-// isBorked returns true if the channel has been marked as borked in the
-// database. This requires an existing database transaction to already be
-// active.
-//
-// NOTE: The primary mutex should already be held before this method is called.
-func (c *OpenChannel) isBorked(chanBucket kvdb.RBucket) (bool, er.R) {
- channel, err := fetchOpenChannel(chanBucket, &c.FundingOutpoint)
- if err != nil {
- return false, err
- }
-
- return channel.chanStatus != ChanStatusDefault, nil
-}
-
-// MarkCommitmentBroadcasted marks the channel as a commitment transaction has
-// been broadcast, either our own or the remote, and we should watch the chain
-// for it to confirm before taking any further action. It takes as argument the
-// closing tx _we believe_ will appear in the chain. This is only used to
-// republish this tx at startup to ensure propagation, and we should still
-// handle the case where a different tx actually hits the chain.
-func (c *OpenChannel) MarkCommitmentBroadcasted(closeTx *wire.MsgTx,
- locallyInitiated bool) er.R {
-
- return c.markBroadcasted(
- ChanStatusCommitBroadcasted, forceCloseTxKey, closeTx,
- locallyInitiated,
- )
-}
-
-// MarkCoopBroadcasted marks the channel to indicate that a cooperative close
-// transaction has been broadcast, either our own or the remote, and that we
-// should watch the chain for it to confirm before taking further action. It
-// takes as argument a cooperative close tx that could appear on chain, and
-// should be rebroadcast upon startup. This is only used to republish and
-// ensure propagation, and we should still handle the case where a different tx
-// actually hits the chain.
-func (c *OpenChannel) MarkCoopBroadcasted(closeTx *wire.MsgTx,
- locallyInitiated bool) er.R {
-
- return c.markBroadcasted(
- ChanStatusCoopBroadcasted, coopCloseTxKey, closeTx,
- locallyInitiated,
- )
-}
-
-// markBroadcasted is a helper function which modifies the channel status of the
-// receiving channel and inserts a close transaction under the requested key,
-// which should specify either a coop or force close. It adds a status which
-// indicates the party that initiated the channel close.
-func (c *OpenChannel) markBroadcasted(status ChannelStatus, key []byte,
- closeTx *wire.MsgTx, locallyInitiated bool) er.R {
-
- c.Lock()
- defer c.Unlock()
-
- // If a closing tx is provided, we'll generate a closure to write the
- // transaction in the appropriate bucket under the given key.
- var putClosingTx func(kvdb.RwBucket) er.R
- if closeTx != nil {
- var b bytes.Buffer
- if err := WriteElement(&b, closeTx); err != nil {
- return err
- }
-
- putClosingTx = func(chanBucket kvdb.RwBucket) er.R {
- return chanBucket.Put(key, b.Bytes())
- }
- }
-
- // Add the initiator status to the status provided. These statuses are
- // set in addition to the broadcast status so that we do not need to
- // migrate the original logic which does not store initiator.
- if locallyInitiated {
- status |= ChanStatusLocalCloseInitiator
- } else {
- status |= ChanStatusRemoteCloseInitiator
- }
-
- return c.putChanStatus(status, putClosingTx)
-}
-
-// BroadcastedCommitment retrieves the stored unilateral closing tx set during
-// MarkCommitmentBroadcasted. If not found ErrNoCloseTx is returned.
-func (c *OpenChannel) BroadcastedCommitment() (*wire.MsgTx, er.R) {
- return c.getClosingTx(forceCloseTxKey)
-}
-
-// BroadcastedCooperative retrieves the stored cooperative closing tx set during
-// MarkCoopBroadcasted. If not found ErrNoCloseTx is returned.
-func (c *OpenChannel) BroadcastedCooperative() (*wire.MsgTx, er.R) {
- return c.getClosingTx(coopCloseTxKey)
-}
-
-// getClosingTx is a helper method which returns the stored closing transaction
-// for key. The caller should use either the force or coop closing keys.
-func (c *OpenChannel) getClosingTx(key []byte) (*wire.MsgTx, er.R) {
- var closeTx *wire.MsgTx
-
- err := kvdb.View(c.Db, func(tx kvdb.RTx) er.R {
- chanBucket, err := fetchChanBucket(
- tx, c.IdentityPub, &c.FundingOutpoint, c.ChainHash,
- )
- if err, stop := mapErr(err, ErrNoCloseTx); stop {
- return err
- }
-
- bs := chanBucket.Get(key)
- if bs == nil {
- return ErrNoCloseTx.Default()
- }
- r := bytes.NewReader(bs)
- return ReadElement(r, &closeTx)
- }, func() {
- closeTx = nil
- })
- if err != nil {
- return nil, err
- }
-
- return closeTx, nil
-}
-
-// putChanStatus appends the given status to the channel. fs is an optional
-// list of closures that are given the chanBucket in order to atomically add
-// extra information together with the new status.
-func (c *OpenChannel) putChanStatus(status ChannelStatus,
- fs ...func(kvdb.RwBucket) er.R) er.R {
-
- if err := kvdb.Update(c.Db, func(tx kvdb.RwTx) er.R {
- chanBucket, err := fetchChanBucketRw(
- tx, c.IdentityPub, &c.FundingOutpoint, c.ChainHash,
- )
- if err != nil {
- return err
- }
-
- channel, err := fetchOpenChannel(chanBucket, &c.FundingOutpoint)
- if err != nil {
- return err
- }
-
- // Add this status to the existing bitvector found in the DB.
- status = channel.chanStatus | status
- channel.chanStatus = status
-
- if err := putOpenChannel(chanBucket, channel); err != nil {
- return err
- }
-
- for _, f := range fs {
- // Skip execution of nil closures.
- if f == nil {
- continue
- }
-
- if err := f(chanBucket); err != nil {
- return err
- }
- }
-
- return nil
- }, func() {}); err != nil {
- return err
- }
-
- // Update the in-memory representation to keep it in sync with the DB.
- c.chanStatus = status
-
- return nil
-}
-
-func (c *OpenChannel) clearChanStatus(status ChannelStatus) er.R {
- if err := kvdb.Update(c.Db, func(tx kvdb.RwTx) er.R {
- chanBucket, err := fetchChanBucketRw(
- tx, c.IdentityPub, &c.FundingOutpoint, c.ChainHash,
- )
- if err != nil {
- return err
- }
-
- channel, err := fetchOpenChannel(chanBucket, &c.FundingOutpoint)
- if err != nil {
- return err
- }
-
- // Unset this bit in the bitvector on disk.
- status = channel.chanStatus & ^status
- channel.chanStatus = status
-
- return putOpenChannel(chanBucket, channel)
- }, func() {}); err != nil {
- return err
- }
-
- // Update the in-memory representation to keep it in sync with the DB.
- c.chanStatus = status
-
- return nil
-}
-
-// putChannel serializes, and stores the current state of the channel in its
-// entirety.
-func putOpenChannel(chanBucket kvdb.RwBucket, channel *OpenChannel) er.R {
- // First, we'll write out all the relatively static fields, that are
- // decided upon initial channel creation.
- if err := putChanInfo(chanBucket, channel); err != nil {
- return er.Errorf("unable to store chan info: %v", err)
- }
-
- // With the static channel info written out, we'll now write out the
- // current commitment state for both parties.
- if err := putChanCommitments(chanBucket, channel); err != nil {
- return er.Errorf("unable to store chan commitments: %v", err)
- }
-
- // Next, if this is a frozen channel, we'll add in the axillary
- // information we need to store.
- if channel.ChanType.IsFrozen() {
- err := storeThawHeight(
- chanBucket, channel.ThawHeight,
- )
- if err != nil {
- return er.Errorf("unable to store thaw height: %v", err)
- }
- }
-
- // Finally, we'll write out the revocation state for both parties
- // within a distinct key space.
- if err := putChanRevocationState(chanBucket, channel); err != nil {
- return er.Errorf("unable to store chan revocations: %v", err)
- }
-
- return nil
-}
-
-// fetchOpenChannel retrieves, and deserializes (including decrypting
-// sensitive) the complete channel currently active with the passed nodeID.
-func fetchOpenChannel(chanBucket kvdb.RBucket,
- chanPoint *wire.OutPoint) (*OpenChannel, er.R) {
-
- channel := &OpenChannel{
- FundingOutpoint: *chanPoint,
- }
-
- // First, we'll read all the static information that changes less
- // frequently from disk.
- if err := fetchChanInfo(chanBucket, channel); err != nil {
- return nil, er.Errorf("unable to fetch chan info: %v", err)
- }
-
- // With the static information read, we'll now read the current
- // commitment state for both sides of the channel.
- if err := fetchChanCommitments(chanBucket, channel); err != nil {
- return nil, er.Errorf("unable to fetch chan commitments: %v", err)
- }
-
- // Next, if this is a frozen channel, we'll add in the axillary
- // information we need to store.
- if channel.ChanType.IsFrozen() {
- thawHeight, err := fetchThawHeight(chanBucket)
- if err != nil {
- return nil, er.Errorf("unable to store thaw "+
- "height: %v", err)
- }
-
- channel.ThawHeight = thawHeight
- }
-
- // Finally, we'll retrieve the current revocation state so we can
- // properly
- if err := fetchChanRevocationState(chanBucket, channel); err != nil {
- return nil, er.Errorf("unable to fetch chan revocations: %v", err)
- }
-
- channel.Packager = NewChannelPackager(channel.ShortChannelID)
-
- return channel, nil
-}
-
-// SyncPending writes the contents of the channel to the database while it's in
-// the pending (waiting for funding confirmation) state. The IsPending flag
-// will be set to true. When the channel's funding transaction is confirmed,
-// the channel should be marked as "open" and the IsPending flag set to false.
-// Note that this function also creates a LinkNode relationship between this
-// newly created channel and a new LinkNode instance. This allows listing all
-// channels in the database globally, or according to the LinkNode they were
-// created with.
-//
-// TODO(roasbeef): addr param should eventually be an lnwire.NetAddress type
-// that includes service bits.
-func (c *OpenChannel) SyncPending(addr net.Addr, pendingHeight uint32) er.R {
- c.Lock()
- defer c.Unlock()
-
- c.FundingBroadcastHeight = pendingHeight
-
- return kvdb.Update(c.Db, func(tx kvdb.RwTx) er.R {
- return syncNewChannel(tx, c, []net.Addr{addr})
- }, func() {})
-}
-
-// syncNewChannel will write the passed channel to disk, and also create a
-// LinkNode (if needed) for the channel peer.
-func syncNewChannel(tx kvdb.RwTx, c *OpenChannel, addrs []net.Addr) er.R {
- // First, sync all the persistent channel state to disk.
- if err := c.fullSync(tx); err != nil {
- return err
- }
-
- nodeInfoBucket, err := tx.CreateTopLevelBucket(nodeInfoBucket)
- if err != nil {
- return err
- }
-
- // If a LinkNode for this identity public key already exists,
- // then we can exit early.
- nodePub := c.IdentityPub.SerializeCompressed()
- if nodeInfoBucket.Get(nodePub) != nil {
- return nil
- }
-
- // Next, we need to establish a (possibly) new LinkNode relationship
- // for this channel. The LinkNode metadata contains reachability,
- // up-time, and service bits related information.
- linkNode := c.Db.NewLinkNode(protocol.MainNet, c.IdentityPub, addrs...)
-
- // TODO(roasbeef): do away with link node all together?
-
- return putLinkNode(nodeInfoBucket, linkNode)
-}
-
-// UpdateCommitment updates the local commitment state. It locks in the pending
-// local updates that were received by us from the remote party. The commitment
-// state completely describes the balance state at this point in the commitment
-// chain. In addition to that, it persists all the remote log updates that we
-// have acked, but not signed a remote commitment for yet. These need to be
-// persisted to be able to produce a valid commit signature if a restart would
-// occur. This method its to be called when we revoke our prior commitment
-// state.
-func (c *OpenChannel) UpdateCommitment(newCommitment *ChannelCommitment,
- unsignedAckedUpdates []LogUpdate) er.R {
-
- c.Lock()
- defer c.Unlock()
-
- // If this is a restored channel, then we want to avoid mutating the
- // state as all, as it's impossible to do so in a protocol compliant
- // manner.
- if c.hasChanStatus(ChanStatusRestored) {
- return ErrNoRestoredChannelMutation.Default()
- }
-
- err := kvdb.Update(c.Db, func(tx kvdb.RwTx) er.R {
- chanBucket, err := fetchChanBucketRw(
- tx, c.IdentityPub, &c.FundingOutpoint, c.ChainHash,
- )
- if err != nil {
- return err
- }
-
- // If the channel is marked as borked, then for safety reasons,
- // we shouldn't attempt any further updates.
- isBorked, err := c.isBorked(chanBucket)
- if err != nil {
- return err
- }
- if isBorked {
- return ErrChanBorked.Default()
- }
-
- if err = putChanInfo(chanBucket, c); err != nil {
- return er.Errorf("unable to store chan info: %v", err)
- }
-
- // With the proper bucket fetched, we'll now write the latest
- // commitment state to disk for the target party.
- err = putChanCommitment(
- chanBucket, newCommitment, true,
- )
- if err != nil {
- return er.Errorf("unable to store chan "+
- "revocations: %v", err)
- }
-
- // Persist unsigned but acked remote updates that need to be
- // restored after a restart.
- var b bytes.Buffer
- err = serializeLogUpdates(&b, unsignedAckedUpdates)
- if err != nil {
- return err
- }
-
- err = chanBucket.Put(unsignedAckedUpdatesKey, b.Bytes())
- if err != nil {
- return er.Errorf("unable to store dangline remote "+
- "updates: %v", err)
- }
-
- // Persist the remote unsigned local updates that are not included
- // in our new commitment.
- updateBytes := chanBucket.Get(remoteUnsignedLocalUpdatesKey)
- if updateBytes == nil {
- return nil
- }
-
- r := bytes.NewReader(updateBytes)
- updates, err := deserializeLogUpdates(r)
- if err != nil {
- return err
- }
-
- var validUpdates []LogUpdate
- for _, upd := range updates {
- // Filter for updates that are not on our local
- // commitment.
- if upd.LogIndex >= newCommitment.LocalLogIndex {
- validUpdates = append(validUpdates, upd)
- }
- }
-
- var b2 bytes.Buffer
- err = serializeLogUpdates(&b2, validUpdates)
- if err != nil {
- return er.Errorf("unable to serialize log updates: %v", err)
- }
-
- err = chanBucket.Put(remoteUnsignedLocalUpdatesKey, b2.Bytes())
- if err != nil {
- return er.Errorf("unable to restore chanbucket: %v", err)
- }
-
- return nil
- }, func() {})
- if err != nil {
- return err
- }
-
- c.LocalCommitment = *newCommitment
-
- return nil
-}
-
-// BalancesAtHeight returns the local and remote balances on our commitment
-// transactions as of a given height.
-//
-// NOTE: these are our balances *after* subtracting the commitment fee and
-// anchor outputs.
-func (c *OpenChannel) BalancesAtHeight(height uint64) (lnwire.MilliSatoshi,
- lnwire.MilliSatoshi, er.R) {
-
- if height > c.LocalCommitment.CommitHeight &&
- height > c.RemoteCommitment.CommitHeight {
-
- return 0, 0, errHeightNotReached.Default()
- }
-
- // If our current commit is as the desired height, we can return our
- // current balances.
- if c.LocalCommitment.CommitHeight == height {
- return c.LocalCommitment.LocalBalance,
- c.LocalCommitment.RemoteBalance, nil
- }
-
- // If our current remote commit is at the desired height, we can return
- // the current balances.
- if c.RemoteCommitment.CommitHeight == height {
- return c.RemoteCommitment.LocalBalance,
- c.RemoteCommitment.RemoteBalance, nil
- }
-
- // If we are not currently on the height requested, we need to look up
- // the previous height to obtain our balances at the given height.
- commit, err := c.FindPreviousState(height)
- if err != nil {
- return 0, 0, err
- }
-
- return commit.LocalBalance, commit.RemoteBalance, nil
-}
-
-// ActiveHtlcs returns a slice of HTLC's which are currently active on *both*
-// commitment transactions.
-func (c *OpenChannel) ActiveHtlcs() []HTLC {
- c.RLock()
- defer c.RUnlock()
-
- // We'll only return HTLC's that are locked into *both* commitment
- // transactions. So we'll iterate through their set of HTLC's to note
- // which ones are present on their commitment.
- remoteHtlcs := make(map[[32]byte]struct{})
- for _, htlc := range c.RemoteCommitment.Htlcs {
- onionHash := sha256.Sum256(htlc.OnionBlob)
- remoteHtlcs[onionHash] = struct{}{}
- }
-
- // Now that we know which HTLC's they have, we'll only mark the HTLC's
- // as active if *we* know them as well.
- activeHtlcs := make([]HTLC, 0, len(remoteHtlcs))
- for _, htlc := range c.LocalCommitment.Htlcs {
- onionHash := sha256.Sum256(htlc.OnionBlob)
- if _, ok := remoteHtlcs[onionHash]; !ok {
- continue
- }
-
- activeHtlcs = append(activeHtlcs, htlc)
- }
-
- return activeHtlcs
-}
-
-// HTLC is the on-disk representation of a hash time-locked contract. HTLCs are
-// contained within ChannelDeltas which encode the current state of the
-// commitment between state updates.
-//
-// TODO(roasbeef): save space by using smaller ints at tail end?
-type HTLC struct {
- // Signature is the signature for the second level covenant transaction
- // for this HTLC. The second level transaction is a timeout tx in the
- // case that this is an outgoing HTLC, and a success tx in the case
- // that this is an incoming HTLC.
- //
- // TODO(roasbeef): make [64]byte instead?
- Signature []byte
-
- // RHash is the payment hash of the HTLC.
- RHash [32]byte
-
- // Amt is the amount of milli-satoshis this HTLC escrows.
- Amt lnwire.MilliSatoshi
-
- // RefundTimeout is the absolute timeout on the HTLC that the sender
- // must wait before reclaiming the funds in limbo.
- RefundTimeout uint32
-
- // OutputIndex is the output index for this particular HTLC output
- // within the commitment transaction.
- OutputIndex int32
-
- // Incoming denotes whether we're the receiver or the sender of this
- // HTLC.
- Incoming bool
-
- // OnionBlob is an opaque blob which is used to complete multi-hop
- // routing.
- OnionBlob []byte
-
- // HtlcIndex is the HTLC counter index of this active, outstanding
- // HTLC. This differs from the LogIndex, as the HtlcIndex is only
- // incremented for each offered HTLC, while they LogIndex is
- // incremented for each update (includes settle+fail).
- HtlcIndex uint64
-
- // LogIndex is the cumulative log index of this HTLC. This differs
- // from the HtlcIndex as this will be incremented for each new log
- // update added.
- LogIndex uint64
-}
-
-// SerializeHtlcs writes out the passed set of HTLC's into the passed writer
-// using the current default on-disk serialization format.
-//
-// NOTE: This API is NOT stable, the on-disk format will likely change in the
-// future.
-func SerializeHtlcs(b io.Writer, htlcs ...HTLC) er.R {
- numHtlcs := uint16(len(htlcs))
- if err := WriteElement(b, numHtlcs); err != nil {
- return err
- }
-
- for _, htlc := range htlcs {
- if err := WriteElements(b,
- htlc.Signature, htlc.RHash, htlc.Amt, htlc.RefundTimeout,
- htlc.OutputIndex, htlc.Incoming, htlc.OnionBlob[:],
- htlc.HtlcIndex, htlc.LogIndex,
- ); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-// DeserializeHtlcs attempts to read out a slice of HTLC's from the passed
-// io.Reader. The bytes within the passed reader MUST have been previously
-// written to using the SerializeHtlcs function.
-//
-// NOTE: This API is NOT stable, the on-disk format will likely change in the
-// future.
-func DeserializeHtlcs(r io.Reader) ([]HTLC, er.R) {
- var numHtlcs uint16
- if err := ReadElement(r, &numHtlcs); err != nil {
- return nil, err
- }
-
- var htlcs []HTLC
- if numHtlcs == 0 {
- return htlcs, nil
- }
-
- htlcs = make([]HTLC, numHtlcs)
- for i := uint16(0); i < numHtlcs; i++ {
- if err := ReadElements(r,
- &htlcs[i].Signature, &htlcs[i].RHash, &htlcs[i].Amt,
- &htlcs[i].RefundTimeout, &htlcs[i].OutputIndex,
- &htlcs[i].Incoming, &htlcs[i].OnionBlob,
- &htlcs[i].HtlcIndex, &htlcs[i].LogIndex,
- ); err != nil {
- return htlcs, err
- }
- }
-
- return htlcs, nil
-}
-
-// Copy returns a full copy of the target HTLC.
-func (h *HTLC) Copy() HTLC {
- clone := HTLC{
- Incoming: h.Incoming,
- Amt: h.Amt,
- RefundTimeout: h.RefundTimeout,
- OutputIndex: h.OutputIndex,
- }
- copy(clone.Signature[:], h.Signature)
- copy(clone.RHash[:], h.RHash[:])
-
- return clone
-}
-
-// LogUpdate represents a pending update to the remote commitment chain. The
-// log update may be an add, fail, or settle entry. We maintain this data in
-// order to be able to properly retransmit our proposed
-// state if necessary.
-type LogUpdate struct {
- // LogIndex is the log index of this proposed commitment update entry.
- LogIndex uint64
-
- // UpdateMsg is the update message that was included within the our
- // local update log. The LogIndex value denotes the log index of this
- // update which will be used when restoring our local update log if
- // we're left with a dangling update on restart.
- UpdateMsg lnwire.Message
-}
-
-// Encode writes a log update to the provided io.Writer.
-func (l *LogUpdate) Encode(w io.Writer) er.R {
- return WriteElements(w, l.LogIndex, l.UpdateMsg)
-}
-
-// Decode reads a log update from the provided io.Reader.
-func (l *LogUpdate) Decode(r io.Reader) er.R {
- return ReadElements(r, &l.LogIndex, &l.UpdateMsg)
-}
-
-// CircuitKey is used by a channel to uniquely identify the HTLCs it receives
-// from the switch, and is used to purge our in-memory state of HTLCs that have
-// already been processed by a link. Two list of CircuitKeys are included in
-// each CommitDiff to allow a link to determine which in-memory htlcs directed
-// the opening and closing of circuits in the switch's circuit map.
-type CircuitKey struct {
- // ChanID is the short chanid indicating the HTLC's origin.
- //
- // NOTE: It is fine for this value to be blank, as this indicates a
- // locally-sourced payment.
- ChanID lnwire.ShortChannelID
-
- // HtlcID is the unique htlc index predominately assigned by links,
- // though can also be assigned by switch in the case of locally-sourced
- // payments.
- HtlcID uint64
-}
-
-// SetBytes deserializes the given bytes into this CircuitKey.
-func (k *CircuitKey) SetBytes(bs []byte) er.R {
- if len(bs) != 16 {
- return ErrInvalidCircuitKeyLen.Default()
- }
-
- k.ChanID = lnwire.NewShortChanIDFromInt(
- binary.BigEndian.Uint64(bs[:8]))
- k.HtlcID = binary.BigEndian.Uint64(bs[8:])
-
- return nil
-}
-
-// Bytes returns the serialized bytes for this circuit key.
-func (k CircuitKey) Bytes() []byte {
- var bs = make([]byte, 16)
- binary.BigEndian.PutUint64(bs[:8], k.ChanID.ToUint64())
- binary.BigEndian.PutUint64(bs[8:], k.HtlcID)
- return bs
-}
-
-// Encode writes a CircuitKey to the provided io.Writer.
-func (k *CircuitKey) Encode(w io.Writer) er.R {
- var scratch [16]byte
- binary.BigEndian.PutUint64(scratch[:8], k.ChanID.ToUint64())
- binary.BigEndian.PutUint64(scratch[8:], k.HtlcID)
-
- _, err := util.Write(w, scratch[:])
- return err
-}
-
-// Decode reads a CircuitKey from the provided io.Reader.
-func (k *CircuitKey) Decode(r io.Reader) er.R {
- var scratch [16]byte
-
- if _, err := util.ReadFull(r, scratch[:]); err != nil {
- return err
- }
- k.ChanID = lnwire.NewShortChanIDFromInt(
- binary.BigEndian.Uint64(scratch[:8]))
- k.HtlcID = binary.BigEndian.Uint64(scratch[8:])
-
- return nil
-}
-
-// String returns a string representation of the CircuitKey.
-func (k CircuitKey) String() string {
- return fmt.Sprintf("(Chan ID=%s, HTLC ID=%d)", k.ChanID, k.HtlcID)
-}
-
-// CommitDiff represents the delta needed to apply the state transition between
-// two subsequent commitment states. Given state N and state N+1, one is able
-// to apply the set of messages contained within the CommitDiff to N to arrive
-// at state N+1. Each time a new commitment is extended, we'll write a new
-// commitment (along with the full commitment state) to disk so we can
-// re-transmit the state in the case of a connection loss or message drop.
-type CommitDiff struct {
- // ChannelCommitment is the full commitment state that one would arrive
- // at by applying the set of messages contained in the UpdateDiff to
- // the prior accepted commitment.
- Commitment ChannelCommitment
-
- // LogUpdates is the set of messages sent prior to the commitment state
- // transition in question. Upon reconnection, if we detect that they
- // don't have the commitment, then we re-send this along with the
- // proper signature.
- LogUpdates []LogUpdate
-
- // CommitSig is the exact CommitSig message that should be sent after
- // the set of LogUpdates above has been retransmitted. The signatures
- // within this message should properly cover the new commitment state
- // and also the HTLC's within the new commitment state.
- CommitSig *lnwire.CommitSig
-
- // OpenedCircuitKeys is a set of unique identifiers for any downstream
- // Add packets included in this commitment txn. After a restart, this
- // set of htlcs is acked from the link's incoming mailbox to ensure
- // there isn't an attempt to re-add them to this commitment txn.
- OpenedCircuitKeys []CircuitKey
-
- // ClosedCircuitKeys records the unique identifiers for any settle/fail
- // packets that were resolved by this commitment txn. After a restart,
- // this is used to ensure those circuits are removed from the circuit
- // map, and the downstream packets in the link's mailbox are removed.
- ClosedCircuitKeys []CircuitKey
-
- // AddAcks specifies the locations (commit height, pkg index) of any
- // Adds that were failed/settled in this commit diff. This will ack
- // entries in *this* channel's forwarding packages.
- //
- // NOTE: This value is not serialized, it is used to atomically mark the
- // resolution of adds, such that they will not be reprocessed after a
- // restart.
- AddAcks []AddRef
-
- // SettleFailAcks specifies the locations (chan id, commit height, pkg
- // index) of any Settles or Fails that were locked into this commit
- // diff, and originate from *another* channel, i.e. the outgoing link.
- //
- // NOTE: This value is not serialized, it is used to atomically acks
- // settles and fails from the forwarding packages of other channels,
- // such that they will not be reforwarded internally after a restart.
- SettleFailAcks []SettleFailRef
-}
-
-// serializeLogUpdates serializes provided list of updates to a stream.
-func serializeLogUpdates(w io.Writer, logUpdates []LogUpdate) er.R {
- numUpdates := uint16(len(logUpdates))
- if err := util.WriteBin(w, byteOrder, numUpdates); err != nil {
- return err
- }
-
- for _, diff := range logUpdates {
- err := WriteElements(w, diff.LogIndex, diff.UpdateMsg)
- if err != nil {
- return err
- }
- }
-
- return nil
-}
-
-// deserializeLogUpdates deserializes a list of updates from a stream.
-func deserializeLogUpdates(r io.Reader) ([]LogUpdate, er.R) {
- var numUpdates uint16
- if err := util.ReadBin(r, byteOrder, &numUpdates); err != nil {
- return nil, err
- }
-
- logUpdates := make([]LogUpdate, numUpdates)
- for i := 0; i < int(numUpdates); i++ {
- err := ReadElements(r,
- &logUpdates[i].LogIndex, &logUpdates[i].UpdateMsg,
- )
- if err != nil {
- return nil, err
- }
- }
- return logUpdates, nil
-}
-
-func serializeCommitDiff(w io.Writer, diff *CommitDiff) er.R {
- if err := serializeChanCommit(w, &diff.Commitment); err != nil {
- return err
- }
-
- if err := diff.CommitSig.Encode(w, 0); err != nil {
- return err
- }
-
- if err := serializeLogUpdates(w, diff.LogUpdates); err != nil {
- return err
- }
-
- numOpenRefs := uint16(len(diff.OpenedCircuitKeys))
- if err := util.WriteBin(w, byteOrder, numOpenRefs); err != nil {
- return err
- }
-
- for _, openRef := range diff.OpenedCircuitKeys {
- err := WriteElements(w, openRef.ChanID, openRef.HtlcID)
- if err != nil {
- return err
- }
- }
-
- numClosedRefs := uint16(len(diff.ClosedCircuitKeys))
- if err := util.WriteBin(w, byteOrder, numClosedRefs); err != nil {
- return err
- }
-
- for _, closedRef := range diff.ClosedCircuitKeys {
- err := WriteElements(w, closedRef.ChanID, closedRef.HtlcID)
- if err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func deserializeCommitDiff(r io.Reader) (*CommitDiff, er.R) {
- var (
- d CommitDiff
- err er.R
- )
-
- d.Commitment, err = deserializeChanCommit(r)
- if err != nil {
- return nil, err
- }
-
- d.CommitSig = &lnwire.CommitSig{}
- if err := d.CommitSig.Decode(r, 0); err != nil {
- return nil, err
- }
-
- d.LogUpdates, err = deserializeLogUpdates(r)
- if err != nil {
- return nil, err
- }
-
- var numOpenRefs uint16
- if err := util.ReadBin(r, byteOrder, &numOpenRefs); err != nil {
- return nil, err
- }
-
- d.OpenedCircuitKeys = make([]CircuitKey, numOpenRefs)
- for i := 0; i < int(numOpenRefs); i++ {
- err := ReadElements(r,
- &d.OpenedCircuitKeys[i].ChanID,
- &d.OpenedCircuitKeys[i].HtlcID)
- if err != nil {
- return nil, err
- }
- }
-
- var numClosedRefs uint16
- if err := util.ReadBin(r, byteOrder, &numClosedRefs); err != nil {
- return nil, err
- }
-
- d.ClosedCircuitKeys = make([]CircuitKey, numClosedRefs)
- for i := 0; i < int(numClosedRefs); i++ {
- err := ReadElements(r,
- &d.ClosedCircuitKeys[i].ChanID,
- &d.ClosedCircuitKeys[i].HtlcID)
- if err != nil {
- return nil, err
- }
- }
-
- return &d, nil
-}
-
-// AppendRemoteCommitChain appends a new CommitDiff to the end of the
-// commitment chain for the remote party. This method is to be used once we
-// have prepared a new commitment state for the remote party, but before we
-// transmit it to the remote party. The contents of the argument should be
-// sufficient to retransmit the updates and signature needed to reconstruct the
-// state in full, in the case that we need to retransmit.
-func (c *OpenChannel) AppendRemoteCommitChain(diff *CommitDiff) er.R {
- c.Lock()
- defer c.Unlock()
-
- // If this is a restored channel, then we want to avoid mutating the
- // state at all, as it's impossible to do so in a protocol compliant
- // manner.
- if c.hasChanStatus(ChanStatusRestored) {
- return ErrNoRestoredChannelMutation.Default()
- }
-
- return kvdb.Update(c.Db, func(tx kvdb.RwTx) er.R {
- // First, we'll grab the writable bucket where this channel's
- // data resides.
- chanBucket, err := fetchChanBucketRw(
- tx, c.IdentityPub, &c.FundingOutpoint, c.ChainHash,
- )
- if err != nil {
- return err
- }
-
- // If the channel is marked as borked, then for safety reasons,
- // we shouldn't attempt any further updates.
- isBorked, err := c.isBorked(chanBucket)
- if err != nil {
- return err
- }
- if isBorked {
- return ErrChanBorked.Default()
- }
-
- // Any outgoing settles and fails necessarily have a
- // corresponding adds in this channel's forwarding packages.
- // Mark all of these as being fully processed in our forwarding
- // package, which prevents us from reprocessing them after
- // startup.
- err = c.Packager.AckAddHtlcs(tx, diff.AddAcks...)
- if err != nil {
- return err
- }
-
- // Additionally, we ack from any fails or settles that are
- // persisted in another channel's forwarding package. This
- // prevents the same fails and settles from being retransmitted
- // after restarts. The actual fail or settle we need to
- // propagate to the remote party is now in the commit diff.
- err = c.Packager.AckSettleFails(tx, diff.SettleFailAcks...)
- if err != nil {
- return err
- }
-
- // TODO(roasbeef): use seqno to derive key for later LCP
-
- // With the bucket retrieved, we'll now serialize the commit
- // diff itself, and write it to disk.
- var b bytes.Buffer
- if err := serializeCommitDiff(&b, diff); err != nil {
- return err
- }
- return chanBucket.Put(commitDiffKey, b.Bytes())
- }, func() {})
-}
-
-// RemoteCommitChainTip returns the "tip" of the current remote commitment
-// chain. This value will be non-nil iff, we've created a new commitment for
-// the remote party that they haven't yet ACK'd. In this case, their commitment
-// chain will have a length of two: their current unrevoked commitment, and
-// this new pending commitment. Once they revoked their prior state, we'll swap
-// these pointers, causing the tip and the tail to point to the same entry.
-func (c *OpenChannel) RemoteCommitChainTip() (*CommitDiff, er.R) {
- var cd *CommitDiff
- err := kvdb.View(c.Db, func(tx kvdb.RTx) er.R {
- chanBucket, err := fetchChanBucket(
- tx, c.IdentityPub, &c.FundingOutpoint, c.ChainHash,
- )
- if err, stop := mapErr(err, ErrNoPendingCommit); stop {
- return err
- }
-
- tipBytes := chanBucket.Get(commitDiffKey)
- if tipBytes == nil {
- return ErrNoPendingCommit.Default()
- }
-
- tipReader := bytes.NewReader(tipBytes)
- dcd, err := deserializeCommitDiff(tipReader)
- if err != nil {
- return err
- }
-
- cd = dcd
- return nil
- }, func() {
- cd = nil
- })
- if err != nil {
- return nil, err
- }
-
- return cd, err
-}
-
-// UnsignedAckedUpdates retrieves the persisted unsigned acked remote log
-// updates that still need to be signed for.
-func (c *OpenChannel) UnsignedAckedUpdates() ([]LogUpdate, er.R) {
- var updates []LogUpdate
- err := kvdb.View(c.Db, func(tx kvdb.RTx) er.R {
- chanBucket, err := fetchChanBucket(
- tx, c.IdentityPub, &c.FundingOutpoint, c.ChainHash,
- )
- if err, stop := mapErr(err, nil); stop {
- return err
- }
-
- updateBytes := chanBucket.Get(unsignedAckedUpdatesKey)
- if updateBytes == nil {
- return nil
- }
-
- r := bytes.NewReader(updateBytes)
- updates, err = deserializeLogUpdates(r)
- return err
- }, func() {
- updates = nil
- })
- if err != nil {
- return nil, err
- }
-
- return updates, nil
-}
-
-// RemoteUnsignedLocalUpdates retrieves the persisted, unsigned local log
-// updates that the remote still needs to sign for.
-func (c *OpenChannel) RemoteUnsignedLocalUpdates() ([]LogUpdate, er.R) {
- var updates []LogUpdate
- err := kvdb.View(c.Db, func(tx kvdb.RTx) er.R {
- chanBucket, err := fetchChanBucket(
- tx, c.IdentityPub, &c.FundingOutpoint, c.ChainHash,
- )
- if err, stop := mapErr(err, nil); stop {
- return err
- }
-
- updateBytes := chanBucket.Get(remoteUnsignedLocalUpdatesKey)
- if updateBytes == nil {
- return nil
- }
-
- r := bytes.NewReader(updateBytes)
- updates, err = deserializeLogUpdates(r)
- return err
- }, func() {
- updates = nil
- })
- if err != nil {
- return nil, err
- }
-
- return updates, nil
-}
-
-// InsertNextRevocation inserts the _next_ commitment point (revocation) into
-// the database, and also modifies the internal RemoteNextRevocation attribute
-// to point to the passed key. This method is to be using during final channel
-// set up, _after_ the channel has been fully confirmed.
-//
-// NOTE: If this method isn't called, then the target channel won't be able to
-// propose new states for the commitment state of the remote party.
-func (c *OpenChannel) InsertNextRevocation(revKey *btcec.PublicKey) er.R {
- c.Lock()
- defer c.Unlock()
-
- c.RemoteNextRevocation = revKey
-
- err := kvdb.Update(c.Db, func(tx kvdb.RwTx) er.R {
- chanBucket, err := fetchChanBucketRw(
- tx, c.IdentityPub, &c.FundingOutpoint, c.ChainHash,
- )
- if err != nil {
- return err
- }
-
- return putChanRevocationState(chanBucket, c)
- }, func() {})
- if err != nil {
- return err
- }
-
- return nil
-}
-
-// AdvanceCommitChainTail records the new state transition within an on-disk
-// append-only log which records all state transitions by the remote peer. In
-// the case of an uncooperative broadcast of a prior state by the remote peer,
-// this log can be consulted in order to reconstruct the state needed to
-// rectify the situation. This method will add the current commitment for the
-// remote party to the revocation log, and promote the current pending
-// commitment to the current remote commitment. The updates parameter is the
-// set of local updates that the peer still needs to send us a signature for.
-// We store this set of updates in case we go down.
-func (c *OpenChannel) AdvanceCommitChainTail(fwdPkg *FwdPkg,
- updates []LogUpdate) er.R {
-
- c.Lock()
- defer c.Unlock()
-
- // If this is a restored channel, then we want to avoid mutating the
- // state at all, as it's impossible to do so in a protocol compliant
- // manner.
- if c.hasChanStatus(ChanStatusRestored) {
- return ErrNoRestoredChannelMutation.Default()
- }
-
- var newRemoteCommit *ChannelCommitment
-
- err := kvdb.Update(c.Db, func(tx kvdb.RwTx) er.R {
- chanBucket, err := fetchChanBucketRw(
- tx, c.IdentityPub, &c.FundingOutpoint, c.ChainHash,
- )
- if err != nil {
- return err
- }
-
- // If the channel is marked as borked, then for safety reasons,
- // we shouldn't attempt any further updates.
- isBorked, err := c.isBorked(chanBucket)
- if err != nil {
- return err
- }
- if isBorked {
- return ErrChanBorked.Default()
- }
-
- // Persist the latest preimage state to disk as the remote peer
- // has just added to our local preimage store, and given us a
- // new pending revocation key.
- if err := putChanRevocationState(chanBucket, c); err != nil {
- return err
- }
-
- // With the current preimage producer/store state updated,
- // append a new log entry recording this the delta of this
- // state transition.
- //
- // TODO(roasbeef): could make the deltas relative, would save
- // space, but then tradeoff for more disk-seeks to recover the
- // full state.
- logKey := revocationLogBucket
- logBucket, err := chanBucket.CreateBucketIfNotExists(logKey)
- if err != nil {
- return err
- }
-
- // Before we append this revoked state to the revocation log,
- // we'll swap out what's currently the tail of the commit tip,
- // with the current locked-in commitment for the remote party.
- tipBytes := chanBucket.Get(commitDiffKey)
- tipReader := bytes.NewReader(tipBytes)
- newCommit, err := deserializeCommitDiff(tipReader)
- if err != nil {
- return err
- }
- err = putChanCommitment(
- chanBucket, &newCommit.Commitment, false,
- )
- if err != nil {
- return err
- }
- if err := chanBucket.Delete(commitDiffKey); err != nil {
- return err
- }
-
- // With the commitment pointer swapped, we can now add the
- // revoked (prior) state to the revocation log.
- //
- // TODO(roasbeef): store less
- err = appendChannelLogEntry(logBucket, &c.RemoteCommitment)
- if err != nil {
- return err
- }
-
- // Lastly, we write the forwarding package to disk so that we
- // can properly recover from failures and reforward HTLCs that
- // have not received a corresponding settle/fail.
- if err := c.Packager.AddFwdPkg(tx, fwdPkg); err != nil {
- return err
- }
-
- // Persist the unsigned acked updates that are not included
- // in their new commitment.
- updateBytes := chanBucket.Get(unsignedAckedUpdatesKey)
- if updateBytes == nil {
- // If there are no updates to sign, we don't need to
- // filter out any updates.
- newRemoteCommit = &newCommit.Commitment
- return nil
- }
-
- r := bytes.NewReader(updateBytes)
- unsignedUpdates, err := deserializeLogUpdates(r)
- if err != nil {
- return err
- }
-
- var validUpdates []LogUpdate
- for _, upd := range unsignedUpdates {
- lIdx := upd.LogIndex
-
- // Filter for updates that are not on the remote
- // commitment.
- if lIdx >= newCommit.Commitment.RemoteLogIndex {
- validUpdates = append(validUpdates, upd)
- }
- }
-
- var b bytes.Buffer
- err = serializeLogUpdates(&b, validUpdates)
- if err != nil {
- return er.Errorf("unable to serialize log updates: %v", err)
- }
-
- err = chanBucket.Put(unsignedAckedUpdatesKey, b.Bytes())
- if err != nil {
- return er.Errorf("unable to store under unsignedAckedUpdatesKey: %v", err)
- }
-
- // Persist the local updates the peer hasn't yet signed so they
- // can be restored after restart.
- var b2 bytes.Buffer
- err = serializeLogUpdates(&b2, updates)
- if err != nil {
- return err
- }
-
- err = chanBucket.Put(remoteUnsignedLocalUpdatesKey, b2.Bytes())
- if err != nil {
- return er.Errorf("unable to restore remote unsigned "+
- "local updates: %v", err)
- }
-
- newRemoteCommit = &newCommit.Commitment
-
- return nil
- }, func() {
- newRemoteCommit = nil
- })
- if err != nil {
- return err
- }
-
- // With the db transaction complete, we'll swap over the in-memory
- // pointer of the new remote commitment, which was previously the tip
- // of the commit chain.
- c.RemoteCommitment = *newRemoteCommit
-
- return nil
-}
-
-// NextLocalHtlcIndex returns the next unallocated local htlc index. To ensure
-// this always returns the next index that has been not been allocated, this
-// will first try to examine any pending commitments, before falling back to the
-// last locked-in remote commitment.
-func (c *OpenChannel) NextLocalHtlcIndex() (uint64, er.R) {
- // First, load the most recent commit diff that we initiated for the
- // remote party. If no pending commit is found, this is not treated as
- // a critical error, since we can always fall back.
- pendingRemoteCommit, err := c.RemoteCommitChainTip()
- if err != nil && !ErrNoPendingCommit.Is(err) {
- return 0, err
- }
-
- // If a pending commit was found, its local htlc index will be at least
- // as large as the one on our local commitment.
- if pendingRemoteCommit != nil {
- return pendingRemoteCommit.Commitment.LocalHtlcIndex, nil
- }
-
- // Otherwise, fallback to using the local htlc index of their commitment.
- return c.RemoteCommitment.LocalHtlcIndex, nil
-}
-
-// LoadFwdPkgs scans the forwarding log for any packages that haven't been
-// processed, and returns their deserialized log updates in map indexed by the
-// remote commitment height at which the updates were locked in.
-func (c *OpenChannel) LoadFwdPkgs() ([]*FwdPkg, er.R) {
- c.RLock()
- defer c.RUnlock()
-
- var fwdPkgs []*FwdPkg
- if err := kvdb.View(c.Db, func(tx kvdb.RTx) er.R {
- var err er.R
- fwdPkgs, err = c.Packager.LoadFwdPkgs(tx)
- return err
- }, func() {
- fwdPkgs = nil
- }); err != nil {
- return nil, err
- }
-
- return fwdPkgs, nil
-}
-
-// AckAddHtlcs updates the AckAddFilter containing any of the provided AddRefs
-// indicating that a response to this Add has been committed to the remote party.
-// Doing so will prevent these Add HTLCs from being reforwarded internally.
-func (c *OpenChannel) AckAddHtlcs(addRefs ...AddRef) er.R {
- c.Lock()
- defer c.Unlock()
-
- return kvdb.Update(c.Db, func(tx kvdb.RwTx) er.R {
- return c.Packager.AckAddHtlcs(tx, addRefs...)
- }, func() {})
-}
-
-// AckSettleFails updates the SettleFailFilter containing any of the provided
-// SettleFailRefs, indicating that the response has been delivered to the
-// incoming link, corresponding to a particular AddRef. Doing so will prevent
-// the responses from being retransmitted internally.
-func (c *OpenChannel) AckSettleFails(settleFailRefs ...SettleFailRef) er.R {
- c.Lock()
- defer c.Unlock()
-
- return kvdb.Update(c.Db, func(tx kvdb.RwTx) er.R {
- return c.Packager.AckSettleFails(tx, settleFailRefs...)
- }, func() {})
-}
-
-// SetFwdFilter atomically sets the forwarding filter for the forwarding package
-// identified by `height`.
-func (c *OpenChannel) SetFwdFilter(height uint64, fwdFilter *PkgFilter) er.R {
- c.Lock()
- defer c.Unlock()
-
- return kvdb.Update(c.Db, func(tx kvdb.RwTx) er.R {
- return c.Packager.SetFwdFilter(tx, height, fwdFilter)
- }, func() {})
-}
-
-// RemoveFwdPkgs atomically removes forwarding packages specified by the remote
-// commitment heights. If one of the intermediate RemovePkg calls fails, then the
-// later packages won't be removed.
-//
-// NOTE: This method should only be called on packages marked FwdStateCompleted.
-func (c *OpenChannel) RemoveFwdPkgs(heights ...uint64) er.R {
- c.Lock()
- defer c.Unlock()
-
- return kvdb.Update(c.Db, func(tx kvdb.RwTx) er.R {
- for _, height := range heights {
- err := c.Packager.RemovePkg(tx, height)
- if err != nil {
- return err
- }
- }
-
- return nil
- }, func() {})
-}
-
-// RevocationLogTail returns the "tail", or the end of the current revocation
-// log. This entry represents the last previous state for the remote node's
-// commitment chain. The ChannelDelta returned by this method will always lag
-// one state behind the most current (unrevoked) state of the remote node's
-// commitment chain.
-func (c *OpenChannel) RevocationLogTail() (*ChannelCommitment, er.R) {
- c.RLock()
- defer c.RUnlock()
-
- // If we haven't created any state updates yet, then we'll exit early as
- // there's nothing to be found on disk in the revocation bucket.
- if c.RemoteCommitment.CommitHeight == 0 {
- return nil, nil
- }
-
- var commit ChannelCommitment
- if err := kvdb.View(c.Db, func(tx kvdb.RTx) er.R {
- chanBucket, err := fetchChanBucket(
- tx, c.IdentityPub, &c.FundingOutpoint, c.ChainHash,
- )
- if err != nil {
- return err
- }
-
- logBucket := chanBucket.NestedReadBucket(revocationLogBucket)
- if logBucket == nil {
- return ErrNoPastDeltas.Default()
- }
-
- // Once we have the bucket that stores the revocation log from
- // this channel, we'll jump to the _last_ key in bucket. As we
- // store the update number on disk in a big-endian format,
- // this will retrieve the latest entry.
- cursor := logBucket.ReadCursor()
- _, tailLogEntry := cursor.Last()
- logEntryReader := bytes.NewReader(tailLogEntry)
-
- // Once we have the entry, we'll decode it into the channel
- // delta pointer we created above.
- var dbErr er.R
- commit, dbErr = deserializeChanCommit(logEntryReader)
- if dbErr != nil {
- return dbErr
- }
-
- return nil
- }, func() {}); err != nil {
- return nil, err
- }
-
- return &commit, nil
-}
-
-// CommitmentHeight returns the current commitment height. The commitment
-// height represents the number of updates to the commitment state to date.
-// This value is always monotonically increasing. This method is provided in
-// order to allow multiple instances of a particular open channel to obtain a
-// consistent view of the number of channel updates to date.
-func (c *OpenChannel) CommitmentHeight() (uint64, er.R) {
- c.RLock()
- defer c.RUnlock()
-
- var height uint64
- err := kvdb.View(c.Db, func(tx kvdb.RTx) er.R {
- // Get the bucket dedicated to storing the metadata for open
- // channels.
- chanBucket, err := fetchChanBucket(
- tx, c.IdentityPub, &c.FundingOutpoint, c.ChainHash,
- )
- if err != nil {
- return err
- }
-
- commit, err := fetchChanCommitment(chanBucket, true)
- if err != nil {
- return err
- }
-
- height = commit.CommitHeight
- return nil
- }, func() {
- height = 0
- })
- if err != nil {
- return 0, err
- }
-
- return height, nil
-}
-
-// FindPreviousState scans through the append-only log in an attempt to recover
-// the previous channel state indicated by the update number. This method is
-// intended to be used for obtaining the relevant data needed to claim all
-// funds rightfully spendable in the case of an on-chain broadcast of the
-// commitment transaction.
-func (c *OpenChannel) FindPreviousState(updateNum uint64) (*ChannelCommitment, er.R) {
- c.RLock()
- defer c.RUnlock()
-
- var commit ChannelCommitment
- err := kvdb.View(c.Db, func(tx kvdb.RTx) er.R {
- chanBucket, err := fetchChanBucket(
- tx, c.IdentityPub, &c.FundingOutpoint, c.ChainHash,
- )
- if err != nil {
- return err
- }
-
- logBucket := chanBucket.NestedReadBucket(revocationLogBucket)
- if logBucket == nil {
- return ErrNoPastDeltas.Default()
- }
-
- c, err := fetchChannelLogEntry(logBucket, updateNum)
- if err != nil {
- return err
- }
-
- commit = c
- return nil
- }, func() {})
- if err != nil {
- return nil, err
- }
-
- return &commit, nil
-}
-
-// ClosureType is an enum like structure that details exactly _how_ a channel
-// was closed. Three closure types are currently possible: none, cooperative,
-// local force close, remote force close, and (remote) breach.
-type ClosureType uint8
-
-const (
- // CooperativeClose indicates that a channel has been closed
- // cooperatively. This means that both channel peers were online and
- // signed a new transaction paying out the settled balance of the
- // contract.
- CooperativeClose ClosureType = 0
-
- // LocalForceClose indicates that we have unilaterally broadcast our
- // current commitment state on-chain.
- LocalForceClose ClosureType = 1
-
- // RemoteForceClose indicates that the remote peer has unilaterally
- // broadcast their current commitment state on-chain.
- RemoteForceClose ClosureType = 4
-
- // BreachClose indicates that the remote peer attempted to broadcast a
- // prior _revoked_ channel state.
- BreachClose ClosureType = 2
-
- // FundingCanceled indicates that the channel never was fully opened
- // before it was marked as closed in the database. This can happen if
- // we or the remote fail at some point during the opening workflow, or
- // we timeout waiting for the funding transaction to be confirmed.
- FundingCanceled ClosureType = 3
-
- // Abandoned indicates that the channel state was removed without
- // any further actions. This is intended to clean up unusable
- // channels during development.
- Abandoned ClosureType = 5
-)
-
-// ChannelCloseSummary contains the final state of a channel at the point it
-// was closed. Once a channel is closed, all the information pertaining to that
-// channel within the openChannelBucket is deleted, and a compact summary is
-// put in place instead.
-type ChannelCloseSummary struct {
- // ChanPoint is the outpoint for this channel's funding transaction,
- // and is used as a unique identifier for the channel.
- ChanPoint wire.OutPoint
-
- // ShortChanID encodes the exact location in the chain in which the
- // channel was initially confirmed. This includes: the block height,
- // transaction index, and the output within the target transaction.
- ShortChanID lnwire.ShortChannelID
-
- // ChainHash is the hash of the genesis block that this channel resides
- // within.
- ChainHash chainhash.Hash
-
- // ClosingTXID is the txid of the transaction which ultimately closed
- // this channel.
- ClosingTXID chainhash.Hash
-
- // RemotePub is the public key of the remote peer that we formerly had
- // a channel with.
- RemotePub *btcec.PublicKey
-
- // Capacity was the total capacity of the channel.
- Capacity btcutil.Amount
-
- // CloseHeight is the height at which the funding transaction was
- // spent.
- CloseHeight uint32
-
- // SettledBalance is our total balance settled balance at the time of
- // channel closure. This _does not_ include the sum of any outputs that
- // have been time-locked as a result of the unilateral channel closure.
- SettledBalance btcutil.Amount
-
- // TimeLockedBalance is the sum of all the time-locked outputs at the
- // time of channel closure. If we triggered the force closure of this
- // channel, then this value will be non-zero if our settled output is
- // above the dust limit. If we were on the receiving side of a channel
- // force closure, then this value will be non-zero if we had any
- // outstanding outgoing HTLC's at the time of channel closure.
- TimeLockedBalance btcutil.Amount
-
- // CloseType details exactly _how_ the channel was closed. Five closure
- // types are possible: cooperative, local force, remote force, breach
- // and funding canceled.
- CloseType ClosureType
-
- // IsPending indicates whether this channel is in the 'pending close'
- // state, which means the channel closing transaction has been
- // confirmed, but not yet been fully resolved. In the case of a channel
- // that has been cooperatively closed, it will go straight into the
- // fully resolved state as soon as the closing transaction has been
- // confirmed. However, for channels that have been force closed, they'll
- // stay marked as "pending" until _all_ the pending funds have been
- // swept.
- IsPending bool
-
- // RemoteCurrentRevocation is the current revocation for their
- // commitment transaction. However, since this is the derived public key,
- // we don't yet have the private key so we aren't yet able to verify
- // that it's actually in the hash chain.
- RemoteCurrentRevocation *btcec.PublicKey
-
- // RemoteNextRevocation is the revocation key to be used for the *next*
- // commitment transaction we create for the local node. Within the
- // specification, this value is referred to as the
- // per-commitment-point.
- RemoteNextRevocation *btcec.PublicKey
-
- // LocalChanCfg is the channel configuration for the local node.
- LocalChanConfig ChannelConfig
-
- // LastChanSyncMsg is the ChannelReestablish message for this channel
- // for the state at the point where it was closed.
- LastChanSyncMsg *lnwire.ChannelReestablish
-}
-
-// CloseChannel closes a previously active Lightning channel. Closing a channel
-// entails deleting all saved state within the database concerning this
-// channel. This method also takes a struct that summarizes the state of the
-// channel at closing, this compact representation will be the only component
-// of a channel left over after a full closing. It takes an optional set of
-// channel statuses which will be written to the historical channel bucket.
-// These statuses are used to record close initiators.
-func (c *OpenChannel) CloseChannel(summary *ChannelCloseSummary,
- statuses ...ChannelStatus) er.R {
-
- c.Lock()
- defer c.Unlock()
-
- return kvdb.Update(c.Db, func(tx kvdb.RwTx) er.R {
- openChanBucket := tx.ReadWriteBucket(openChannelBucket)
- if openChanBucket == nil {
- return ErrNoChanDBExists.Default()
- }
-
- nodePub := c.IdentityPub.SerializeCompressed()
- nodeChanBucket := openChanBucket.NestedReadWriteBucket(nodePub)
- if nodeChanBucket == nil {
- return ErrNoActiveChannels.Default()
- }
-
- chainBucket := nodeChanBucket.NestedReadWriteBucket(c.ChainHash[:])
- if chainBucket == nil {
- return ErrNoActiveChannels.Default()
- }
-
- var chanPointBuf bytes.Buffer
- err := writeOutpoint(&chanPointBuf, &c.FundingOutpoint)
- if err != nil {
- return err
- }
- chanKey := chanPointBuf.Bytes()
- chanBucket := chainBucket.NestedReadWriteBucket(
- chanKey,
- )
- if chanBucket == nil {
- return ErrNoActiveChannels.Default()
- }
-
- // Before we delete the channel state, we'll read out the full
- // details, as we'll also store portions of this information
- // for record keeping.
- chanState, err := fetchOpenChannel(
- chanBucket, &c.FundingOutpoint,
- )
- if err != nil {
- return err
- }
-
- // Now that the index to this channel has been deleted, purge
- // the remaining channel metadata from the database.
- err = deleteOpenChannel(chanBucket)
- if err != nil {
- return err
- }
-
- // We'll also remove the channel from the frozen channel bucket
- // if we need to.
- if c.ChanType.IsFrozen() {
- err := deleteThawHeight(chanBucket)
- if err != nil {
- return err
- }
- }
-
- // With the base channel data deleted, attempt to delete the
- // information stored within the revocation log.
- logBucket := chanBucket.NestedReadWriteBucket(revocationLogBucket)
- if logBucket != nil {
- err = chanBucket.DeleteNestedBucket(revocationLogBucket)
- if err != nil {
- return err
- }
- }
-
- err = chainBucket.DeleteNestedBucket(chanPointBuf.Bytes())
- if err != nil {
- return err
- }
-
- // Add channel state to the historical channel bucket.
- historicalBucket, err := tx.CreateTopLevelBucket(
- historicalChannelBucket,
- )
- if err != nil {
- return err
- }
-
- historicalChanBucket, err :=
- historicalBucket.CreateBucketIfNotExists(chanKey)
- if err != nil {
- return err
- }
-
- // Apply any additional statuses to the channel state.
- for _, status := range statuses {
- chanState.chanStatus |= status
- }
-
- err = putOpenChannel(historicalChanBucket, chanState)
- if err != nil {
- return err
- }
-
- // Finally, create a summary of this channel in the closed
- // channel bucket for this node.
- return putChannelCloseSummary(
- tx, chanPointBuf.Bytes(), summary, chanState,
- )
- }, func() {})
-}
-
-// ChannelSnapshot is a frozen snapshot of the current channel state. A
-// snapshot is detached from the original channel that generated it, providing
-// read-only access to the current or prior state of an active channel.
-//
-// TODO(roasbeef): remove all together? pretty much just commitment
-type ChannelSnapshot struct {
- // RemoteIdentity is the identity public key of the remote node that we
- // are maintaining the open channel with.
- RemoteIdentity btcec.PublicKey
-
- // ChanPoint is the outpoint that created the channel. This output is
- // found within the funding transaction and uniquely identified the
- // channel on the resident chain.
- ChannelPoint wire.OutPoint
-
- // ChainHash is the genesis hash of the chain that the channel resides
- // within.
- ChainHash chainhash.Hash
-
- // Capacity is the total capacity of the channel.
- Capacity btcutil.Amount
-
- // TotalMSatSent is the total number of milli-satoshis we've sent
- // within this channel.
- TotalMSatSent lnwire.MilliSatoshi
-
- // TotalMSatReceived is the total number of milli-satoshis we've
- // received within this channel.
- TotalMSatReceived lnwire.MilliSatoshi
-
- // ChannelCommitment is the current up-to-date commitment for the
- // target channel.
- ChannelCommitment
-}
-
-// Snapshot returns a read-only snapshot of the current channel state. This
-// snapshot includes information concerning the current settled balance within
-// the channel, metadata detailing total flows, and any outstanding HTLCs.
-func (c *OpenChannel) Snapshot() *ChannelSnapshot {
- c.RLock()
- defer c.RUnlock()
-
- localCommit := c.LocalCommitment
- snapshot := &ChannelSnapshot{
- RemoteIdentity: *c.IdentityPub,
- ChannelPoint: c.FundingOutpoint,
- Capacity: c.Capacity,
- TotalMSatSent: c.TotalMSatSent,
- TotalMSatReceived: c.TotalMSatReceived,
- ChainHash: c.ChainHash,
- ChannelCommitment: ChannelCommitment{
- LocalBalance: localCommit.LocalBalance,
- RemoteBalance: localCommit.RemoteBalance,
- CommitHeight: localCommit.CommitHeight,
- CommitFee: localCommit.CommitFee,
- },
- }
-
- // Copy over the current set of HTLCs to ensure the caller can't mutate
- // our internal state.
- snapshot.Htlcs = make([]HTLC, len(localCommit.Htlcs))
- for i, h := range localCommit.Htlcs {
- snapshot.Htlcs[i] = h.Copy()
- }
-
- return snapshot
-}
-
-// LatestCommitments returns the two latest commitments for both the local and
-// remote party. These commitments are read from disk to ensure that only the
-// latest fully committed state is returned. The first commitment returned is
-// the local commitment, and the second returned is the remote commitment.
-func (c *OpenChannel) LatestCommitments() (*ChannelCommitment, *ChannelCommitment, er.R) {
- err := kvdb.View(c.Db, func(tx kvdb.RTx) er.R {
- chanBucket, err := fetchChanBucket(
- tx, c.IdentityPub, &c.FundingOutpoint, c.ChainHash,
- )
- if err != nil {
- return err
- }
-
- return fetchChanCommitments(chanBucket, c)
- }, func() {})
- if err != nil {
- return nil, nil, err
- }
-
- return &c.LocalCommitment, &c.RemoteCommitment, nil
-}
-
-// RemoteRevocationStore returns the most up to date commitment version of the
-// revocation storage tree for the remote party. This method can be used when
-// acting on a possible contract breach to ensure, that the caller has the most
-// up to date information required to deliver justice.
-func (c *OpenChannel) RemoteRevocationStore() (shachain.Store, er.R) {
- err := kvdb.View(c.Db, func(tx kvdb.RTx) er.R {
- chanBucket, err := fetchChanBucket(
- tx, c.IdentityPub, &c.FundingOutpoint, c.ChainHash,
- )
- if err != nil {
- return err
- }
-
- return fetchChanRevocationState(chanBucket, c)
- }, func() {})
- if err != nil {
- return nil, err
- }
-
- return c.RevocationStore, nil
-}
-
-// AbsoluteThawHeight determines a frozen channel's absolute thaw height. If the
-// channel is not frozen, then 0 is returned.
-func (c *OpenChannel) AbsoluteThawHeight() (uint32, er.R) {
- // Only frozen channels have a thaw height.
- if !c.ChanType.IsFrozen() {
- return 0, nil
- }
-
- // If the channel's thaw height is below the absolute threshold, then
- // it's interpreted as a relative height to the chain's current height.
- if c.ThawHeight < AbsoluteThawHeightThreshold {
- // We'll only known of the channel's short ID once it's
- // confirmed.
- if c.IsPending {
- return 0, er.New("cannot use relative thaw " +
- "height for unconfirmed channel")
- }
- return c.ShortChannelID.BlockHeight + c.ThawHeight, nil
- }
- return c.ThawHeight, nil
-}
-
-func putChannelCloseSummary(tx kvdb.RwTx, chanID []byte,
- summary *ChannelCloseSummary, lastChanState *OpenChannel) er.R {
-
- closedChanBucket, err := tx.CreateTopLevelBucket(closedChannelBucket)
- if err != nil {
- return err
- }
-
- summary.RemoteCurrentRevocation = lastChanState.RemoteCurrentRevocation
- summary.RemoteNextRevocation = lastChanState.RemoteNextRevocation
- summary.LocalChanConfig = lastChanState.LocalChanCfg
-
- var b bytes.Buffer
- if err := serializeChannelCloseSummary(&b, summary); err != nil {
- return err
- }
-
- return closedChanBucket.Put(chanID, b.Bytes())
-}
-
-func serializeChannelCloseSummary(w io.Writer, cs *ChannelCloseSummary) er.R {
- err := WriteElements(w,
- cs.ChanPoint, cs.ShortChanID, cs.ChainHash, cs.ClosingTXID,
- cs.CloseHeight, cs.RemotePub, cs.Capacity, cs.SettledBalance,
- cs.TimeLockedBalance, cs.CloseType, cs.IsPending,
- )
- if err != nil {
- return err
- }
-
- // If this is a close channel summary created before the addition of
- // the new fields, then we can exit here.
- if cs.RemoteCurrentRevocation == nil {
- return WriteElements(w, false)
- }
-
- // If fields are present, write boolean to indicate this, and continue.
- if err := WriteElements(w, true); err != nil {
- return err
- }
-
- if err := WriteElements(w, cs.RemoteCurrentRevocation); err != nil {
- return err
- }
-
- if err := writeChanConfig(w, &cs.LocalChanConfig); err != nil {
- return err
- }
-
- // The RemoteNextRevocation field is optional, as it's possible for a
- // channel to be closed before we learn of the next unrevoked
- // revocation point for the remote party. Write a boolen indicating
- // whether this field is present or not.
- if err := WriteElements(w, cs.RemoteNextRevocation != nil); err != nil {
- return err
- }
-
- // Write the field, if present.
- if cs.RemoteNextRevocation != nil {
- if err = WriteElements(w, cs.RemoteNextRevocation); err != nil {
- return err
- }
- }
-
- // Write whether the channel sync message is present.
- if err := WriteElements(w, cs.LastChanSyncMsg != nil); err != nil {
- return err
- }
-
- // Write the channel sync message, if present.
- if cs.LastChanSyncMsg != nil {
- if err := WriteElements(w, cs.LastChanSyncMsg); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func deserializeCloseChannelSummary(r io.Reader) (*ChannelCloseSummary, er.R) {
- c := &ChannelCloseSummary{}
-
- err := ReadElements(r,
- &c.ChanPoint, &c.ShortChanID, &c.ChainHash, &c.ClosingTXID,
- &c.CloseHeight, &c.RemotePub, &c.Capacity, &c.SettledBalance,
- &c.TimeLockedBalance, &c.CloseType, &c.IsPending,
- )
- if err != nil {
- return nil, err
- }
-
- // We'll now check to see if the channel close summary was encoded with
- // any of the additional optional fields.
- var hasNewFields bool
- err = ReadElements(r, &hasNewFields)
- if err != nil {
- return nil, err
- }
-
- // If fields are not present, we can return.
- if !hasNewFields {
- return c, nil
- }
-
- // Otherwise read the new fields.
- if err := ReadElements(r, &c.RemoteCurrentRevocation); err != nil {
- return nil, err
- }
-
- if err := readChanConfig(r, &c.LocalChanConfig); err != nil {
- return nil, err
- }
-
- // Finally, we'll attempt to read the next unrevoked commitment point
- // for the remote party. If we closed the channel before receiving a
- // funding locked message then this might not be present. A boolean
- // indicating whether the field is present will come first.
- var hasRemoteNextRevocation bool
- err = ReadElements(r, &hasRemoteNextRevocation)
- if err != nil {
- return nil, err
- }
-
- // If this field was written, read it.
- if hasRemoteNextRevocation {
- err = ReadElements(r, &c.RemoteNextRevocation)
- if err != nil {
- return nil, err
- }
- }
-
- // Check if we have a channel sync message to read.
- var hasChanSyncMsg bool
- err = ReadElements(r, &hasChanSyncMsg)
- if er.Wrapped(err) == io.EOF {
- return c, nil
- } else if err != nil {
- return nil, err
- }
-
- // If a chan sync message is present, read it.
- if hasChanSyncMsg {
- // We must pass in reference to a lnwire.Message for the codec
- // to support it.
- var msg lnwire.Message
- if err := ReadElements(r, &msg); err != nil {
- return nil, err
- }
-
- chanSync, ok := msg.(*lnwire.ChannelReestablish)
- if !ok {
- return nil, er.New("unable cast db Message to " +
- "ChannelReestablish")
- }
- c.LastChanSyncMsg = chanSync
- }
-
- return c, nil
-}
-
-func writeChanConfig(b io.Writer, c *ChannelConfig) er.R {
- return WriteElements(b,
- c.DustLimit, c.MaxPendingAmount, c.ChanReserve, c.MinHTLC,
- c.MaxAcceptedHtlcs, c.CsvDelay, c.MultiSigKey,
- c.RevocationBasePoint, c.PaymentBasePoint, c.DelayBasePoint,
- c.HtlcBasePoint,
- )
-}
-
-// fundingTxPresent returns true if expect the funding transcation to be found
-// on disk or already populated within the passed oen chanel struct.
-func fundingTxPresent(channel *OpenChannel) bool {
- chanType := channel.ChanType
-
- return chanType.IsSingleFunder() && chanType.HasFundingTx() &&
- channel.IsInitiator &&
- !channel.hasChanStatus(ChanStatusRestored)
-}
-
-func putChanInfo(chanBucket kvdb.RwBucket, channel *OpenChannel) er.R {
- var w bytes.Buffer
- if err := WriteElements(&w,
- channel.ChanType, channel.ChainHash, channel.FundingOutpoint,
- channel.ShortChannelID, channel.IsPending, channel.IsInitiator,
- channel.chanStatus, channel.FundingBroadcastHeight,
- channel.NumConfsRequired, channel.ChannelFlags,
- channel.IdentityPub, channel.Capacity, channel.TotalMSatSent,
- channel.TotalMSatReceived,
- ); err != nil {
- return err
- }
-
- // For single funder channels that we initiated, and we have the
- // funding transaction, then write the funding txn.
- if fundingTxPresent(channel) {
- if err := WriteElement(&w, channel.FundingTxn); err != nil {
- return err
- }
- }
-
- if err := writeChanConfig(&w, &channel.LocalChanCfg); err != nil {
- return err
- }
- if err := writeChanConfig(&w, &channel.RemoteChanCfg); err != nil {
- return err
- }
-
- if err := chanBucket.Put(chanInfoKey, w.Bytes()); err != nil {
- return err
- }
-
- // Finally, add optional shutdown scripts for the local and remote peer if
- // they are present.
- if err := putOptionalUpfrontShutdownScript(
- chanBucket, localUpfrontShutdownKey, channel.LocalShutdownScript,
- ); err != nil {
- return err
- }
-
- return putOptionalUpfrontShutdownScript(
- chanBucket, remoteUpfrontShutdownKey, channel.RemoteShutdownScript,
- )
-}
-
-// putOptionalUpfrontShutdownScript adds a shutdown script under the key
-// provided if it has a non-zero length.
-func putOptionalUpfrontShutdownScript(chanBucket kvdb.RwBucket, key []byte,
- script []byte) er.R {
- // If the script is empty, we do not need to add anything.
- if len(script) == 0 {
- return nil
- }
-
- var w bytes.Buffer
- if err := WriteElement(&w, script); err != nil {
- return err
- }
-
- return chanBucket.Put(key, w.Bytes())
-}
-
-// getOptionalUpfrontShutdownScript reads the shutdown script stored under the
-// key provided if it is present. Upfront shutdown scripts are optional, so the
-// function returns with no error if the key is not present.
-func getOptionalUpfrontShutdownScript(chanBucket kvdb.RBucket, key []byte,
- script *lnwire.DeliveryAddress) er.R {
-
- // Return early if the bucket does not exit, a shutdown script was not set.
- bs := chanBucket.Get(key)
- if bs == nil {
- return nil
- }
-
- var tempScript []byte
- r := bytes.NewReader(bs)
- if err := ReadElement(r, &tempScript); err != nil {
- return err
- }
- *script = tempScript
-
- return nil
-}
-
-func serializeChanCommit(w io.Writer, c *ChannelCommitment) er.R {
- if err := WriteElements(w,
- c.CommitHeight, c.LocalLogIndex, c.LocalHtlcIndex,
- c.RemoteLogIndex, c.RemoteHtlcIndex, c.LocalBalance,
- c.RemoteBalance, c.CommitFee, c.FeePerKw, c.CommitTx,
- c.CommitSig,
- ); err != nil {
- return err
- }
-
- return SerializeHtlcs(w, c.Htlcs...)
-}
-
-func putChanCommitment(chanBucket kvdb.RwBucket, c *ChannelCommitment,
- local bool) er.R {
-
- var commitKey []byte
- if local {
- commitKey = append(chanCommitmentKey, byte(0x00))
- } else {
- commitKey = append(chanCommitmentKey, byte(0x01))
- }
-
- var b bytes.Buffer
- if err := serializeChanCommit(&b, c); err != nil {
- return err
- }
-
- return chanBucket.Put(commitKey, b.Bytes())
-}
-
-func putChanCommitments(chanBucket kvdb.RwBucket, channel *OpenChannel) er.R {
- // If this is a restored channel, then we don't have any commitments to
- // write.
- if channel.hasChanStatus(ChanStatusRestored) {
- return nil
- }
-
- err := putChanCommitment(
- chanBucket, &channel.LocalCommitment, true,
- )
- if err != nil {
- return err
- }
-
- return putChanCommitment(
- chanBucket, &channel.RemoteCommitment, false,
- )
-}
-
-func putChanRevocationState(chanBucket kvdb.RwBucket, channel *OpenChannel) er.R {
-
- var b bytes.Buffer
- err := WriteElements(
- &b, channel.RemoteCurrentRevocation, channel.RevocationProducer,
- channel.RevocationStore,
- )
- if err != nil {
- return err
- }
-
- // TODO(roasbeef): don't keep producer on disk
-
- // If the next revocation is present, which is only the case after the
- // FundingLocked message has been sent, then we'll write it to disk.
- if channel.RemoteNextRevocation != nil {
- err = WriteElements(&b, channel.RemoteNextRevocation)
- if err != nil {
- return err
- }
- }
-
- return chanBucket.Put(revocationStateKey, b.Bytes())
-}
-
-func readChanConfig(b io.Reader, c *ChannelConfig) er.R {
- return ReadElements(b,
- &c.DustLimit, &c.MaxPendingAmount, &c.ChanReserve,
- &c.MinHTLC, &c.MaxAcceptedHtlcs, &c.CsvDelay,
- &c.MultiSigKey, &c.RevocationBasePoint,
- &c.PaymentBasePoint, &c.DelayBasePoint,
- &c.HtlcBasePoint,
- )
-}
-
-func fetchChanInfo(chanBucket kvdb.RBucket, channel *OpenChannel) er.R {
- infoBytes := chanBucket.Get(chanInfoKey)
- if infoBytes == nil {
- return ErrNoChanInfoFound.Default()
- }
- r := bytes.NewReader(infoBytes)
-
- if err := ReadElements(r,
- &channel.ChanType, &channel.ChainHash, &channel.FundingOutpoint,
- &channel.ShortChannelID, &channel.IsPending, &channel.IsInitiator,
- &channel.chanStatus, &channel.FundingBroadcastHeight,
- &channel.NumConfsRequired, &channel.ChannelFlags,
- &channel.IdentityPub, &channel.Capacity, &channel.TotalMSatSent,
- &channel.TotalMSatReceived,
- ); err != nil {
- return err
- }
-
- // For single funder channels that we initiated and have the funding
- // transaction to, read the funding txn.
- if fundingTxPresent(channel) {
- if err := ReadElement(r, &channel.FundingTxn); err != nil {
- return err
- }
- }
-
- if err := readChanConfig(r, &channel.LocalChanCfg); err != nil {
- return err
- }
- if err := readChanConfig(r, &channel.RemoteChanCfg); err != nil {
- return err
- }
-
- channel.Packager = NewChannelPackager(channel.ShortChannelID)
-
- // Finally, read the optional shutdown scripts.
- if err := getOptionalUpfrontShutdownScript(
- chanBucket, localUpfrontShutdownKey, &channel.LocalShutdownScript,
- ); err != nil {
- return err
- }
-
- return getOptionalUpfrontShutdownScript(
- chanBucket, remoteUpfrontShutdownKey, &channel.RemoteShutdownScript,
- )
-}
-
-func deserializeChanCommit(r io.Reader) (ChannelCommitment, er.R) {
- var c ChannelCommitment
-
- err := ReadElements(r,
- &c.CommitHeight, &c.LocalLogIndex, &c.LocalHtlcIndex, &c.RemoteLogIndex,
- &c.RemoteHtlcIndex, &c.LocalBalance, &c.RemoteBalance,
- &c.CommitFee, &c.FeePerKw, &c.CommitTx, &c.CommitSig,
- )
- if err != nil {
- return c, err
- }
-
- c.Htlcs, err = DeserializeHtlcs(r)
- if err != nil {
- return c, err
- }
-
- return c, nil
-}
-
-func fetchChanCommitment(chanBucket kvdb.RBucket, local bool) (ChannelCommitment, er.R) {
- var commitKey []byte
- if local {
- commitKey = append(chanCommitmentKey, byte(0x00))
- } else {
- commitKey = append(chanCommitmentKey, byte(0x01))
- }
-
- commitBytes := chanBucket.Get(commitKey)
- if commitBytes == nil {
- return ChannelCommitment{}, ErrNoCommitmentsFound.Default()
- }
-
- r := bytes.NewReader(commitBytes)
- return deserializeChanCommit(r)
-}
-
-func fetchChanCommitments(chanBucket kvdb.RBucket, channel *OpenChannel) er.R {
- var err er.R
-
- // If this is a restored channel, then we don't have any commitments to
- // read.
- if channel.hasChanStatus(ChanStatusRestored) {
- return nil
- }
-
- channel.LocalCommitment, err = fetchChanCommitment(chanBucket, true)
- if err != nil {
- return err
- }
- channel.RemoteCommitment, err = fetchChanCommitment(chanBucket, false)
- if err != nil {
- return err
- }
-
- return nil
-}
-
-func fetchChanRevocationState(chanBucket kvdb.RBucket, channel *OpenChannel) er.R {
- revBytes := chanBucket.Get(revocationStateKey)
- if revBytes == nil {
- return ErrNoRevocationsFound.Default()
- }
- r := bytes.NewReader(revBytes)
-
- err := ReadElements(
- r, &channel.RemoteCurrentRevocation, &channel.RevocationProducer,
- &channel.RevocationStore,
- )
- if err != nil {
- return err
- }
-
- // If there aren't any bytes left in the buffer, then we don't yet have
- // the next remote revocation, so we can exit early here.
- if r.Len() == 0 {
- return nil
- }
-
- // Otherwise we'll read the next revocation for the remote party which
- // is always the last item within the buffer.
- return ReadElements(r, &channel.RemoteNextRevocation)
-}
-
-func deleteOpenChannel(chanBucket kvdb.RwBucket) er.R {
-
- if err := chanBucket.Delete(chanInfoKey); err != nil {
- return err
- }
-
- err := chanBucket.Delete(append(chanCommitmentKey, byte(0x00)))
- if err != nil {
- return err
- }
- err = chanBucket.Delete(append(chanCommitmentKey, byte(0x01)))
- if err != nil {
- return err
- }
-
- if err := chanBucket.Delete(revocationStateKey); err != nil {
- return err
- }
-
- if diff := chanBucket.Get(commitDiffKey); diff != nil {
- return chanBucket.Delete(commitDiffKey)
- }
-
- return nil
-
-}
-
-// makeLogKey converts a uint64 into an 8 byte array.
-func makeLogKey(updateNum uint64) [8]byte {
- var key [8]byte
- byteOrder.PutUint64(key[:], updateNum)
- return key
-}
-
-func appendChannelLogEntry(log kvdb.RwBucket,
- commit *ChannelCommitment) er.R {
-
- var b bytes.Buffer
- if err := serializeChanCommit(&b, commit); err != nil {
- return err
- }
-
- logEntrykey := makeLogKey(commit.CommitHeight)
- return log.Put(logEntrykey[:], b.Bytes())
-}
-
-func fetchChannelLogEntry(log kvdb.RBucket,
- updateNum uint64) (ChannelCommitment, er.R) {
-
- logEntrykey := makeLogKey(updateNum)
- commitBytes := log.Get(logEntrykey[:])
- if commitBytes == nil {
- return ChannelCommitment{}, errLogEntryNotFound.Default()
- }
-
- commitReader := bytes.NewReader(commitBytes)
- return deserializeChanCommit(commitReader)
-}
-
-func fetchThawHeight(chanBucket kvdb.RBucket) (uint32, er.R) {
- var height uint32
-
- heightBytes := chanBucket.Get(frozenChanKey)
- heightReader := bytes.NewReader(heightBytes)
-
- if err := ReadElements(heightReader, &height); err != nil {
- return 0, err
- }
-
- return height, nil
-}
-
-func storeThawHeight(chanBucket kvdb.RwBucket, height uint32) er.R {
- var heightBuf bytes.Buffer
- if err := WriteElements(&heightBuf, height); err != nil {
- return err
- }
-
- return chanBucket.Put(frozenChanKey, heightBuf.Bytes())
-}
-
-func deleteThawHeight(chanBucket kvdb.RwBucket) er.R {
- return chanBucket.Delete(frozenChanKey)
-}
diff --git a/lnd/channeldb/channel_cache.go b/lnd/channeldb/channel_cache.go
deleted file mode 100644
index 2f26c185..00000000
--- a/lnd/channeldb/channel_cache.go
+++ /dev/null
@@ -1,50 +0,0 @@
-package channeldb
-
-// channelCache is an in-memory cache used to improve the performance of
-// ChanUpdatesInHorizon. It caches the chan info and edge policies for a
-// particular channel.
-type channelCache struct {
- n int
- channels map[uint64]ChannelEdge
-}
-
-// newChannelCache creates a new channelCache with maximum capacity of n
-// channels.
-func newChannelCache(n int) *channelCache {
- return &channelCache{
- n: n,
- channels: make(map[uint64]ChannelEdge),
- }
-}
-
-// get returns the channel from the cache, if it exists.
-func (c *channelCache) get(chanid uint64) (ChannelEdge, bool) {
- channel, ok := c.channels[chanid]
- return channel, ok
-}
-
-// insert adds the entry to the channel cache. If an entry for chanid already
-// exists, it will be replaced with the new entry. If the entry doesn't exist,
-// it will be inserted to the cache, performing a random eviction if the cache
-// is at capacity.
-func (c *channelCache) insert(chanid uint64, channel ChannelEdge) {
- // If entry exists, replace it.
- if _, ok := c.channels[chanid]; ok {
- c.channels[chanid] = channel
- return
- }
-
- // Otherwise, evict an entry at random and insert.
- if len(c.channels) == c.n {
- for id := range c.channels {
- delete(c.channels, id)
- break
- }
- }
- c.channels[chanid] = channel
-}
-
-// remove deletes an edge for chanid from the cache, if it exists.
-func (c *channelCache) remove(chanid uint64) {
- delete(c.channels, chanid)
-}
diff --git a/lnd/channeldb/channel_cache_test.go b/lnd/channeldb/channel_cache_test.go
deleted file mode 100644
index d776c131..00000000
--- a/lnd/channeldb/channel_cache_test.go
+++ /dev/null
@@ -1,105 +0,0 @@
-package channeldb
-
-import (
- "reflect"
- "testing"
-)
-
-// TestChannelCache checks the behavior of the channelCache with respect to
-// insertion, eviction, and removal of cache entries.
-func TestChannelCache(t *testing.T) {
- const cacheSize = 100
-
- // Create a new channel cache with the configured max size.
- c := newChannelCache(cacheSize)
-
- // As a sanity check, assert that querying the empty cache does not
- // return an entry.
- _, ok := c.get(0)
- if ok {
- t.Fatalf("channel cache should be empty")
- }
-
- // Now, fill up the cache entirely.
- for i := uint64(0); i < cacheSize; i++ {
- c.insert(i, channelForInt(i))
- }
-
- // Assert that the cache has all of the entries just inserted, since no
- // eviction should occur until we try to surpass the max size.
- assertHasChanEntries(t, c, 0, cacheSize)
-
- // Now, insert a new element that causes the cache to evict an element.
- c.insert(cacheSize, channelForInt(cacheSize))
-
- // Assert that the cache has this last entry, as the cache should evict
- // some prior element and not the newly inserted one.
- assertHasChanEntries(t, c, cacheSize, cacheSize)
-
- // Iterate over all inserted elements and construct a set of the evicted
- // elements.
- evicted := make(map[uint64]struct{})
- for i := uint64(0); i < cacheSize+1; i++ {
- _, ok := c.get(i)
- if !ok {
- evicted[i] = struct{}{}
- }
- }
-
- // Assert that exactly one element has been evicted.
- numEvicted := len(evicted)
- if numEvicted != 1 {
- t.Fatalf("expected one evicted entry, got: %d", numEvicted)
- }
-
- // Remove the highest item which initially caused the eviction and
- // reinsert the element that was evicted prior.
- c.remove(cacheSize)
- for i := range evicted {
- c.insert(i, channelForInt(i))
- }
-
- // Since the removal created an extra slot, the last insertion should
- // not have caused an eviction and the entries for all channels in the
- // original set that filled the cache should be present.
- assertHasChanEntries(t, c, 0, cacheSize)
-
- // Finally, reinsert the existing set back into the cache and test that
- // the cache still has all the entries. If the randomized eviction were
- // happening on inserts for existing cache items, we expect this to fail
- // with high probability.
- for i := uint64(0); i < cacheSize; i++ {
- c.insert(i, channelForInt(i))
- }
- assertHasChanEntries(t, c, 0, cacheSize)
-
-}
-
-// assertHasEntries queries the edge cache for all channels in the range [start,
-// end), asserting that they exist and their value matches the entry produced by
-// entryForInt.
-func assertHasChanEntries(t *testing.T, c *channelCache, start, end uint64) {
- t.Helper()
-
- for i := start; i < end; i++ {
- entry, ok := c.get(i)
- if !ok {
- t.Fatalf("channel cache should contain chan %d", i)
- }
-
- expEntry := channelForInt(i)
- if !reflect.DeepEqual(entry, expEntry) {
- t.Fatalf("entry mismatch, want: %v, got: %v",
- expEntry, entry)
- }
- }
-}
-
-// channelForInt generates a unique ChannelEdge given an integer.
-func channelForInt(i uint64) ChannelEdge {
- return ChannelEdge{
- Info: &ChannelEdgeInfo{
- ChannelID: i,
- },
- }
-}
diff --git a/lnd/channeldb/channel_test.go b/lnd/channeldb/channel_test.go
deleted file mode 100644
index 00df46b9..00000000
--- a/lnd/channeldb/channel_test.go
+++ /dev/null
@@ -1,1617 +0,0 @@
-package channeldb
-
-import (
- "bytes"
- "math/rand"
- "net"
- "reflect"
- "runtime"
- "testing"
-
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/lnd/channeldb/kvdb"
-
- "github.com/davecgh/go-spew/spew"
- "github.com/pkt-cash/pktd/btcec"
- "github.com/pkt-cash/pktd/btcutil"
- "github.com/pkt-cash/pktd/chaincfg/chainhash"
- "github.com/pkt-cash/pktd/lnd/clock"
- "github.com/pkt-cash/pktd/lnd/keychain"
- "github.com/pkt-cash/pktd/lnd/lnwire"
- "github.com/pkt-cash/pktd/lnd/shachain"
- _ "github.com/pkt-cash/pktd/pktwallet/walletdb/bdb"
- "github.com/pkt-cash/pktd/wire"
-)
-
-var (
- key = [chainhash.HashSize]byte{
- 0x81, 0xb6, 0x37, 0xd8, 0xfc, 0xd2, 0xc6, 0xda,
- 0x68, 0x59, 0xe6, 0x96, 0x31, 0x13, 0xa1, 0x17,
- 0xd, 0xe7, 0x93, 0xe4, 0xb7, 0x25, 0xb8, 0x4d,
- 0x1e, 0xb, 0x4c, 0xf9, 0x9e, 0xc5, 0x8c, 0xe9,
- }
- rev = [chainhash.HashSize]byte{
- 0x51, 0xb6, 0x37, 0xd8, 0xfc, 0xd2, 0xc6, 0xda,
- 0x48, 0x59, 0xe6, 0x96, 0x31, 0x13, 0xa1, 0x17,
- 0x2d, 0xe7, 0x93, 0xe4,
- }
- testTx = &wire.MsgTx{
- Version: 1,
- TxIn: []*wire.TxIn{
- {
- PreviousOutPoint: wire.OutPoint{
- Hash: chainhash.Hash{},
- Index: 0xffffffff,
- },
- SignatureScript: []byte{0x04, 0x31, 0xdc, 0x00, 0x1b, 0x01, 0x62},
- Sequence: 0xffffffff,
- },
- },
- TxOut: []*wire.TxOut{
- {
- Value: 5000000000,
- PkScript: []byte{
- 0x41, // OP_DATA_65
- 0x04, 0xd6, 0x4b, 0xdf, 0xd0, 0x9e, 0xb1, 0xc5,
- 0xfe, 0x29, 0x5a, 0xbd, 0xeb, 0x1d, 0xca, 0x42,
- 0x81, 0xbe, 0x98, 0x8e, 0x2d, 0xa0, 0xb6, 0xc1,
- 0xc6, 0xa5, 0x9d, 0xc2, 0x26, 0xc2, 0x86, 0x24,
- 0xe1, 0x81, 0x75, 0xe8, 0x51, 0xc9, 0x6b, 0x97,
- 0x3d, 0x81, 0xb0, 0x1c, 0xc3, 0x1f, 0x04, 0x78,
- 0x34, 0xbc, 0x06, 0xd6, 0xd6, 0xed, 0xf6, 0x20,
- 0xd1, 0x84, 0x24, 0x1a, 0x6a, 0xed, 0x8b, 0x63,
- 0xa6, // 65-byte signature
- 0xac, // OP_CHECKSIG
- },
- },
- },
- LockTime: 5,
- }
- privKey, pubKey = btcec.PrivKeyFromBytes(btcec.S256(), key[:])
-
- wireSig, _ = lnwire.NewSigFromSignature(testSig)
-
- testClock = clock.NewTestClock(testNow)
-
- // defaultPendingHeight is the default height at which we set
- // channels to pending.
- defaultPendingHeight = 100
-
- // defaultAddr is the default address that we mark test channels pending
- // with.
- defaultAddr = &net.TCPAddr{
- IP: net.ParseIP("127.0.0.1"),
- Port: 18555,
- }
-)
-
-// testChannelParams is a struct which details the specifics of how a channel
-// should be created.
-type testChannelParams struct {
- // channel is the channel that will be written to disk.
- channel *OpenChannel
-
- // addr is the address that the channel will be synced pending with.
- addr *net.TCPAddr
-
- // pendingHeight is the height that the channel should be recorded as
- // pending.
- pendingHeight uint32
-
- // openChannel is set to true if the channel should be fully marked as
- // open if this is false, the channel will be left in pending state.
- openChannel bool
-}
-
-// testChannelOption is a functional option which can be used to alter the
-// default channel that is creates for testing.
-type testChannelOption func(params *testChannelParams)
-
-// channelCommitmentOption is an option which allows overwriting of the default
-// commitment height and balances. The local boolean can be used to set these
-// balances on the local or remote commit.
-func channelCommitmentOption(height uint64, localBalance,
- remoteBalance lnwire.MilliSatoshi, local bool) testChannelOption {
-
- return func(params *testChannelParams) {
- if local {
- params.channel.LocalCommitment.CommitHeight = height
- params.channel.LocalCommitment.LocalBalance = localBalance
- params.channel.LocalCommitment.RemoteBalance = remoteBalance
- } else {
- params.channel.RemoteCommitment.CommitHeight = height
- params.channel.RemoteCommitment.LocalBalance = localBalance
- params.channel.RemoteCommitment.RemoteBalance = remoteBalance
- }
- }
-}
-
-// pendingHeightOption is an option which can be used to set the height the
-// channel is marked as pending at.
-func pendingHeightOption(height uint32) testChannelOption {
- return func(params *testChannelParams) {
- params.pendingHeight = height
- }
-}
-
-// openChannelOption is an option which can be used to create a test channel
-// that is open.
-func openChannelOption() testChannelOption {
- return func(params *testChannelParams) {
- params.openChannel = true
- }
-}
-
-// localHtlcsOption is an option which allows setting of htlcs on the local
-// commitment.
-func localHtlcsOption(htlcs []HTLC) testChannelOption {
- return func(params *testChannelParams) {
- params.channel.LocalCommitment.Htlcs = htlcs
- }
-}
-
-// remoteHtlcsOption is an option which allows setting of htlcs on the remote
-// commitment.
-func remoteHtlcsOption(htlcs []HTLC) testChannelOption {
- return func(params *testChannelParams) {
- params.channel.RemoteCommitment.Htlcs = htlcs
- }
-}
-
-// localShutdownOption is an option which sets the local upfront shutdown
-// script for the channel.
-func localShutdownOption(addr lnwire.DeliveryAddress) testChannelOption {
- return func(params *testChannelParams) {
- params.channel.LocalShutdownScript = addr
- }
-}
-
-// remoteShutdownOption is an option which sets the remote upfront shutdown
-// script for the channel.
-func remoteShutdownOption(addr lnwire.DeliveryAddress) testChannelOption {
- return func(params *testChannelParams) {
- params.channel.RemoteShutdownScript = addr
- }
-}
-
-// fundingPointOption is an option which sets the funding outpoint of the
-// channel.
-func fundingPointOption(chanPoint wire.OutPoint) testChannelOption {
- return func(params *testChannelParams) {
- params.channel.FundingOutpoint = chanPoint
- }
-}
-
-// channelIDOption is an option which sets the short channel ID of the channel.
-var channelIDOption = func(chanID lnwire.ShortChannelID) testChannelOption {
- return func(params *testChannelParams) {
- params.channel.ShortChannelID = chanID
- }
-}
-
-// createTestChannel writes a test channel to the database. It takes a set of
-// functional options which can be used to overwrite the default of creating
-// a pending channel that was broadcast at height 100.
-func createTestChannel(t *testing.T, cdb *DB,
- opts ...testChannelOption) *OpenChannel {
-
- // Create a default set of parameters.
- params := &testChannelParams{
- channel: createTestChannelState(t, cdb),
- addr: defaultAddr,
- openChannel: false,
- pendingHeight: uint32(defaultPendingHeight),
- }
-
- // Apply all functional options to the test channel params.
- for _, o := range opts {
- o(params)
- }
-
- // Mark the channel as pending.
- err := params.channel.SyncPending(params.addr, params.pendingHeight)
- if err != nil {
- t.Fatalf("unable to save and serialize channel "+
- "state: %v", err)
- }
-
- // If the parameters do not specify that we should open the channel
- // fully, we return the pending channel.
- if !params.openChannel {
- return params.channel
- }
-
- // Mark the channel as open with the short channel id provided.
- err = params.channel.MarkAsOpen(params.channel.ShortChannelID)
- if err != nil {
- t.Fatalf("unable to mark channel open: %v", err)
- }
-
- return params.channel
-}
-
-func createTestChannelState(t *testing.T, cdb *DB) *OpenChannel {
- // Simulate 1000 channel updates.
- producer, err := shachain.NewRevocationProducerFromBytes(key[:])
- if err != nil {
- t.Fatalf("could not get producer: %v", err)
- }
- store := shachain.NewRevocationStore()
- for i := 0; i < 1; i++ {
- preImage, err := producer.AtIndex(uint64(i))
- if err != nil {
- t.Fatalf("could not get "+
- "preimage: %v", err)
- }
-
- if err := store.AddNextEntry(preImage); err != nil {
- t.Fatalf("could not add entry: %v", err)
- }
- }
-
- localCfg := ChannelConfig{
- ChannelConstraints: ChannelConstraints{
- DustLimit: btcutil.Amount(rand.Int63()),
- MaxPendingAmount: lnwire.MilliSatoshi(rand.Int63()),
- ChanReserve: btcutil.Amount(rand.Int63()),
- MinHTLC: lnwire.MilliSatoshi(rand.Int63()),
- MaxAcceptedHtlcs: uint16(rand.Int31()),
- CsvDelay: uint16(rand.Int31()),
- },
- MultiSigKey: keychain.KeyDescriptor{
- PubKey: privKey.PubKey(),
- },
- RevocationBasePoint: keychain.KeyDescriptor{
- PubKey: privKey.PubKey(),
- },
- PaymentBasePoint: keychain.KeyDescriptor{
- PubKey: privKey.PubKey(),
- },
- DelayBasePoint: keychain.KeyDescriptor{
- PubKey: privKey.PubKey(),
- },
- HtlcBasePoint: keychain.KeyDescriptor{
- PubKey: privKey.PubKey(),
- },
- }
- remoteCfg := ChannelConfig{
- ChannelConstraints: ChannelConstraints{
- DustLimit: btcutil.Amount(rand.Int63()),
- MaxPendingAmount: lnwire.MilliSatoshi(rand.Int63()),
- ChanReserve: btcutil.Amount(rand.Int63()),
- MinHTLC: lnwire.MilliSatoshi(rand.Int63()),
- MaxAcceptedHtlcs: uint16(rand.Int31()),
- CsvDelay: uint16(rand.Int31()),
- },
- MultiSigKey: keychain.KeyDescriptor{
- PubKey: privKey.PubKey(),
- KeyLocator: keychain.KeyLocator{
- Family: keychain.KeyFamilyMultiSig,
- Index: 9,
- },
- },
- RevocationBasePoint: keychain.KeyDescriptor{
- PubKey: privKey.PubKey(),
- KeyLocator: keychain.KeyLocator{
- Family: keychain.KeyFamilyRevocationBase,
- Index: 8,
- },
- },
- PaymentBasePoint: keychain.KeyDescriptor{
- PubKey: privKey.PubKey(),
- KeyLocator: keychain.KeyLocator{
- Family: keychain.KeyFamilyPaymentBase,
- Index: 7,
- },
- },
- DelayBasePoint: keychain.KeyDescriptor{
- PubKey: privKey.PubKey(),
- KeyLocator: keychain.KeyLocator{
- Family: keychain.KeyFamilyDelayBase,
- Index: 6,
- },
- },
- HtlcBasePoint: keychain.KeyDescriptor{
- PubKey: privKey.PubKey(),
- KeyLocator: keychain.KeyLocator{
- Family: keychain.KeyFamilyHtlcBase,
- Index: 5,
- },
- },
- }
-
- chanID := lnwire.NewShortChanIDFromInt(uint64(rand.Int63()))
-
- return &OpenChannel{
- ChanType: SingleFunderBit | FrozenBit,
- ChainHash: key,
- FundingOutpoint: wire.OutPoint{Hash: key, Index: rand.Uint32()},
- ShortChannelID: chanID,
- IsInitiator: true,
- IsPending: true,
- IdentityPub: pubKey,
- Capacity: btcutil.Amount(10000),
- LocalChanCfg: localCfg,
- RemoteChanCfg: remoteCfg,
- TotalMSatSent: 8,
- TotalMSatReceived: 2,
- LocalCommitment: ChannelCommitment{
- CommitHeight: 0,
- LocalBalance: lnwire.MilliSatoshi(9000),
- RemoteBalance: lnwire.MilliSatoshi(3000),
- CommitFee: btcutil.Amount(rand.Int63()),
- FeePerKw: btcutil.Amount(5000),
- CommitTx: testTx,
- CommitSig: bytes.Repeat([]byte{1}, 71),
- },
- RemoteCommitment: ChannelCommitment{
- CommitHeight: 0,
- LocalBalance: lnwire.MilliSatoshi(3000),
- RemoteBalance: lnwire.MilliSatoshi(9000),
- CommitFee: btcutil.Amount(rand.Int63()),
- FeePerKw: btcutil.Amount(5000),
- CommitTx: testTx,
- CommitSig: bytes.Repeat([]byte{1}, 71),
- },
- NumConfsRequired: 4,
- RemoteCurrentRevocation: privKey.PubKey(),
- RemoteNextRevocation: privKey.PubKey(),
- RevocationProducer: producer,
- RevocationStore: store,
- Db: cdb,
- Packager: NewChannelPackager(chanID),
- FundingTxn: testTx,
- ThawHeight: uint32(defaultPendingHeight),
- }
-}
-
-func TestOpenChannelPutGetDelete(t *testing.T) {
- t.Parallel()
-
- cdb, cleanUp, err := MakeTestDB()
- if err != nil {
- t.Fatalf("unable to make test database: %v", err)
- }
- defer cleanUp()
-
- // Create the test channel state, with additional htlcs on the local
- // and remote commitment.
- localHtlcs := []HTLC{
- {Signature: testSig.Serialize(),
- Incoming: true,
- Amt: 10,
- RHash: key,
- RefundTimeout: 1,
- OnionBlob: []byte("onionblob"),
- },
- }
-
- remoteHtlcs := []HTLC{
- {
- Signature: testSig.Serialize(),
- Incoming: false,
- Amt: 10,
- RHash: key,
- RefundTimeout: 1,
- OnionBlob: []byte("onionblob"),
- },
- }
-
- state := createTestChannel(
- t, cdb,
- remoteHtlcsOption(remoteHtlcs),
- localHtlcsOption(localHtlcs),
- )
-
- openChannels, err := cdb.FetchOpenChannels(state.IdentityPub)
- if err != nil {
- t.Fatalf("unable to fetch open channel: %v", err)
- }
-
- newState := openChannels[0]
-
- // The decoded channel state should be identical to what we stored
- // above.
- if !reflect.DeepEqual(state, newState) {
- t.Fatalf("channel state doesn't match:: %v vs %v",
- spew.Sdump(state), spew.Sdump(newState))
- }
-
- // We'll also test that the channel is properly able to hot swap the
- // next revocation for the state machine. This tests the initial
- // post-funding revocation exchange.
- nextRevKey, err := btcec.NewPrivateKey(btcec.S256())
- if err != nil {
- t.Fatalf("unable to create new private key: %v", err)
- }
- if err := state.InsertNextRevocation(nextRevKey.PubKey()); err != nil {
- t.Fatalf("unable to update revocation: %v", err)
- }
-
- openChannels, err = cdb.FetchOpenChannels(state.IdentityPub)
- if err != nil {
- t.Fatalf("unable to fetch open channel: %v", err)
- }
- updatedChan := openChannels[0]
-
- // Ensure that the revocation was set properly.
- if !nextRevKey.PubKey().IsEqual(updatedChan.RemoteNextRevocation) {
- t.Fatalf("next revocation wasn't updated")
- }
-
- // Finally to wrap up the test, delete the state of the channel within
- // the database. This involves "closing" the channel which removes all
- // written state, and creates a small "summary" elsewhere within the
- // database.
- closeSummary := &ChannelCloseSummary{
- ChanPoint: state.FundingOutpoint,
- RemotePub: state.IdentityPub,
- SettledBalance: btcutil.Amount(500),
- TimeLockedBalance: btcutil.Amount(10000),
- IsPending: false,
- CloseType: CooperativeClose,
- }
- if err := state.CloseChannel(closeSummary); err != nil {
- t.Fatalf("unable to close channel: %v", err)
- }
-
- // As the channel is now closed, attempting to fetch all open channels
- // for our fake node ID should return an empty slice.
- openChans, err := cdb.FetchOpenChannels(state.IdentityPub)
- if err != nil {
- t.Fatalf("unable to fetch open channels: %v", err)
- }
- if len(openChans) != 0 {
- t.Fatalf("all channels not deleted, found %v", len(openChans))
- }
-
- // Additionally, attempting to fetch all the open channels globally
- // should yield no results.
- openChans, err = cdb.FetchAllChannels()
- if err != nil {
- t.Fatal("unable to fetch all open chans")
- }
- if len(openChans) != 0 {
- t.Fatalf("all channels not deleted, found %v", len(openChans))
- }
-}
-
-// TestOptionalShutdown tests the reading and writing of channels with and
-// without optional shutdown script fields.
-func TestOptionalShutdown(t *testing.T) {
- local := lnwire.DeliveryAddress([]byte("local shutdown script"))
- remote := lnwire.DeliveryAddress([]byte("remote shutdown script"))
-
- if _, err := rand.Read(remote); err != nil {
- t.Fatalf("Could not create random script: %v", err)
- }
-
- tests := []struct {
- name string
- localShutdown lnwire.DeliveryAddress
- remoteShutdown lnwire.DeliveryAddress
- }{
- {
- name: "no shutdown scripts",
- localShutdown: nil,
- remoteShutdown: nil,
- },
- {
- name: "local shutdown script",
- localShutdown: local,
- remoteShutdown: nil,
- },
- {
- name: "remote shutdown script",
- localShutdown: nil,
- remoteShutdown: remote,
- },
- {
- name: "both scripts set",
- localShutdown: local,
- remoteShutdown: remote,
- },
- }
-
- for _, test := range tests {
- test := test
-
- t.Run(test.name, func(t *testing.T) {
- cdb, cleanUp, err := MakeTestDB()
- if err != nil {
- t.Fatalf("unable to make test database: %v", err)
- }
- defer cleanUp()
-
- // Create a channel with upfront scripts set as
- // specified in the test.
- state := createTestChannel(
- t, cdb,
- localShutdownOption(test.localShutdown),
- remoteShutdownOption(test.remoteShutdown),
- )
-
- openChannels, err := cdb.FetchOpenChannels(
- state.IdentityPub,
- )
- if err != nil {
- t.Fatalf("unable to fetch open"+
- " channel: %v", err)
- }
-
- if len(openChannels) != 1 {
- t.Fatalf("Expected one channel open,"+
- " got: %v", len(openChannels))
- }
-
- if !bytes.Equal(openChannels[0].LocalShutdownScript,
- test.localShutdown) {
-
- t.Fatalf("Expected local: %x, got: %x",
- test.localShutdown,
- openChannels[0].LocalShutdownScript)
- }
-
- if !bytes.Equal(openChannels[0].RemoteShutdownScript,
- test.remoteShutdown) {
-
- t.Fatalf("Expected remote: %x, got: %x",
- test.remoteShutdown,
- openChannels[0].RemoteShutdownScript)
- }
- })
- }
-}
-
-func assertCommitmentEqual(t *testing.T, a, b *ChannelCommitment) {
- if !reflect.DeepEqual(a, b) {
- _, _, line, _ := runtime.Caller(1)
- t.Fatalf("line %v: commitments don't match: %v vs %v",
- line, spew.Sdump(a), spew.Sdump(b))
- }
-}
-
-func TestChannelStateTransition(t *testing.T) {
- t.Parallel()
-
- cdb, cleanUp, err := MakeTestDB()
- if err != nil {
- t.Fatalf("unable to make test database: %v", err)
- }
- defer cleanUp()
-
- // First create a minimal channel, then perform a full sync in order to
- // persist the data.
- channel := createTestChannel(t, cdb)
-
- // Add some HTLCs which were added during this new state transition.
- // Half of the HTLCs are incoming, while the other half are outgoing.
- var (
- htlcs []HTLC
- htlcAmt lnwire.MilliSatoshi
- )
- for i := uint32(0); i < 10; i++ {
- var incoming bool
- if i > 5 {
- incoming = true
- }
- htlc := HTLC{
- Signature: testSig.Serialize(),
- Incoming: incoming,
- Amt: 10,
- RHash: key,
- RefundTimeout: i,
- OutputIndex: int32(i * 3),
- LogIndex: uint64(i * 2),
- HtlcIndex: uint64(i),
- }
- htlc.OnionBlob = make([]byte, 10)
- copy(htlc.OnionBlob[:], bytes.Repeat([]byte{2}, 10))
- htlcs = append(htlcs, htlc)
- htlcAmt += htlc.Amt
- }
-
- // Create a new channel delta which includes the above HTLCs, some
- // balance updates, and an increment of the current commitment height.
- // Additionally, modify the signature and commitment transaction.
- newSequence := uint32(129498)
- newSig := bytes.Repeat([]byte{3}, 71)
- newTx := channel.LocalCommitment.CommitTx.Copy()
- newTx.TxIn[0].Sequence = newSequence
- commitment := ChannelCommitment{
- CommitHeight: 1,
- LocalLogIndex: 2,
- LocalHtlcIndex: 1,
- RemoteLogIndex: 2,
- RemoteHtlcIndex: 1,
- LocalBalance: lnwire.MilliSatoshi(1e8),
- RemoteBalance: lnwire.MilliSatoshi(1e8),
- CommitFee: 55,
- FeePerKw: 99,
- CommitTx: newTx,
- CommitSig: newSig,
- Htlcs: htlcs,
- }
-
- // First update the local node's broadcastable state and also add a
- // CommitDiff remote node's as well in order to simulate a proper state
- // transition.
- unsignedAckedUpdates := []LogUpdate{
- {
- LogIndex: 2,
- UpdateMsg: &lnwire.UpdateAddHTLC{
- ChanID: lnwire.ChannelID{1, 2, 3},
- },
- },
- }
-
- err = channel.UpdateCommitment(&commitment, unsignedAckedUpdates)
- if err != nil {
- t.Fatalf("unable to update commitment: %v", err)
- }
-
- // Assert that update is correctly written to the database.
- dbUnsignedAckedUpdates, err := channel.UnsignedAckedUpdates()
- if err != nil {
- t.Fatalf("unable to fetch dangling remote updates: %v", err)
- }
- if len(dbUnsignedAckedUpdates) != 1 {
- t.Fatalf("unexpected number of dangling remote updates")
- }
- if !reflect.DeepEqual(
- dbUnsignedAckedUpdates[0], unsignedAckedUpdates[0],
- ) {
- t.Fatalf("unexpected update")
- }
-
- // The balances, new update, the HTLCs and the changes to the fake
- // commitment transaction along with the modified signature should all
- // have been updated.
- updatedChannel, err := cdb.FetchOpenChannels(channel.IdentityPub)
- if err != nil {
- t.Fatalf("unable to fetch updated channel: %v", err)
- }
- assertCommitmentEqual(t, &commitment, &updatedChannel[0].LocalCommitment)
- numDiskUpdates, err := updatedChannel[0].CommitmentHeight()
- if err != nil {
- t.Fatalf("unable to read commitment height from disk: %v", err)
- }
- if numDiskUpdates != uint64(commitment.CommitHeight) {
- t.Fatalf("num disk updates doesn't match: %v vs %v",
- numDiskUpdates, commitment.CommitHeight)
- }
-
- // Attempting to query for a commitment diff should return
- // ErrNoPendingCommit as we haven't yet created a new state for them.
- _, err = channel.RemoteCommitChainTip()
- if !ErrNoPendingCommit.Is(err) {
- t.Fatalf("expected ErrNoPendingCommit, instead got %v", err)
- }
-
- // To simulate us extending a new state to the remote party, we'll also
- // create a new commit diff for them.
- remoteCommit := commitment
- remoteCommit.LocalBalance = lnwire.MilliSatoshi(2e8)
- remoteCommit.RemoteBalance = lnwire.MilliSatoshi(3e8)
- remoteCommit.CommitHeight = 1
- commitDiff := &CommitDiff{
- Commitment: remoteCommit,
- CommitSig: &lnwire.CommitSig{
- ChanID: lnwire.ChannelID(key),
- CommitSig: wireSig,
- HtlcSigs: []lnwire.Sig{
- wireSig,
- wireSig,
- },
- },
- LogUpdates: []LogUpdate{
- {
- LogIndex: 1,
- UpdateMsg: &lnwire.UpdateAddHTLC{
- ID: 1,
- Amount: lnwire.NewMSatFromSatoshis(100),
- Expiry: 25,
- },
- },
- {
- LogIndex: 2,
- UpdateMsg: &lnwire.UpdateAddHTLC{
- ID: 2,
- Amount: lnwire.NewMSatFromSatoshis(200),
- Expiry: 50,
- },
- },
- },
- OpenedCircuitKeys: []CircuitKey{},
- ClosedCircuitKeys: []CircuitKey{},
- }
- copy(commitDiff.LogUpdates[0].UpdateMsg.(*lnwire.UpdateAddHTLC).PaymentHash[:],
- bytes.Repeat([]byte{1}, 32))
- copy(commitDiff.LogUpdates[1].UpdateMsg.(*lnwire.UpdateAddHTLC).PaymentHash[:],
- bytes.Repeat([]byte{2}, 32))
- if err := channel.AppendRemoteCommitChain(commitDiff); err != nil {
- t.Fatalf("unable to add to commit chain: %v", err)
- }
-
- // The commitment tip should now match the commitment that we just
- // inserted.
- diskCommitDiff, err := channel.RemoteCommitChainTip()
- if err != nil {
- t.Fatalf("unable to fetch commit diff: %v", err)
- }
- if !reflect.DeepEqual(commitDiff, diskCommitDiff) {
- t.Fatalf("commit diffs don't match: %v vs %v", spew.Sdump(remoteCommit),
- spew.Sdump(diskCommitDiff))
- }
-
- // We'll save the old remote commitment as this will be added to the
- // revocation log shortly.
- oldRemoteCommit := channel.RemoteCommitment
-
- // Next, write to the log which tracks the necessary revocation state
- // needed to rectify any fishy behavior by the remote party. Modify the
- // current uncollapsed revocation state to simulate a state transition
- // by the remote party.
- channel.RemoteCurrentRevocation = channel.RemoteNextRevocation
- newPriv, err := btcec.NewPrivateKey(btcec.S256())
- if err != nil {
- t.Fatalf("unable to generate key: %v", err)
- }
- channel.RemoteNextRevocation = newPriv.PubKey()
-
- fwdPkg := NewFwdPkg(channel.ShortChanID(), oldRemoteCommit.CommitHeight,
- diskCommitDiff.LogUpdates, nil)
-
- err = channel.AdvanceCommitChainTail(fwdPkg, nil)
- if err != nil {
- t.Fatalf("unable to append to revocation log: %v", err)
- }
-
- // At this point, the remote commit chain should be nil, and the posted
- // remote commitment should match the one we added as a diff above.
- if _, err := channel.RemoteCommitChainTip(); !ErrNoPendingCommit.Is(err) {
- t.Fatalf("expected ErrNoPendingCommit, instead got %v", err)
- }
-
- // We should be able to fetch the channel delta created above by its
- // update number with all the state properly reconstructed.
- diskPrevCommit, err := channel.FindPreviousState(
- oldRemoteCommit.CommitHeight,
- )
- if err != nil {
- t.Fatalf("unable to fetch past delta: %v", err)
- }
-
- // The two deltas (the original vs the on-disk version) should
- // identical, and all HTLC data should properly be retained.
- assertCommitmentEqual(t, &oldRemoteCommit, diskPrevCommit)
-
- // The state number recovered from the tail of the revocation log
- // should be identical to this current state.
- logTail, err := channel.RevocationLogTail()
- if err != nil {
- t.Fatalf("unable to retrieve log: %v", err)
- }
- if logTail.CommitHeight != oldRemoteCommit.CommitHeight {
- t.Fatal("update number doesn't match")
- }
-
- oldRemoteCommit = channel.RemoteCommitment
-
- // Next modify the posted diff commitment slightly, then create a new
- // commitment diff and advance the tail.
- commitDiff.Commitment.CommitHeight = 2
- commitDiff.Commitment.LocalBalance -= htlcAmt
- commitDiff.Commitment.RemoteBalance += htlcAmt
- commitDiff.LogUpdates = []LogUpdate{}
- if err := channel.AppendRemoteCommitChain(commitDiff); err != nil {
- t.Fatalf("unable to add to commit chain: %v", err)
- }
-
- fwdPkg = NewFwdPkg(channel.ShortChanID(), oldRemoteCommit.CommitHeight, nil, nil)
-
- err = channel.AdvanceCommitChainTail(fwdPkg, nil)
- if err != nil {
- t.Fatalf("unable to append to revocation log: %v", err)
- }
-
- // Once again, fetch the state and ensure it has been properly updated.
- prevCommit, err := channel.FindPreviousState(oldRemoteCommit.CommitHeight)
- if err != nil {
- t.Fatalf("unable to fetch past delta: %v", err)
- }
- assertCommitmentEqual(t, &oldRemoteCommit, prevCommit)
-
- // Once again, state number recovered from the tail of the revocation
- // log should be identical to this current state.
- logTail, err = channel.RevocationLogTail()
- if err != nil {
- t.Fatalf("unable to retrieve log: %v", err)
- }
- if logTail.CommitHeight != oldRemoteCommit.CommitHeight {
- t.Fatal("update number doesn't match")
- }
-
- // The revocation state stored on-disk should now also be identical.
- updatedChannel, err = cdb.FetchOpenChannels(channel.IdentityPub)
- if err != nil {
- t.Fatalf("unable to fetch updated channel: %v", err)
- }
- if !channel.RemoteCurrentRevocation.IsEqual(updatedChannel[0].RemoteCurrentRevocation) {
- t.Fatalf("revocation state was not synced")
- }
- if !channel.RemoteNextRevocation.IsEqual(updatedChannel[0].RemoteNextRevocation) {
- t.Fatalf("revocation state was not synced")
- }
-
- // Now attempt to delete the channel from the database.
- closeSummary := &ChannelCloseSummary{
- ChanPoint: channel.FundingOutpoint,
- RemotePub: channel.IdentityPub,
- SettledBalance: btcutil.Amount(500),
- TimeLockedBalance: btcutil.Amount(10000),
- IsPending: false,
- CloseType: RemoteForceClose,
- }
- if err := updatedChannel[0].CloseChannel(closeSummary); err != nil {
- t.Fatalf("unable to delete updated channel: %v", err)
- }
-
- // If we attempt to fetch the target channel again, it shouldn't be
- // found.
- channels, err := cdb.FetchOpenChannels(channel.IdentityPub)
- if err != nil {
- t.Fatalf("unable to fetch updated channels: %v", err)
- }
- if len(channels) != 0 {
- t.Fatalf("%v channels, found, but none should be",
- len(channels))
- }
-
- // Attempting to find previous states on the channel should fail as the
- // revocation log has been deleted.
- _, err = updatedChannel[0].FindPreviousState(oldRemoteCommit.CommitHeight)
- if err == nil {
- t.Fatal("revocation log search should have failed")
- }
-}
-
-func TestFetchPendingChannels(t *testing.T) {
- t.Parallel()
-
- cdb, cleanUp, err := MakeTestDB()
- if err != nil {
- t.Fatalf("unable to make test database: %v", err)
- }
- defer cleanUp()
-
- // Create a pending channel that was broadcast at height 99.
- const broadcastHeight = 99
- createTestChannel(t, cdb, pendingHeightOption(broadcastHeight))
-
- pendingChannels, err := cdb.FetchPendingChannels()
- if err != nil {
- t.Fatalf("unable to list pending channels: %v", err)
- }
-
- if len(pendingChannels) != 1 {
- t.Fatalf("incorrect number of pending channels: expecting %v,"+
- "got %v", 1, len(pendingChannels))
- }
-
- // The broadcast height of the pending channel should have been set
- // properly.
- if pendingChannels[0].FundingBroadcastHeight != broadcastHeight {
- t.Fatalf("broadcast height mismatch: expected %v, got %v",
- pendingChannels[0].FundingBroadcastHeight,
- broadcastHeight)
- }
-
- chanOpenLoc := lnwire.ShortChannelID{
- BlockHeight: 5,
- TxIndex: 10,
- TxPosition: 15,
- }
- err = pendingChannels[0].MarkAsOpen(chanOpenLoc)
- if err != nil {
- t.Fatalf("unable to mark channel as open: %v", err)
- }
-
- if pendingChannels[0].IsPending {
- t.Fatalf("channel marked open should no longer be pending")
- }
-
- if pendingChannels[0].ShortChanID() != chanOpenLoc {
- t.Fatalf("channel opening height not updated: expected %v, "+
- "got %v", spew.Sdump(pendingChannels[0].ShortChanID()),
- chanOpenLoc)
- }
-
- // Next, we'll re-fetch the channel to ensure that the open height was
- // properly set.
- openChans, err := cdb.FetchAllChannels()
- if err != nil {
- t.Fatalf("unable to fetch channels: %v", err)
- }
- if openChans[0].ShortChanID() != chanOpenLoc {
- t.Fatalf("channel opening heights don't match: expected %v, "+
- "got %v", spew.Sdump(openChans[0].ShortChanID()),
- chanOpenLoc)
- }
- if openChans[0].FundingBroadcastHeight != broadcastHeight {
- t.Fatalf("broadcast height mismatch: expected %v, got %v",
- openChans[0].FundingBroadcastHeight,
- broadcastHeight)
- }
-
- pendingChannels, err = cdb.FetchPendingChannels()
- if err != nil {
- t.Fatalf("unable to list pending channels: %v", err)
- }
-
- if len(pendingChannels) != 0 {
- t.Fatalf("incorrect number of pending channels: expecting %v,"+
- "got %v", 0, len(pendingChannels))
- }
-}
-
-func TestFetchClosedChannels(t *testing.T) {
- t.Parallel()
-
- cdb, cleanUp, err := MakeTestDB()
- if err != nil {
- t.Fatalf("unable to make test database: %v", err)
- }
- defer cleanUp()
-
- // Create an open channel in the database.
- state := createTestChannel(t, cdb, openChannelOption())
-
- // Next, close the channel by including a close channel summary in the
- // database.
- summary := &ChannelCloseSummary{
- ChanPoint: state.FundingOutpoint,
- ClosingTXID: rev,
- RemotePub: state.IdentityPub,
- Capacity: state.Capacity,
- SettledBalance: state.LocalCommitment.LocalBalance.ToSatoshis(),
- TimeLockedBalance: state.RemoteCommitment.LocalBalance.ToSatoshis() + 10000,
- CloseType: RemoteForceClose,
- IsPending: true,
- LocalChanConfig: state.LocalChanCfg,
- }
- if err := state.CloseChannel(summary); err != nil {
- t.Fatalf("unable to close channel: %v", err)
- }
-
- // Query the database to ensure that the channel has now been properly
- // closed. We should get the same result whether querying for pending
- // channels only, or not.
- pendingClosed, err := cdb.FetchClosedChannels(true)
- if err != nil {
- t.Fatalf("failed fetching closed channels: %v", err)
- }
- if len(pendingClosed) != 1 {
- t.Fatalf("incorrect number of pending closed channels: expecting %v,"+
- "got %v", 1, len(pendingClosed))
- }
- if !reflect.DeepEqual(summary, pendingClosed[0]) {
- t.Fatalf("database summaries don't match: expected %v got %v",
- spew.Sdump(summary), spew.Sdump(pendingClosed[0]))
- }
- closed, err := cdb.FetchClosedChannels(false)
- if err != nil {
- t.Fatalf("failed fetching all closed channels: %v", err)
- }
- if len(closed) != 1 {
- t.Fatalf("incorrect number of closed channels: expecting %v, "+
- "got %v", 1, len(closed))
- }
- if !reflect.DeepEqual(summary, closed[0]) {
- t.Fatalf("database summaries don't match: expected %v got %v",
- spew.Sdump(summary), spew.Sdump(closed[0]))
- }
-
- // Mark the channel as fully closed.
- err = cdb.MarkChanFullyClosed(&state.FundingOutpoint)
- if err != nil {
- t.Fatalf("failed fully closing channel: %v", err)
- }
-
- // The channel should no longer be considered pending, but should still
- // be retrieved when fetching all the closed channels.
- closed, err = cdb.FetchClosedChannels(false)
- if err != nil {
- t.Fatalf("failed fetching closed channels: %v", err)
- }
- if len(closed) != 1 {
- t.Fatalf("incorrect number of closed channels: expecting %v, "+
- "got %v", 1, len(closed))
- }
- pendingClose, err := cdb.FetchClosedChannels(true)
- if err != nil {
- t.Fatalf("failed fetching channels pending close: %v", err)
- }
- if len(pendingClose) != 0 {
- t.Fatalf("incorrect number of closed channels: expecting %v, "+
- "got %v", 0, len(closed))
- }
-}
-
-// TestFetchWaitingCloseChannels ensures that the correct channels that are
-// waiting to be closed are returned.
-func TestFetchWaitingCloseChannels(t *testing.T) {
- t.Parallel()
-
- const numChannels = 2
- const broadcastHeight = 99
-
- // We'll start by creating two channels within our test database. One of
- // them will have their funding transaction confirmed on-chain, while
- // the other one will remain unconfirmed.
- db, cleanUp, err := MakeTestDB()
- if err != nil {
- t.Fatalf("unable to make test database: %v", err)
- }
- defer cleanUp()
-
- channels := make([]*OpenChannel, numChannels)
- for i := 0; i < numChannels; i++ {
- // Create a pending channel in the database at the broadcast
- // height.
- channels[i] = createTestChannel(
- t, db, pendingHeightOption(broadcastHeight),
- )
- }
-
- // We'll only confirm the first one.
- channelConf := lnwire.ShortChannelID{
- BlockHeight: broadcastHeight + 1,
- TxIndex: 10,
- TxPosition: 15,
- }
- if err := channels[0].MarkAsOpen(channelConf); err != nil {
- t.Fatalf("unable to mark channel as open: %v", err)
- }
-
- // Then, we'll mark the channels as if their commitments were broadcast.
- // This would happen in the event of a force close and should make the
- // channels enter a state of waiting close.
- for _, channel := range channels {
- closeTx := wire.NewMsgTx(2)
- closeTx.AddTxIn(
- &wire.TxIn{
- PreviousOutPoint: channel.FundingOutpoint,
- },
- )
-
- if err := channel.MarkCommitmentBroadcasted(closeTx, true); err != nil {
- t.Fatalf("unable to mark commitment broadcast: %v", err)
- }
-
- // Now try to marking a coop close with a nil tx. This should
- // succeed, but it shouldn't exit when queried.
- if err = channel.MarkCoopBroadcasted(nil, true); err != nil {
- t.Fatalf("unable to mark nil coop broadcast: %v", err)
- }
- _, err := channel.BroadcastedCooperative()
- if !ErrNoCloseTx.Is(err) {
- t.Fatalf("expected no closing tx error, got: %v", err)
- }
-
- // Finally, modify the close tx deterministically and also mark
- // it as coop closed. Later we will test that distinct
- // transactions are returned for both coop and force closes.
- closeTx.TxIn[0].PreviousOutPoint.Index ^= 1
- if err := channel.MarkCoopBroadcasted(closeTx, true); err != nil {
- t.Fatalf("unable to mark coop broadcast: %v", err)
- }
- }
-
- // Now, we'll fetch all the channels waiting to be closed from the
- // database. We should expect to see both channels above, even if any of
- // them haven't had their funding transaction confirm on-chain.
- waitingCloseChannels, err := db.FetchWaitingCloseChannels()
- if err != nil {
- t.Fatalf("unable to fetch all waiting close channels: %v", err)
- }
- if len(waitingCloseChannels) != numChannels {
- t.Fatalf("expected %d channels waiting to be closed, got %d", 2,
- len(waitingCloseChannels))
- }
- expectedChannels := make(map[wire.OutPoint]struct{})
- for _, channel := range channels {
- expectedChannels[channel.FundingOutpoint] = struct{}{}
- }
- for _, channel := range waitingCloseChannels {
- if _, ok := expectedChannels[channel.FundingOutpoint]; !ok {
- t.Fatalf("expected channel %v to be waiting close",
- channel.FundingOutpoint)
- }
-
- chanPoint := channel.FundingOutpoint
-
- // Assert that the force close transaction is retrievable.
- forceCloseTx, err := channel.BroadcastedCommitment()
- if err != nil {
- t.Fatalf("Unable to retrieve commitment: %v", err)
- }
-
- if forceCloseTx.TxIn[0].PreviousOutPoint != chanPoint {
- t.Fatalf("expected outpoint %v, got %v",
- chanPoint,
- forceCloseTx.TxIn[0].PreviousOutPoint)
- }
-
- // Assert that the coop close transaction is retrievable.
- coopCloseTx, err := channel.BroadcastedCooperative()
- if err != nil {
- t.Fatalf("unable to retrieve coop close: %v", err)
- }
-
- chanPoint.Index ^= 1
- if coopCloseTx.TxIn[0].PreviousOutPoint != chanPoint {
- t.Fatalf("expected outpoint %v, got %v",
- chanPoint,
- coopCloseTx.TxIn[0].PreviousOutPoint)
- }
- }
-}
-
-// TestRefreshShortChanID asserts that RefreshShortChanID updates the in-memory
-// state of another OpenChannel to reflect a preceding call to MarkOpen on a
-// different OpenChannel.
-func TestRefreshShortChanID(t *testing.T) {
- t.Parallel()
-
- cdb, cleanUp, err := MakeTestDB()
- if err != nil {
- t.Fatalf("unable to make test database: %v", err)
- }
- defer cleanUp()
-
- // First create a test channel.
- state := createTestChannel(t, cdb)
-
- // Next, locate the pending channel with the database.
- pendingChannels, err := cdb.FetchPendingChannels()
- if err != nil {
- t.Fatalf("unable to load pending channels; %v", err)
- }
-
- var pendingChannel *OpenChannel
- for _, channel := range pendingChannels {
- if channel.FundingOutpoint == state.FundingOutpoint {
- pendingChannel = channel
- break
- }
- }
- if pendingChannel == nil {
- t.Fatalf("unable to find pending channel with funding "+
- "outpoint=%v: %v", state.FundingOutpoint, err)
- }
-
- // Next, simulate the confirmation of the channel by marking it as
- // pending within the database.
- chanOpenLoc := lnwire.ShortChannelID{
- BlockHeight: 105,
- TxIndex: 10,
- TxPosition: 15,
- }
-
- err = state.MarkAsOpen(chanOpenLoc)
- if err != nil {
- t.Fatalf("unable to mark channel open: %v", err)
- }
-
- // The short_chan_id of the receiver to MarkAsOpen should reflect the
- // open location, but the other pending channel should remain unchanged.
- if state.ShortChanID() == pendingChannel.ShortChanID() {
- t.Fatalf("pending channel short_chan_ID should not have been " +
- "updated before refreshing short_chan_id")
- }
-
- // Now that the receiver's short channel id has been updated, check to
- // ensure that the channel packager's source has been updated as well.
- // This ensures that the packager will read and write to buckets
- // corresponding to the new short chan id, instead of the prior.
- if state.Packager.(*ChannelPackager).source != chanOpenLoc {
- t.Fatalf("channel packager source was not updated: want %v, "+
- "got %v", chanOpenLoc,
- state.Packager.(*ChannelPackager).source)
- }
-
- // Now, refresh the short channel ID of the pending channel.
- err = pendingChannel.RefreshShortChanID()
- if err != nil {
- t.Fatalf("unable to refresh short_chan_id: %v", err)
- }
-
- // This should result in both OpenChannel's now having the same
- // ShortChanID.
- if state.ShortChanID() != pendingChannel.ShortChanID() {
- t.Fatalf("expected pending channel short_chan_id to be "+
- "refreshed: want %v, got %v", state.ShortChanID(),
- pendingChannel.ShortChanID())
- }
-
- // Check to ensure that the _other_ OpenChannel channel packager's
- // source has also been updated after the refresh. This ensures that the
- // other packagers will read and write to buckets corresponding to the
- // updated short chan id.
- if pendingChannel.Packager.(*ChannelPackager).source != chanOpenLoc {
- t.Fatalf("channel packager source was not updated: want %v, "+
- "got %v", chanOpenLoc,
- pendingChannel.Packager.(*ChannelPackager).source)
- }
-
- // Check to ensure that this channel is no longer pending and this field
- // is up to date.
- if pendingChannel.IsPending {
- t.Fatalf("channel pending state wasn't updated: want false got true")
- }
-}
-
-// TestCloseInitiator tests the setting of close initiator statuses for
-// cooperative closes and local force closes.
-func TestCloseInitiator(t *testing.T) {
- tests := []struct {
- name string
- // updateChannel is called to update the channel as broadcast,
- // cooperatively or not, based on the test's requirements.
- updateChannel func(c *OpenChannel) er.R
- expectedStatuses []ChannelStatus
- }{
- {
- name: "local coop close",
- // Mark the channel as cooperatively closed, initiated
- // by the local party.
- updateChannel: func(c *OpenChannel) er.R {
- return c.MarkCoopBroadcasted(
- &wire.MsgTx{}, true,
- )
- },
- expectedStatuses: []ChannelStatus{
- ChanStatusLocalCloseInitiator,
- ChanStatusCoopBroadcasted,
- },
- },
- {
- name: "remote coop close",
- // Mark the channel as cooperatively closed, initiated
- // by the remote party.
- updateChannel: func(c *OpenChannel) er.R {
- return c.MarkCoopBroadcasted(
- &wire.MsgTx{}, false,
- )
- },
- expectedStatuses: []ChannelStatus{
- ChanStatusRemoteCloseInitiator,
- ChanStatusCoopBroadcasted,
- },
- },
- {
- name: "local force close",
- // Mark the channel's commitment as broadcast with
- // local initiator.
- updateChannel: func(c *OpenChannel) er.R {
- return c.MarkCommitmentBroadcasted(
- &wire.MsgTx{}, true,
- )
- },
- expectedStatuses: []ChannelStatus{
- ChanStatusLocalCloseInitiator,
- ChanStatusCommitBroadcasted,
- },
- },
- }
-
- for _, test := range tests {
- test := test
-
- t.Run(test.name, func(t *testing.T) {
- t.Parallel()
-
- cdb, cleanUp, err := MakeTestDB()
- if err != nil {
- t.Fatalf("unable to make test database: %v",
- err)
- }
- defer cleanUp()
-
- // Create an open channel.
- channel := createTestChannel(
- t, cdb, openChannelOption(),
- )
-
- err = test.updateChannel(channel)
- if err != nil {
- t.Fatalf("unexpected error: %v", err)
- }
-
- // Lookup open channels in the database.
- dbChans, err := fetchChannels(
- cdb, pendingChannelFilter(false),
- )
- if err != nil {
- t.Fatalf("unexpected error: %v", err)
- }
- if len(dbChans) != 1 {
- t.Fatalf("expected 1 channel, got: %v",
- len(dbChans))
- }
-
- // Check that the statuses that we expect were written
- // to disk.
- for _, status := range test.expectedStatuses {
- if !dbChans[0].HasChanStatus(status) {
- t.Fatalf("expected channel to have "+
- "status: %v, has status: %v",
- status, dbChans[0].chanStatus)
- }
- }
- })
- }
-}
-
-// TestCloseChannelStatus tests setting of a channel status on the historical
-// channel on channel close.
-func TestCloseChannelStatus(t *testing.T) {
- cdb, cleanUp, err := MakeTestDB()
- if err != nil {
- t.Fatalf("unable to make test database: %v",
- err)
- }
- defer cleanUp()
-
- // Create an open channel.
- channel := createTestChannel(
- t, cdb, openChannelOption(),
- )
-
- if err := channel.CloseChannel(
- &ChannelCloseSummary{
- ChanPoint: channel.FundingOutpoint,
- RemotePub: channel.IdentityPub,
- }, ChanStatusRemoteCloseInitiator,
- ); err != nil {
- t.Fatalf("unexpected error: %v", err)
- }
-
- histChan, err := channel.Db.FetchHistoricalChannel(
- &channel.FundingOutpoint,
- )
- if err != nil {
- t.Fatalf("unexpected error: %v", err)
- }
-
- if !histChan.HasChanStatus(ChanStatusRemoteCloseInitiator) {
- t.Fatalf("channel should have status")
- }
-}
-
-// TestBalanceAtHeight tests lookup of our local and remote balance at a given
-// height.
-func TestBalanceAtHeight(t *testing.T) {
- const (
- // Values that will be set on our current local commit in
- // memory.
- localHeight = 2
- localLocalBalance = 1000
- localRemoteBalance = 1500
-
- // Values that will be set on our current remote commit in
- // memory.
- remoteHeight = 3
- remoteLocalBalance = 2000
- remoteRemoteBalance = 2500
-
- // Values that will be written to disk in the revocation log.
- oldHeight = 0
- oldLocalBalance = 200
- oldRemoteBalance = 300
-
- // Heights to test error cases.
- unknownHeight = 1
- unreachedHeight = 4
- )
-
- // putRevokedState is a helper function used to put commitments is
- // the revocation log bucket to test lookup of balances at heights that
- // are not our current height.
- putRevokedState := func(c *OpenChannel, height uint64, local,
- remote lnwire.MilliSatoshi) er.R {
-
- err := kvdb.Update(c.Db, func(tx kvdb.RwTx) er.R {
- chanBucket, err := fetchChanBucketRw(
- tx, c.IdentityPub, &c.FundingOutpoint,
- c.ChainHash,
- )
- if err != nil {
- return err
- }
-
- logKey := revocationLogBucket
- logBucket, err := chanBucket.CreateBucketIfNotExists(
- logKey,
- )
- if err != nil {
- return err
- }
-
- // Make a copy of our current commitment so we do not
- // need to re-fill all the required fields and copy in
- // our new desired values.
- commit := c.LocalCommitment
- commit.CommitHeight = height
- commit.LocalBalance = local
- commit.RemoteBalance = remote
-
- return appendChannelLogEntry(logBucket, &commit)
- }, func() {})
-
- return err
- }
-
- tests := []struct {
- name string
- targetHeight uint64
- expectedLocalBalance lnwire.MilliSatoshi
- expectedRemoteBalance lnwire.MilliSatoshi
- expectedError *er.ErrorCode
- }{
- {
- name: "target is current local height",
- targetHeight: localHeight,
- expectedLocalBalance: localLocalBalance,
- expectedRemoteBalance: localRemoteBalance,
- expectedError: nil,
- },
- {
- name: "target is current remote height",
- targetHeight: remoteHeight,
- expectedLocalBalance: remoteLocalBalance,
- expectedRemoteBalance: remoteRemoteBalance,
- expectedError: nil,
- },
- {
- name: "need to lookup commit",
- targetHeight: oldHeight,
- expectedLocalBalance: oldLocalBalance,
- expectedRemoteBalance: oldRemoteBalance,
- expectedError: nil,
- },
- {
- name: "height not found",
- targetHeight: unknownHeight,
- expectedLocalBalance: 0,
- expectedRemoteBalance: 0,
- expectedError: errLogEntryNotFound,
- },
- {
- name: "height not reached",
- targetHeight: unreachedHeight,
- expectedLocalBalance: 0,
- expectedRemoteBalance: 0,
- expectedError: errHeightNotReached,
- },
- }
-
- for _, test := range tests {
- test := test
-
- t.Run(test.name, func(t *testing.T) {
- t.Parallel()
-
- cdb, cleanUp, err := MakeTestDB()
- if err != nil {
- t.Fatalf("unable to make test database: %v",
- err)
- }
- defer cleanUp()
-
- // Create options to set the heights and balances of
- // our local and remote commitments.
- localCommitOpt := channelCommitmentOption(
- localHeight, localLocalBalance,
- localRemoteBalance, true,
- )
-
- remoteCommitOpt := channelCommitmentOption(
- remoteHeight, remoteLocalBalance,
- remoteRemoteBalance, false,
- )
-
- // Create an open channel.
- channel := createTestChannel(
- t, cdb, openChannelOption(),
- localCommitOpt, remoteCommitOpt,
- )
-
- // Write an older commit to disk.
- err = putRevokedState(channel, oldHeight,
- oldLocalBalance, oldRemoteBalance)
- if err != nil {
- t.Fatalf("unexpected error: %v", err)
- }
-
- local, remote, err := channel.BalancesAtHeight(
- test.targetHeight,
- )
- if test.expectedError == nil && err == nil {
- } else if test.expectedError == nil || !test.expectedError.Is(err) {
- t.Fatalf("expected: %v, got: %v",
- test.expectedError, err)
- }
-
- if local != test.expectedLocalBalance {
- t.Fatalf("expected local: %v, got: %v",
- test.expectedLocalBalance, local)
- }
-
- if remote != test.expectedRemoteBalance {
- t.Fatalf("expected remote: %v, got: %v",
- test.expectedRemoteBalance, remote)
- }
- })
- }
-}
-
-// TestHasChanStatus asserts the behavior of HasChanStatus by checking the
-// behavior of various status flags in addition to the special case of
-// ChanStatusDefault which is treated like a flag in the code base even though
-// it isn't.
-func TestHasChanStatus(t *testing.T) {
- tests := []struct {
- name string
- status ChannelStatus
- expHas map[ChannelStatus]bool
- }{
- {
- name: "default",
- status: ChanStatusDefault,
- expHas: map[ChannelStatus]bool{
- ChanStatusDefault: true,
- ChanStatusBorked: false,
- },
- },
- {
- name: "single flag",
- status: ChanStatusBorked,
- expHas: map[ChannelStatus]bool{
- ChanStatusDefault: false,
- ChanStatusBorked: true,
- },
- },
- {
- name: "multiple flags",
- status: ChanStatusBorked | ChanStatusLocalDataLoss,
- expHas: map[ChannelStatus]bool{
- ChanStatusDefault: false,
- ChanStatusBorked: true,
- ChanStatusLocalDataLoss: true,
- },
- },
- }
-
- for _, test := range tests {
- test := test
-
- t.Run(test.name, func(t *testing.T) {
- c := &OpenChannel{
- chanStatus: test.status,
- }
-
- for status, expHas := range test.expHas {
- has := c.HasChanStatus(status)
- if has == expHas {
- continue
- }
-
- t.Fatalf("expected chan status to "+
- "have %s? %t, got: %t",
- status, expHas, has)
- }
- })
- }
-}
diff --git a/lnd/channeldb/codec.go b/lnd/channeldb/codec.go
deleted file mode 100644
index d5fda0d0..00000000
--- a/lnd/channeldb/codec.go
+++ /dev/null
@@ -1,465 +0,0 @@
-package channeldb
-
-import (
- "fmt"
- "io"
- "net"
-
- "github.com/pkt-cash/pktd/btcec"
- "github.com/pkt-cash/pktd/btcutil"
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/btcutil/util"
- "github.com/pkt-cash/pktd/chaincfg/chainhash"
- "github.com/pkt-cash/pktd/lnd/keychain"
- "github.com/pkt-cash/pktd/lnd/lnwire"
- "github.com/pkt-cash/pktd/lnd/shachain"
- "github.com/pkt-cash/pktd/wire"
-)
-
-// writeOutpoint writes an outpoint to the passed writer using the minimal
-// amount of bytes possible.
-func writeOutpoint(w io.Writer, o *wire.OutPoint) er.R {
- if _, err := util.Write(w, o.Hash[:]); err != nil {
- return err
- }
- if err := util.WriteBin(w, byteOrder, o.Index); err != nil {
- return err
- }
-
- return nil
-}
-
-// readOutpoint reads an outpoint from the passed reader that was previously
-// written using the writeOutpoint struct.
-func readOutpoint(r io.Reader, o *wire.OutPoint) er.R {
- if _, err := util.ReadFull(r, o.Hash[:]); err != nil {
- return err
- }
- if err := util.ReadBin(r, byteOrder, &o.Index); err != nil {
- return err
- }
-
- return nil
-}
-
-// UnknownElementType is an error returned when the codec is unable to encode or
-// decode a particular type.
-type UnknownElementType struct {
- method string
- element interface{}
-}
-
-// NewUnknownElementType creates a new UnknownElementType error from the passed
-// method name and element.
-func NewUnknownElementType(method string, el interface{}) UnknownElementType {
- return UnknownElementType{method: method, element: el}
-}
-
-// Error returns the name of the method that encountered the error, as well as
-// the type that was unsupported.
-func (e UnknownElementType) Error() string {
- return fmt.Sprintf("Unknown type in %s: %T", e.method, e.element)
-}
-
-// WriteElement is a one-stop shop to write the big endian representation of
-// any element which is to be serialized for storage on disk. The passed
-// io.Writer should be backed by an appropriately sized byte slice, or be able
-// to dynamically expand to accommodate additional data.
-func WriteElement(w io.Writer, element interface{}) er.R {
- switch e := element.(type) {
- case keychain.KeyDescriptor:
- if err := util.WriteBin(w, byteOrder, e.Family); err != nil {
- return err
- }
- if err := util.WriteBin(w, byteOrder, e.Index); err != nil {
- return err
- }
-
- if e.PubKey != nil {
- if err := util.WriteBin(w, byteOrder, true); err != nil {
- return er.Errorf("error writing serialized element: %s", err)
- }
-
- return WriteElement(w, e.PubKey)
- }
-
- return util.WriteBin(w, byteOrder, false)
- case ChannelType:
- if err := util.WriteBin(w, byteOrder, e); err != nil {
- return err
- }
-
- case chainhash.Hash:
- if _, err := util.Write(w, e[:]); err != nil {
- return err
- }
-
- case wire.OutPoint:
- return writeOutpoint(w, &e)
-
- case lnwire.ShortChannelID:
- if err := util.WriteBin(w, byteOrder, e.ToUint64()); err != nil {
- return err
- }
-
- case lnwire.ChannelID:
- if _, err := util.Write(w, e[:]); err != nil {
- return err
- }
-
- case int64, uint64:
- if err := util.WriteBin(w, byteOrder, e); err != nil {
- return err
- }
-
- case uint32:
- if err := util.WriteBin(w, byteOrder, e); err != nil {
- return err
- }
-
- case int32:
- if err := util.WriteBin(w, byteOrder, e); err != nil {
- return err
- }
-
- case uint16:
- if err := util.WriteBin(w, byteOrder, e); err != nil {
- return err
- }
-
- case uint8:
- if err := util.WriteBin(w, byteOrder, e); err != nil {
- return err
- }
-
- case bool:
- if err := util.WriteBin(w, byteOrder, e); err != nil {
- return err
- }
-
- case btcutil.Amount:
- if err := util.WriteBin(w, byteOrder, uint64(e)); err != nil {
- return err
- }
-
- case lnwire.MilliSatoshi:
- if err := util.WriteBin(w, byteOrder, uint64(e)); err != nil {
- return err
- }
-
- case *btcec.PrivateKey:
- b := e.Serialize()
- if _, err := util.Write(w, b); err != nil {
- return err
- }
-
- case *btcec.PublicKey:
- b := e.SerializeCompressed()
- if _, err := util.Write(w, b); err != nil {
- return err
- }
-
- case shachain.Producer:
- return e.Encode(w)
-
- case shachain.Store:
- return e.Encode(w)
-
- case *wire.MsgTx:
- return e.Serialize(w)
-
- case [32]byte:
- if _, err := util.Write(w, e[:]); err != nil {
- return err
- }
-
- case []byte:
- if err := wire.WriteVarBytes(w, 0, e); err != nil {
- return err
- }
-
- case lnwire.Message:
- if _, err := lnwire.WriteMessage(w, e, 0); err != nil {
- return err
- }
-
- case ChannelStatus:
- if err := util.WriteBin(w, byteOrder, e); err != nil {
- return err
- }
-
- case ClosureType:
- if err := util.WriteBin(w, byteOrder, e); err != nil {
- return err
- }
-
- case paymentIndexType:
- if err := util.WriteBin(w, byteOrder, e); err != nil {
- return err
- }
-
- case lnwire.FundingFlag:
- if err := util.WriteBin(w, byteOrder, e); err != nil {
- return err
- }
-
- case net.Addr:
- if err := serializeAddr(w, e); err != nil {
- return err
- }
-
- case []net.Addr:
- if err := WriteElement(w, uint32(len(e))); err != nil {
- return err
- }
-
- for _, addr := range e {
- if err := serializeAddr(w, addr); err != nil {
- return err
- }
- }
-
- default:
- return er.E(UnknownElementType{"WriteElement", e})
- }
-
- return nil
-}
-
-// WriteElements is writes each element in the elements slice to the passed
-// io.Writer using WriteElement.
-func WriteElements(w io.Writer, elements ...interface{}) er.R {
- for _, element := range elements {
- err := WriteElement(w, element)
- if err != nil {
- return err
- }
- }
- return nil
-}
-
-// ReadElement is a one-stop utility function to deserialize any datastructure
-// encoded using the serialization format of the database.
-func ReadElement(r io.Reader, element interface{}) er.R {
- switch e := element.(type) {
- case *keychain.KeyDescriptor:
- if err := util.ReadBin(r, byteOrder, &e.Family); err != nil {
- return err
- }
- if err := util.ReadBin(r, byteOrder, &e.Index); err != nil {
- return err
- }
-
- var hasPubKey bool
- if err := util.ReadBin(r, byteOrder, &hasPubKey); err != nil {
- return err
- }
-
- if hasPubKey {
- return ReadElement(r, &e.PubKey)
- }
-
- case *ChannelType:
- if err := util.ReadBin(r, byteOrder, e); err != nil {
- return err
- }
-
- case *chainhash.Hash:
- if _, err := util.ReadFull(r, e[:]); err != nil {
- return err
- }
-
- case *wire.OutPoint:
- return readOutpoint(r, e)
-
- case *lnwire.ShortChannelID:
- var a uint64
- if err := util.ReadBin(r, byteOrder, &a); err != nil {
- return err
- }
- *e = lnwire.NewShortChanIDFromInt(a)
-
- case *lnwire.ChannelID:
- if _, err := util.ReadFull(r, e[:]); err != nil {
- return err
- }
-
- case *int64, *uint64:
- if err := util.ReadBin(r, byteOrder, e); err != nil {
- return err
- }
-
- case *uint32:
- if err := util.ReadBin(r, byteOrder, e); err != nil {
- return err
- }
-
- case *int32:
- if err := util.ReadBin(r, byteOrder, e); err != nil {
- return err
- }
-
- case *uint16:
- if err := util.ReadBin(r, byteOrder, e); err != nil {
- return err
- }
-
- case *uint8:
- if err := util.ReadBin(r, byteOrder, e); err != nil {
- return err
- }
-
- case *bool:
- if err := util.ReadBin(r, byteOrder, e); err != nil {
- return err
- }
-
- case *btcutil.Amount:
- var a uint64
- if err := util.ReadBin(r, byteOrder, &a); err != nil {
- return err
- }
-
- *e = btcutil.Amount(a)
-
- case *lnwire.MilliSatoshi:
- var a uint64
- if err := util.ReadBin(r, byteOrder, &a); err != nil {
- return err
- }
-
- *e = lnwire.MilliSatoshi(a)
-
- case **btcec.PrivateKey:
- var b [btcec.PrivKeyBytesLen]byte
- if _, err := util.ReadFull(r, b[:]); err != nil {
- return err
- }
-
- priv, _ := btcec.PrivKeyFromBytes(btcec.S256(), b[:])
- *e = priv
-
- case **btcec.PublicKey:
- var b [btcec.PubKeyBytesLenCompressed]byte
- if _, err := util.ReadFull(r, b[:]); err != nil {
- return err
- }
-
- pubKey, err := btcec.ParsePubKey(b[:], btcec.S256())
- if err != nil {
- return err
- }
- *e = pubKey
-
- case *shachain.Producer:
- var root [32]byte
- if _, err := util.ReadFull(r, root[:]); err != nil {
- return err
- }
-
- // TODO(roasbeef): remove
- producer, err := shachain.NewRevocationProducerFromBytes(root[:])
- if err != nil {
- return err
- }
-
- *e = producer
-
- case *shachain.Store:
- store, err := shachain.NewRevocationStoreFromBytes(r)
- if err != nil {
- return err
- }
-
- *e = store
-
- case **wire.MsgTx:
- tx := wire.NewMsgTx(2)
- if err := tx.Deserialize(r); err != nil {
- return err
- }
-
- *e = tx
-
- case *[32]byte:
- if _, err := util.ReadFull(r, e[:]); err != nil {
- return err
- }
-
- case *[]byte:
- bytes, err := wire.ReadVarBytes(r, 0, 66000, "[]byte")
- if err != nil {
- return err
- }
-
- *e = bytes
-
- case *lnwire.Message:
- msg, err := lnwire.ReadMessage(r, 0)
- if err != nil {
- return err
- }
-
- *e = msg
-
- case *ChannelStatus:
- if err := util.ReadBin(r, byteOrder, e); err != nil {
- return err
- }
-
- case *ClosureType:
- if err := util.ReadBin(r, byteOrder, e); err != nil {
- return err
- }
-
- case *paymentIndexType:
- if err := util.ReadBin(r, byteOrder, e); err != nil {
- return err
- }
-
- case *lnwire.FundingFlag:
- if err := util.ReadBin(r, byteOrder, e); err != nil {
- return err
- }
-
- case *net.Addr:
- addr, err := deserializeAddr(r)
- if err != nil {
- return err
- }
- *e = addr
-
- case *[]net.Addr:
- var numAddrs uint32
- if err := ReadElement(r, &numAddrs); err != nil {
- return err
- }
-
- *e = make([]net.Addr, numAddrs)
- for i := uint32(0); i < numAddrs; i++ {
- addr, err := deserializeAddr(r)
- if err != nil {
- return err
- }
- (*e)[i] = addr
- }
-
- default:
- return er.E(UnknownElementType{"ReadElement", e})
- }
-
- return nil
-}
-
-// ReadElements deserializes a variable number of elements into the passed
-// io.Reader, with each element being deserialized according to the ReadElement
-// function.
-func ReadElements(r io.Reader, elements ...interface{}) er.R {
- for _, element := range elements {
- err := ReadElement(r, element)
- if err != nil {
- return err
- }
- }
- return nil
-}
diff --git a/lnd/channeldb/db.go b/lnd/channeldb/db.go
deleted file mode 100644
index 15021156..00000000
--- a/lnd/channeldb/db.go
+++ /dev/null
@@ -1,1333 +0,0 @@
-package channeldb
-
-import (
- "bytes"
- "encoding/binary"
- "io/ioutil"
- "net"
- "os"
-
- "github.com/pkt-cash/pktd/btcec"
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/lnd/channeldb/kvdb"
- mig "github.com/pkt-cash/pktd/lnd/channeldb/migration"
- "github.com/pkt-cash/pktd/lnd/channeldb/migration12"
- "github.com/pkt-cash/pktd/lnd/channeldb/migration13"
- "github.com/pkt-cash/pktd/lnd/channeldb/migration16"
- "github.com/pkt-cash/pktd/lnd/channeldb/migration_01_to_11"
- "github.com/pkt-cash/pktd/lnd/clock"
- "github.com/pkt-cash/pktd/lnd/lnwire"
- "github.com/pkt-cash/pktd/pktlog/log"
- "github.com/pkt-cash/pktd/pktwallet/walletdb"
- "github.com/pkt-cash/pktd/wire"
-)
-
-const (
- dbName = "channel.db"
- dbFilePermission = 0600
-)
-
-var (
- // ErrDryRunMigrationOK signals that a migration executed successful,
- // but we intentionally did not commit the result.
- ErrDryRunMigrationOK = Err.CodeWithDetail("ErrDryRunMigrationOK", "dry run migration successful")
-)
-
-// migration is a function which takes a prior outdated version of the database
-// instances and mutates the key/bucket structure to arrive at a more
-// up-to-date version of the database.
-type migration func(tx kvdb.RwTx) er.R
-
-type version struct {
- number uint32
- migration migration
-}
-
-var (
- // dbVersions is storing all versions of database. If current version
- // of database don't match with latest version this list will be used
- // for retrieving all migration function that are need to apply to the
- // current db.
- dbVersions = []version{
- {
- // The base DB version requires no migration.
- number: 0,
- migration: nil,
- },
- {
- // The version of the database where two new indexes
- // for the update time of node and channel updates were
- // added.
- number: 1,
- migration: migration_01_to_11.MigrateNodeAndEdgeUpdateIndex,
- },
- {
- // The DB version that added the invoice event time
- // series.
- number: 2,
- migration: migration_01_to_11.MigrateInvoiceTimeSeries,
- },
- {
- // The DB version that updated the embedded invoice in
- // outgoing payments to match the new format.
- number: 3,
- migration: migration_01_to_11.MigrateInvoiceTimeSeriesOutgoingPayments,
- },
- {
- // The version of the database where every channel
- // always has two entries in the edges bucket. If
- // a policy is unknown, this will be represented
- // by a special byte sequence.
- number: 4,
- migration: migration_01_to_11.MigrateEdgePolicies,
- },
- {
- // The DB version where we persist each attempt to send
- // an HTLC to a payment hash, and track whether the
- // payment is in-flight, succeeded, or failed.
- number: 5,
- migration: migration_01_to_11.PaymentStatusesMigration,
- },
- {
- // The DB version that properly prunes stale entries
- // from the edge update index.
- number: 6,
- migration: migration_01_to_11.MigratePruneEdgeUpdateIndex,
- },
- {
- // The DB version that migrates the ChannelCloseSummary
- // to a format where optional fields are indicated with
- // boolean flags.
- number: 7,
- migration: migration_01_to_11.MigrateOptionalChannelCloseSummaryFields,
- },
- {
- // The DB version that changes the gossiper's message
- // store keys to account for the message's type and
- // ShortChannelID.
- number: 8,
- migration: migration_01_to_11.MigrateGossipMessageStoreKeys,
- },
- {
- // The DB version where the payments and payment
- // statuses are moved to being stored in a combined
- // bucket.
- number: 9,
- migration: migration_01_to_11.MigrateOutgoingPayments,
- },
- {
- // The DB version where we started to store legacy
- // payload information for all routes, as well as the
- // optional TLV records.
- number: 10,
- migration: migration_01_to_11.MigrateRouteSerialization,
- },
- {
- // Add invoice htlc and cltv delta fields.
- number: 11,
- migration: migration_01_to_11.MigrateInvoices,
- },
- {
- // Migrate to TLV invoice bodies, add payment address
- // and features, remove receipt.
- number: 12,
- migration: migration12.MigrateInvoiceTLV,
- },
- {
- // Migrate to multi-path payments.
- number: 13,
- migration: migration13.MigrateMPP,
- },
- {
- // Initialize payment address index and begin using it
- // as the default index, falling back to payment hash
- // index.
- number: 14,
- migration: mig.CreateTLB(payAddrIndexBucket),
- },
- {
- // Initialize payment index bucket which will be used
- // to index payments by sequence number. This index will
- // be used to allow more efficient ListPayments queries.
- number: 15,
- migration: mig.CreateTLB(paymentsIndexBucket),
- },
- {
- // Add our existing payments to the index bucket created
- // in migration 15.
- number: 16,
- migration: migration16.MigrateSequenceIndex,
- },
- {
- // Create a top level bucket which will store extra
- // information about channel closes.
- number: 17,
- migration: mig.CreateTLB(closeSummaryBucket),
- },
- {
- // Create a top level bucket which holds information
- // about our peers.
- number: 18,
- migration: mig.CreateTLB(peersBucket),
- },
- }
-
- // Big endian is the preferred byte order, due to cursor scans over
- // integer keys iterating in order.
- byteOrder = binary.BigEndian
-)
-
-// DB is the primary datastore for the lnd daemon. The database stores
-// information related to nodes, routing data, open/closed channels, fee
-// schedules, and reputation data.
-type DB struct {
- kvdb.Backend
-
- dbPath string
- graph *ChannelGraph
- clock clock.Clock
- dryRun bool
-}
-
-// Update is a wrapper around walletdb.Update which calls into the extended
-// backend when available. This call is needed to be able to cast DB to
-// ExtendedBackend. The passed reset function is called before the start of the
-// transaction and can be used to reset intermediate state. As callers may
-// expect retries of the f closure (depending on the database backend used), the
-// reset function will be called before each retry respectively.
-func (db *DB) Update(f func(tx walletdb.ReadWriteTx) er.R, reset func()) er.R {
- if v, ok := db.Backend.(kvdb.ExtendedBackend); ok {
- return v.Update(f, reset)
- }
-
- reset()
- return walletdb.Update(db, f)
-}
-
-// View is a wrapper around walletdb.View which calls into the extended
-// backend when available. This call is needed to be able to cast DB to
-// ExtendedBackend. The passed reset function is called before the start of the
-// transaction and can be used to reset intermediate state. As callers may
-// expect retries of the f closure (depending on the database backend used), the
-// reset function will be called before each retry respectively.
-func (db *DB) View(f func(tx walletdb.ReadTx) er.R, reset func()) er.R {
- if v, ok := db.Backend.(kvdb.ExtendedBackend); ok {
- return v.View(f, reset)
- }
-
- reset()
- return walletdb.View(db, f)
-}
-
-// PrintStats calls into the extended backend if available. This call is needed
-// to be able to cast DB to ExtendedBackend.
-func (db *DB) PrintStats() string {
- if v, ok := db.Backend.(kvdb.ExtendedBackend); ok {
- return v.PrintStats()
- }
-
- return "unimplemented"
-}
-
-// Open opens or creates channeldb. Any necessary schemas migrations due
-// to updates will take place as necessary.
-// TODO(bhandras): deprecate this function.
-func Open(dbPath string, modifiers ...OptionModifier) (*DB, er.R) {
- opts := DefaultOptions()
- for _, modifier := range modifiers {
- modifier(&opts)
- }
-
- backend, err := kvdb.GetBoltBackend(&kvdb.BoltBackendConfig{
- DBPath: dbPath,
- DBFileName: dbName,
- NoFreelistSync: opts.NoFreelistSync,
- AutoCompact: opts.AutoCompact,
- AutoCompactMinAge: opts.AutoCompactMinAge,
- })
- if err != nil {
- return nil, err
- }
-
- db, err := CreateWithBackend(backend, modifiers...)
- if err == nil {
- db.dbPath = dbPath
- }
- return db, err
-}
-
-// CreateWithBackend creates channeldb instance using the passed kvdb.Backend.
-// Any necessary schemas migrations due to updates will take place as necessary.
-func CreateWithBackend(backend kvdb.Backend, modifiers ...OptionModifier) (*DB, er.R) {
- if err := initChannelDB(backend); err != nil {
- return nil, err
- }
-
- opts := DefaultOptions()
- for _, modifier := range modifiers {
- modifier(&opts)
- }
-
- chanDB := &DB{
- Backend: backend,
- clock: opts.clock,
- dryRun: opts.dryRun,
- }
- chanDB.graph = newChannelGraph(
- chanDB, opts.RejectCacheSize, opts.ChannelCacheSize,
- )
-
- // Synchronize the version of database and apply migrations if needed.
- if err := chanDB.syncVersions(dbVersions); err != nil {
- backend.Close()
- return nil, err
- }
-
- return chanDB, nil
-}
-
-// Path returns the file path to the channel database.
-func (d *DB) Path() string {
- return d.dbPath
-}
-
-var topLevelBuckets = [][]byte{
- openChannelBucket,
- closedChannelBucket,
- forwardingLogBucket,
- fwdPackagesKey,
- invoiceBucket,
- payAddrIndexBucket,
- paymentsIndexBucket,
- peersBucket,
- nodeInfoBucket,
- nodeBucket,
- edgeBucket,
- edgeIndexBucket,
- graphMetaBucket,
- metaBucket,
- closeSummaryBucket,
-}
-
-// Wipe completely deletes all saved state within all used buckets within the
-// database. The deletion is done in a single transaction, therefore this
-// operation is fully atomic.
-func (d *DB) Wipe() er.R {
- return kvdb.Update(d, func(tx kvdb.RwTx) er.R {
- for _, tlb := range topLevelBuckets {
- err := tx.DeleteTopLevelBucket(tlb)
- if err != nil && !kvdb.ErrBucketNotFound.Is(err) {
- return err
- }
- }
- return nil
- }, func() {})
-}
-
-// createChannelDB creates and initializes a fresh version of channeldb. In
-// the case that the target path has not yet been created or doesn't yet exist,
-// then the path is created. Additionally, all required top-level buckets used
-// within the database are created.
-func initChannelDB(db kvdb.Backend) er.R {
- err := kvdb.Update(db, func(tx kvdb.RwTx) er.R {
- meta := &Meta{}
- // Check if DB is already initialized.
- err := fetchMeta(meta, tx)
- if err == nil {
- return nil
- }
-
- for _, tlb := range topLevelBuckets {
- if _, err := tx.CreateTopLevelBucket(tlb); err != nil {
- return err
- }
- }
-
- nodes := tx.ReadWriteBucket(nodeBucket)
- _, err = nodes.CreateBucket(aliasIndexBucket)
- if err != nil {
- return err
- }
- _, err = nodes.CreateBucket(nodeUpdateIndexBucket)
- if err != nil {
- return err
- }
-
- edges := tx.ReadWriteBucket(edgeBucket)
- if _, err := edges.CreateBucket(edgeIndexBucket); err != nil {
- return err
- }
- if _, err := edges.CreateBucket(edgeUpdateIndexBucket); err != nil {
- return err
- }
- if _, err := edges.CreateBucket(channelPointBucket); err != nil {
- return err
- }
- if _, err := edges.CreateBucket(zombieBucket); err != nil {
- return err
- }
-
- graphMeta := tx.ReadWriteBucket(graphMetaBucket)
- _, err = graphMeta.CreateBucket(pruneLogBucket)
- if err != nil {
- return err
- }
-
- meta.DbVersionNumber = getLatestDBVersion(dbVersions)
- return putMeta(meta, tx)
- }, func() {})
- if err != nil {
- return er.Errorf("unable to create new channeldb: %v", err)
- }
-
- return nil
-}
-
-// fileExists returns true if the file exists, and false otherwise.
-func fileExists(path string) bool {
- if _, err := os.Stat(path); err != nil {
- if os.IsNotExist(err) {
- return false
- }
- }
-
- return true
-}
-
-// FetchOpenChannels starts a new database transaction and returns all stored
-// currently active/open channels associated with the target nodeID. In the case
-// that no active channels are known to have been created with this node, then a
-// zero-length slice is returned.
-func (d *DB) FetchOpenChannels(nodeID *btcec.PublicKey) ([]*OpenChannel, er.R) {
- var channels []*OpenChannel
- err := kvdb.View(d, func(tx kvdb.RTx) er.R {
- var err er.R
- channels, err = d.fetchOpenChannels(tx, nodeID)
- return err
- }, func() {
- channels = nil
- })
-
- return channels, err
-}
-
-// fetchOpenChannels uses and existing database transaction and returns all
-// stored currently active/open channels associated with the target nodeID. In
-// the case that no active channels are known to have been created with this
-// node, then a zero-length slice is returned.
-func (db *DB) fetchOpenChannels(tx kvdb.RTx,
- nodeID *btcec.PublicKey) ([]*OpenChannel, er.R) {
-
- // Get the bucket dedicated to storing the metadata for open channels.
- openChanBucket := tx.ReadBucket(openChannelBucket)
- if openChanBucket == nil {
- return nil, nil
- }
-
- // Within this top level bucket, fetch the bucket dedicated to storing
- // open channel data specific to the remote node.
- pub := nodeID.SerializeCompressed()
- nodeChanBucket := openChanBucket.NestedReadBucket(pub)
- if nodeChanBucket == nil {
- return nil, nil
- }
-
- // Next, we'll need to go down an additional layer in order to retrieve
- // the channels for each chain the node knows of.
- var channels []*OpenChannel
- err := nodeChanBucket.ForEach(func(chainHash, v []byte) er.R {
- // If there's a value, it's not a bucket so ignore it.
- if v != nil {
- return nil
- }
-
- // If we've found a valid chainhash bucket, then we'll retrieve
- // that so we can extract all the channels.
- chainBucket := nodeChanBucket.NestedReadBucket(chainHash)
- if chainBucket == nil {
- return er.Errorf("unable to read bucket for chain=%x",
- chainHash[:])
- }
-
- // Finally, we both of the necessary buckets retrieved, fetch
- // all the active channels related to this node.
- nodeChannels, err := db.fetchNodeChannels(chainBucket)
- if err != nil {
- return er.Errorf("unable to read channel for "+
- "chain_hash=%x, node_key=%x: %v",
- chainHash[:], pub, err)
- }
-
- channels = append(channels, nodeChannels...)
- return nil
- })
-
- return channels, err
-}
-
-// fetchNodeChannels retrieves all active channels from the target chainBucket
-// which is under a node's dedicated channel bucket. This function is typically
-// used to fetch all the active channels related to a particular node.
-func (db *DB) fetchNodeChannels(chainBucket kvdb.RBucket) ([]*OpenChannel, er.R) {
-
- var channels []*OpenChannel
-
- // A node may have channels on several chains, so for each known chain,
- // we'll extract all the channels.
- err := chainBucket.ForEach(func(chanPoint, v []byte) er.R {
- // If there's a value, it's not a bucket so ignore it.
- if v != nil {
- return nil
- }
-
- // Once we've found a valid channel bucket, we'll extract it
- // from the node's chain bucket.
- chanBucket := chainBucket.NestedReadBucket(chanPoint)
-
- var outPoint wire.OutPoint
- err := readOutpoint(bytes.NewReader(chanPoint), &outPoint)
- if err != nil {
- return err
- }
- oChannel, err := fetchOpenChannel(chanBucket, &outPoint)
- if err != nil {
- return er.Errorf("unable to read channel data for "+
- "chan_point=%v: %v", outPoint, err)
- }
- oChannel.Db = db
-
- channels = append(channels, oChannel)
-
- return nil
- })
- if err != nil {
- return nil, err
- }
-
- return channels, nil
-}
-
-// FetchChannel attempts to locate a channel specified by the passed channel
-// point. If the channel cannot be found, then an error will be returned.
-func (d *DB) FetchChannel(chanPoint wire.OutPoint) (*OpenChannel, er.R) {
- var (
- targetChan *OpenChannel
- targetChanPoint bytes.Buffer
- )
-
- if err := writeOutpoint(&targetChanPoint, &chanPoint); err != nil {
- return nil, err
- }
-
- // chanScan will traverse the following bucket structure:
- // * nodePub => chainHash => chanPoint
- //
- // At each level we go one further, ensuring that we're traversing the
- // proper key (that's actually a bucket). By only reading the bucket
- // structure and skipping fully decoding each channel, we save a good
- // bit of CPU as we don't need to do things like decompress public
- // keys.
- chanScan := func(tx kvdb.RTx) er.R {
- // Get the bucket dedicated to storing the metadata for open
- // channels.
- openChanBucket := tx.ReadBucket(openChannelBucket)
- if openChanBucket == nil {
- return ErrNoActiveChannels.Default()
- }
-
- // Within the node channel bucket, are the set of node pubkeys
- // we have channels with, we don't know the entire set, so
- // we'll check them all.
- return openChanBucket.ForEach(func(nodePub, v []byte) er.R {
- // Ensure that this is a key the same size as a pubkey,
- // and also that it leads directly to a bucket.
- if len(nodePub) != 33 || v != nil {
- return nil
- }
-
- nodeChanBucket := openChanBucket.NestedReadBucket(nodePub)
- if nodeChanBucket == nil {
- return nil
- }
-
- // The next layer down is all the chains that this node
- // has channels on with us.
- return nodeChanBucket.ForEach(func(chainHash, v []byte) er.R {
- // If there's a value, it's not a bucket so
- // ignore it.
- if v != nil {
- return nil
- }
-
- chainBucket := nodeChanBucket.NestedReadBucket(
- chainHash,
- )
- if chainBucket == nil {
- return er.Errorf("unable to read "+
- "bucket for chain=%x", chainHash[:])
- }
-
- // Finally we reach the leaf bucket that stores
- // all the chanPoints for this node.
- chanBucket := chainBucket.NestedReadBucket(
- targetChanPoint.Bytes(),
- )
- if chanBucket == nil {
- return nil
- }
-
- channel, err := fetchOpenChannel(
- chanBucket, &chanPoint,
- )
- if err != nil {
- return err
- }
-
- targetChan = channel
- targetChan.Db = d
-
- return nil
- })
- })
- }
-
- err := kvdb.View(d, chanScan, func() {})
- if err != nil {
- return nil, err
- }
-
- if targetChan != nil {
- return targetChan, nil
- }
-
- // If we can't find the channel, then we return with an error, as we
- // have nothing to backup.
- return nil, ErrChannelNotFound.Default()
-}
-
-// FetchAllChannels attempts to retrieve all open channels currently stored
-// within the database, including pending open, fully open and channels waiting
-// for a closing transaction to confirm.
-func (d *DB) FetchAllChannels() ([]*OpenChannel, er.R) {
- return fetchChannels(d)
-}
-
-// FetchAllOpenChannels will return all channels that have the funding
-// transaction confirmed, and is not waiting for a closing transaction to be
-// confirmed.
-func (d *DB) FetchAllOpenChannels() ([]*OpenChannel, er.R) {
- return fetchChannels(
- d,
- pendingChannelFilter(false),
- waitingCloseFilter(false),
- )
-}
-
-// FetchPendingChannels will return channels that have completed the process of
-// generating and broadcasting funding transactions, but whose funding
-// transactions have yet to be confirmed on the blockchain.
-func (d *DB) FetchPendingChannels() ([]*OpenChannel, er.R) {
- return fetchChannels(d,
- pendingChannelFilter(true),
- waitingCloseFilter(false),
- )
-}
-
-// FetchWaitingCloseChannels will return all channels that have been opened,
-// but are now waiting for a closing transaction to be confirmed.
-//
-// NOTE: This includes channels that are also pending to be opened.
-func (d *DB) FetchWaitingCloseChannels() ([]*OpenChannel, er.R) {
- return fetchChannels(
- d, waitingCloseFilter(true),
- )
-}
-
-// fetchChannelsFilter applies a filter to channels retrieved in fetchchannels.
-// A set of filters can be combined to filter across multiple dimensions.
-type fetchChannelsFilter func(channel *OpenChannel) bool
-
-// pendingChannelFilter returns a filter based on whether channels are pending
-// (ie, their funding transaction still needs to confirm). If pending is false,
-// channels with confirmed funding transactions are returned.
-func pendingChannelFilter(pending bool) fetchChannelsFilter {
- return func(channel *OpenChannel) bool {
- return channel.IsPending == pending
- }
-}
-
-// waitingCloseFilter returns a filter which filters channels based on whether
-// they are awaiting the confirmation of their closing transaction. If waiting
-// close is true, channels that have had their closing tx broadcast are
-// included. If it is false, channels that are not awaiting confirmation of
-// their close transaction are returned.
-func waitingCloseFilter(waitingClose bool) fetchChannelsFilter {
- return func(channel *OpenChannel) bool {
- // If the channel is in any other state than Default,
- // then it means it is waiting to be closed.
- channelWaitingClose :=
- channel.ChanStatus() != ChanStatusDefault
-
- // Include the channel if it matches the value for
- // waiting close that we are filtering on.
- return channelWaitingClose == waitingClose
- }
-}
-
-// fetchChannels attempts to retrieve channels currently stored in the
-// database. It takes a set of filters which are applied to each channel to
-// obtain a set of channels with the desired set of properties. Only channels
-// which have a true value returned for *all* of the filters will be returned.
-// If no filters are provided, every channel in the open channels bucket will
-// be returned.
-func fetchChannels(d *DB, filters ...fetchChannelsFilter) ([]*OpenChannel, er.R) {
- var channels []*OpenChannel
-
- err := kvdb.View(d, func(tx kvdb.RTx) er.R {
- // Get the bucket dedicated to storing the metadata for open
- // channels.
- openChanBucket := tx.ReadBucket(openChannelBucket)
- if openChanBucket == nil {
- return ErrNoActiveChannels.Default()
- }
-
- // Next, fetch the bucket dedicated to storing metadata related
- // to all nodes. All keys within this bucket are the serialized
- // public keys of all our direct counterparties.
- nodeMetaBucket := tx.ReadBucket(nodeInfoBucket)
- if nodeMetaBucket == nil {
- return er.Errorf("node bucket not created")
- }
-
- // Finally for each node public key in the bucket, fetch all
- // the channels related to this particular node.
- return nodeMetaBucket.ForEach(func(k, v []byte) er.R {
- nodeChanBucket := openChanBucket.NestedReadBucket(k)
- if nodeChanBucket == nil {
- return nil
- }
-
- return nodeChanBucket.ForEach(func(chainHash, v []byte) er.R {
- // If there's a value, it's not a bucket so
- // ignore it.
- if v != nil {
- return nil
- }
-
- // If we've found a valid chainhash bucket,
- // then we'll retrieve that so we can extract
- // all the channels.
- chainBucket := nodeChanBucket.NestedReadBucket(
- chainHash,
- )
- if chainBucket == nil {
- return er.Errorf("unable to read "+
- "bucket for chain=%x", chainHash[:])
- }
-
- nodeChans, err := d.fetchNodeChannels(chainBucket)
- if err != nil {
- return er.Errorf("unable to read "+
- "channel for chain_hash=%x, "+
- "node_key=%x: %v", chainHash[:], k, err)
- }
- for _, channel := range nodeChans {
- // includeChannel indicates whether the channel
- // meets the criteria specified by our filters.
- includeChannel := true
-
- // Run through each filter and check whether the
- // channel should be included.
- for _, f := range filters {
- // If the channel fails the filter, set
- // includeChannel to false and don't bother
- // checking the remaining filters.
- if !f(channel) {
- includeChannel = false
- break
- }
- }
-
- // If the channel passed every filter, include it in
- // our set of channels.
- if includeChannel {
- channels = append(channels, channel)
- }
- }
- return nil
- })
-
- })
- }, func() {
- channels = nil
- })
- if err != nil {
- return nil, err
- }
-
- return channels, nil
-}
-
-// FetchClosedChannels attempts to fetch all closed channels from the database.
-// The pendingOnly bool toggles if channels that aren't yet fully closed should
-// be returned in the response or not. When a channel was cooperatively closed,
-// it becomes fully closed after a single confirmation. When a channel was
-// forcibly closed, it will become fully closed after _all_ the pending funds
-// (if any) have been swept.
-func (d *DB) FetchClosedChannels(pendingOnly bool) ([]*ChannelCloseSummary, er.R) {
- var chanSummaries []*ChannelCloseSummary
-
- if err := kvdb.View(d, func(tx kvdb.RTx) er.R {
- closeBucket := tx.ReadBucket(closedChannelBucket)
- if closeBucket == nil {
- return ErrNoClosedChannels.Default()
- }
-
- return closeBucket.ForEach(func(chanID []byte, summaryBytes []byte) er.R {
- summaryReader := bytes.NewReader(summaryBytes)
- chanSummary, err := deserializeCloseChannelSummary(summaryReader)
- if err != nil {
- return err
- }
-
- // If the query specified to only include pending
- // channels, then we'll skip any channels which aren't
- // currently pending.
- if !chanSummary.IsPending && pendingOnly {
- return nil
- }
-
- chanSummaries = append(chanSummaries, chanSummary)
- return nil
- })
- }, func() {
- chanSummaries = nil
- }); err != nil {
- return nil, err
- }
-
- return chanSummaries, nil
-}
-
-// ErrClosedChannelNotFound signals that a closed channel could not be found in
-// the channeldb.
-var ErrClosedChannelNotFound = Err.CodeWithDetail("ErrClosedChannelNotFound", "unable to find closed channel summary")
-
-// FetchClosedChannel queries for a channel close summary using the channel
-// point of the channel in question.
-func (d *DB) FetchClosedChannel(chanID *wire.OutPoint) (*ChannelCloseSummary, er.R) {
- var chanSummary *ChannelCloseSummary
- if err := kvdb.View(d, func(tx kvdb.RTx) er.R {
- closeBucket := tx.ReadBucket(closedChannelBucket)
- if closeBucket == nil {
- return ErrClosedChannelNotFound.Default()
- }
-
- var b bytes.Buffer
- var err er.R
- if err = writeOutpoint(&b, chanID); err != nil {
- return err
- }
-
- summaryBytes := closeBucket.Get(b.Bytes())
- if summaryBytes == nil {
- return ErrClosedChannelNotFound.Default()
- }
-
- summaryReader := bytes.NewReader(summaryBytes)
- chanSummary, err = deserializeCloseChannelSummary(summaryReader)
-
- return err
- }, func() {
- chanSummary = nil
- }); err != nil {
- return nil, err
- }
-
- return chanSummary, nil
-}
-
-// FetchClosedChannelForID queries for a channel close summary using the
-// channel ID of the channel in question.
-func (d *DB) FetchClosedChannelForID(cid lnwire.ChannelID) (
- *ChannelCloseSummary, er.R) {
-
- var chanSummary *ChannelCloseSummary
- if err := kvdb.View(d, func(tx kvdb.RTx) er.R {
- closeBucket := tx.ReadBucket(closedChannelBucket)
- if closeBucket == nil {
- return ErrClosedChannelNotFound.Default()
- }
-
- // The first 30 bytes of the channel ID and outpoint will be
- // equal.
- cursor := closeBucket.ReadCursor()
- op, c := cursor.Seek(cid[:30])
-
- // We scan over all possible candidates for this channel ID.
- for ; op != nil && bytes.Compare(cid[:30], op[:30]) <= 0; op, c = cursor.Next() {
- var outPoint wire.OutPoint
- err := readOutpoint(bytes.NewReader(op), &outPoint)
- if err != nil {
- return err
- }
-
- // If the found outpoint does not correspond to this
- // channel ID, we continue.
- if !cid.IsChanPoint(&outPoint) {
- continue
- }
-
- // Deserialize the close summary and return.
- r := bytes.NewReader(c)
- chanSummary, err = deserializeCloseChannelSummary(r)
- if err != nil {
- return err
- }
-
- return nil
- }
- return ErrClosedChannelNotFound.Default()
- }, func() {
- chanSummary = nil
- }); err != nil {
- return nil, err
- }
-
- return chanSummary, nil
-}
-
-// MarkChanFullyClosed marks a channel as fully closed within the database. A
-// channel should be marked as fully closed if the channel was initially
-// cooperatively closed and it's reached a single confirmation, or after all
-// the pending funds in a channel that has been forcibly closed have been
-// swept.
-func (d *DB) MarkChanFullyClosed(chanPoint *wire.OutPoint) er.R {
- return kvdb.Update(d, func(tx kvdb.RwTx) er.R {
- var b bytes.Buffer
- if err := writeOutpoint(&b, chanPoint); err != nil {
- return err
- }
-
- chanID := b.Bytes()
-
- closedChanBucket, err := tx.CreateTopLevelBucket(
- closedChannelBucket,
- )
- if err != nil {
- return err
- }
-
- chanSummaryBytes := closedChanBucket.Get(chanID)
- if chanSummaryBytes == nil {
- return er.Errorf("no closed channel for "+
- "chan_point=%v found", chanPoint)
- }
-
- chanSummaryReader := bytes.NewReader(chanSummaryBytes)
- chanSummary, errr := deserializeCloseChannelSummary(
- chanSummaryReader,
- )
- if errr != nil {
- return errr
- }
-
- chanSummary.IsPending = false
-
- var newSummary bytes.Buffer
- errr = serializeChannelCloseSummary(&newSummary, chanSummary)
- if errr != nil {
- return errr
- }
-
- err = closedChanBucket.Put(chanID, newSummary.Bytes())
- if err != nil {
- return err
- }
-
- // Now that the channel is closed, we'll check if we have any
- // other open channels with this peer. If we don't we'll
- // garbage collect it to ensure we don't establish persistent
- // connections to peers without open channels.
- return d.pruneLinkNode(tx, chanSummary.RemotePub)
- }, func() {})
-}
-
-// pruneLinkNode determines whether we should garbage collect a link node from
-// the database due to no longer having any open channels with it. If there are
-// any left, then this acts as a no-op.
-func (db *DB) pruneLinkNode(tx kvdb.RwTx, remotePub *btcec.PublicKey) er.R {
- openChannels, err := db.fetchOpenChannels(tx, remotePub)
- if err != nil {
- return er.Errorf("unable to fetch open channels for peer %x: "+
- "%v", remotePub.SerializeCompressed(), err)
- }
-
- if len(openChannels) > 0 {
- return nil
- }
-
- log.Infof("Pruning link node %x with zero open channels from database",
- remotePub.SerializeCompressed())
-
- return db.deleteLinkNode(tx, remotePub)
-}
-
-// PruneLinkNodes attempts to prune all link nodes found within the databse with
-// whom we no longer have any open channels with.
-func (d *DB) PruneLinkNodes() er.R {
- return kvdb.Update(d, func(tx kvdb.RwTx) er.R {
- linkNodes, err := d.fetchAllLinkNodes(tx)
- if err != nil {
- return err
- }
-
- for _, linkNode := range linkNodes {
- err := d.pruneLinkNode(tx, linkNode.IdentityPub)
- if err != nil {
- return err
- }
- }
-
- return nil
- }, func() {})
-}
-
-// ChannelShell is a shell of a channel that is meant to be used for channel
-// recovery purposes. It contains a minimal OpenChannel instance along with
-// addresses for that target node.
-type ChannelShell struct {
- // NodeAddrs the set of addresses that this node has known to be
- // reachable at in the past.
- NodeAddrs []net.Addr
-
- // Chan is a shell of an OpenChannel, it contains only the items
- // required to restore the channel on disk.
- Chan *OpenChannel
-}
-
-// RestoreChannelShells is a method that allows the caller to reconstruct the
-// state of an OpenChannel from the ChannelShell. We'll attempt to write the
-// new channel to disk, create a LinkNode instance with the passed node
-// addresses, and finally create an edge within the graph for the channel as
-// well. This method is idempotent, so repeated calls with the same set of
-// channel shells won't modify the database after the initial call.
-func (d *DB) RestoreChannelShells(channelShells ...*ChannelShell) er.R {
- err := kvdb.Update(d, func(tx kvdb.RwTx) er.R {
- for _, channelShell := range channelShells {
- channel := channelShell.Chan
-
- // When we make a channel, we mark that the channel has
- // been restored, this will signal to other sub-systems
- // to not attempt to use the channel as if it was a
- // regular one.
- channel.chanStatus |= ChanStatusRestored
-
- // First, we'll attempt to create a new open channel
- // and link node for this channel. If the channel
- // already exists, then in order to ensure this method
- // is idempotent, we'll continue to the next step.
- channel.Db = d
- err := syncNewChannel(
- tx, channel, channelShell.NodeAddrs,
- )
- if err != nil {
- return err
- }
- }
-
- return nil
- }, func() {})
- if err != nil {
- return err
- }
-
- return nil
-}
-
-// AddrsForNode consults the graph and channel database for all addresses known
-// to the passed node public key.
-func (d *DB) AddrsForNode(nodePub *btcec.PublicKey) ([]net.Addr, er.R) {
- var (
- linkNode *LinkNode
- graphNode LightningNode
- )
-
- dbErr := kvdb.View(d, func(tx kvdb.RTx) er.R {
- var err er.R
-
- linkNode, err = fetchLinkNode(tx, nodePub)
- if err != nil {
- return err
- }
-
- // We'll also query the graph for this peer to see if they have
- // any addresses that we don't currently have stored within the
- // link node database.
- nodes := tx.ReadBucket(nodeBucket)
- if nodes == nil {
- return ErrGraphNotFound.Default()
- }
- compressedPubKey := nodePub.SerializeCompressed()
- graphNode, err = fetchLightningNode(nodes, compressedPubKey)
- if err != nil && !ErrGraphNodeNotFound.Is(err) {
- // If the node isn't found, then that's OK, as we still
- // have the link node data.
- return err
- }
-
- return nil
- }, func() {
- linkNode = nil
- })
- if dbErr != nil {
- return nil, dbErr
- }
-
- // Now that we have both sources of addrs for this node, we'll use a
- // map to de-duplicate any addresses between the two sources, and
- // produce a final list of the combined addrs.
- addrs := make(map[string]net.Addr)
- for _, addr := range linkNode.Addresses {
- addrs[addr.String()] = addr
- }
- for _, addr := range graphNode.Addresses {
- addrs[addr.String()] = addr
- }
- dedupedAddrs := make([]net.Addr, 0, len(addrs))
- for _, addr := range addrs {
- dedupedAddrs = append(dedupedAddrs, addr)
- }
-
- return dedupedAddrs, nil
-}
-
-// AbandonChannel attempts to remove the target channel from the open channel
-// database. If the channel was already removed (has a closed channel entry),
-// then we'll return a nil error. Otherwise, we'll insert a new close summary
-// into the database.
-func (db *DB) AbandonChannel(chanPoint *wire.OutPoint, bestHeight uint32) er.R {
- // With the chanPoint constructed, we'll attempt to find the target
- // channel in the database. If we can't find the channel, then we'll
- // return the error back to the caller.
- dbChan, err := db.FetchChannel(*chanPoint)
- switch {
- // If the channel wasn't found, then it's possible that it was already
- // abandoned from the database.
- case ErrChannelNotFound.Is(err):
- _, closedErr := db.FetchClosedChannel(chanPoint)
- if closedErr != nil {
- return closedErr
- }
-
- // If the channel was already closed, then we don't return an
- // error as we'd like fro this step to be repeatable.
- return nil
- case err != nil:
- return err
- }
-
- // Now that we've found the channel, we'll populate a close summary for
- // the channel, so we can store as much information for this abounded
- // channel as possible. We also ensure that we set Pending to false, to
- // indicate that this channel has been "fully" closed.
- summary := &ChannelCloseSummary{
- CloseType: Abandoned,
- ChanPoint: *chanPoint,
- ChainHash: dbChan.ChainHash,
- CloseHeight: bestHeight,
- RemotePub: dbChan.IdentityPub,
- Capacity: dbChan.Capacity,
- SettledBalance: dbChan.LocalCommitment.LocalBalance.ToSatoshis(),
- ShortChanID: dbChan.ShortChanID(),
- RemoteCurrentRevocation: dbChan.RemoteCurrentRevocation,
- RemoteNextRevocation: dbChan.RemoteNextRevocation,
- LocalChanConfig: dbChan.LocalChanCfg,
- }
-
- // Finally, we'll close the channel in the DB, and return back to the
- // caller. We set ourselves as the close initiator because we abandoned
- // the channel.
- return dbChan.CloseChannel(summary, ChanStatusLocalCloseInitiator)
-}
-
-// syncVersions function is used for safe db version synchronization. It
-// applies migration functions to the current database and recovers the
-// previous state of db if at least one error/panic appeared during migration.
-func (d *DB) syncVersions(versions []version) er.R {
- meta, err := d.FetchMeta(nil)
- if err != nil {
- if ErrMetaNotFound.Is(err) {
- meta = &Meta{}
- } else {
- return err
- }
- }
-
- latestVersion := getLatestDBVersion(versions)
- log.Infof("Checking for schema update: latest_version=%v, "+
- "db_version=%v", latestVersion, meta.DbVersionNumber)
-
- switch {
-
- // If the database reports a higher version that we are aware of, the
- // user is probably trying to revert to a prior version of lnd. We fail
- // here to prevent reversions and unintended corruption.
- case meta.DbVersionNumber > latestVersion:
- log.Errorf("Refusing to revert from db_version=%d to "+
- "lower version=%d", meta.DbVersionNumber,
- latestVersion)
- return ErrDBReversion.Default()
-
- // If the current database version matches the latest version number,
- // then we don't need to perform any migrations.
- case meta.DbVersionNumber == latestVersion:
- return nil
- }
-
- log.Infof("Performing database schema migration")
-
- // Otherwise, we fetch the migrations which need to applied, and
- // execute them serially within a single database transaction to ensure
- // the migration is atomic.
- migrations, migrationVersions := getMigrationsToApply(
- versions, meta.DbVersionNumber,
- )
- return kvdb.Update(d, func(tx kvdb.RwTx) er.R {
- for i, migration := range migrations {
- if migration == nil {
- continue
- }
-
- log.Infof("Applying migration #%v", migrationVersions[i])
-
- if err := migration(tx); err != nil {
- log.Infof("Unable to apply migration #%v",
- migrationVersions[i])
- return err
- }
- }
-
- meta.DbVersionNumber = latestVersion
- err := putMeta(meta, tx)
- if err != nil {
- return err
- }
-
- // In dry-run mode, return an error to prevent the transaction
- // from committing.
- if d.dryRun {
- return ErrDryRunMigrationOK.Default()
- }
-
- return nil
- }, func() {})
-}
-
-// ChannelGraph returns a new instance of the directed channel graph.
-func (d *DB) ChannelGraph() *ChannelGraph {
- return d.graph
-}
-
-func getLatestDBVersion(versions []version) uint32 {
- return versions[len(versions)-1].number
-}
-
-// getMigrationsToApply retrieves the migration function that should be
-// applied to the database.
-func getMigrationsToApply(versions []version, version uint32) ([]migration, []uint32) {
- migrations := make([]migration, 0, len(versions))
- migrationVersions := make([]uint32, 0, len(versions))
-
- for _, v := range versions {
- if v.number > version {
- migrations = append(migrations, v.migration)
- migrationVersions = append(migrationVersions, v.number)
- }
- }
-
- return migrations, migrationVersions
-}
-
-// fetchHistoricalChanBucket returns a the channel bucket for a given outpoint
-// from the historical channel bucket. If the bucket does not exist,
-// ErrNoHistoricalBucket is returned.
-func fetchHistoricalChanBucket(tx kvdb.RTx,
- outPoint *wire.OutPoint) (kvdb.RBucket, er.R) {
-
- // First fetch the top level bucket which stores all data related to
- // historically stored channels.
- historicalChanBucket := tx.ReadBucket(historicalChannelBucket)
- if historicalChanBucket == nil {
- return nil, ErrNoHistoricalBucket.Default()
- }
-
- // With the bucket for the node and chain fetched, we can now go down
- // another level, for the channel itself.
- var chanPointBuf bytes.Buffer
- if err := writeOutpoint(&chanPointBuf, outPoint); err != nil {
- return nil, err
- }
- chanBucket := historicalChanBucket.NestedReadBucket(chanPointBuf.Bytes())
- if chanBucket == nil {
- return nil, ErrChannelNotFound.Default()
- }
-
- return chanBucket, nil
-}
-
-// FetchHistoricalChannel fetches open channel data from the historical channel
-// bucket.
-func (db *DB) FetchHistoricalChannel(outPoint *wire.OutPoint) (*OpenChannel, er.R) {
- var channel *OpenChannel
- err := kvdb.View(db, func(tx kvdb.RTx) er.R {
- chanBucket, err := fetchHistoricalChanBucket(tx, outPoint)
- if err != nil {
- return err
- }
-
- channel, err = fetchOpenChannel(chanBucket, outPoint)
- return err
- }, func() {
- channel = nil
- })
- if err != nil {
- return nil, err
- }
-
- return channel, nil
-}
-
-// MakeTestDB creates a new instance of the ChannelDB for testing purposes.
-// A callback which cleans up the created temporary directories is also
-// returned and intended to be executed after the test completes.
-func MakeTestDB(modifiers ...OptionModifier) (*DB, func(), er.R) {
- // First, create a temporary directory to be used for the duration of
- // this test.
- tempDirName, errr := ioutil.TempDir("", "channeldb")
- if errr != nil {
- return nil, nil, er.E(errr)
- }
-
- // Next, create channeldb for the first time.
- backend, backendCleanup, err := kvdb.GetTestBackend(tempDirName, "cdb")
- if err != nil {
- backendCleanup()
- return nil, nil, err
- }
-
- cdb, err := CreateWithBackend(backend, modifiers...)
- if err != nil {
- backendCleanup()
- os.RemoveAll(tempDirName)
- return nil, nil, err
- }
-
- cleanUp := func() {
- cdb.Close()
- backendCleanup()
- os.RemoveAll(tempDirName)
- }
-
- return cdb, cleanUp, nil
-}
diff --git a/lnd/channeldb/db_test.go b/lnd/channeldb/db_test.go
deleted file mode 100644
index c1e3158a..00000000
--- a/lnd/channeldb/db_test.go
+++ /dev/null
@@ -1,742 +0,0 @@
-package channeldb
-
-import (
- "io/ioutil"
- "math"
- "math/rand"
- "net"
- "os"
- "path/filepath"
- "reflect"
- "testing"
-
- "github.com/davecgh/go-spew/spew"
- "github.com/pkt-cash/pktd/btcec"
- "github.com/pkt-cash/pktd/btcutil"
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/chaincfg/chainhash"
- "github.com/pkt-cash/pktd/lnd/channeldb/kvdb"
- "github.com/pkt-cash/pktd/lnd/keychain"
- "github.com/pkt-cash/pktd/lnd/lnwire"
- "github.com/pkt-cash/pktd/lnd/shachain"
- "github.com/pkt-cash/pktd/wire"
- "github.com/pkt-cash/pktd/wire/protocol"
-)
-
-func TestOpenWithCreate(t *testing.T) {
- t.Parallel()
-
- // First, create a temporary directory to be used for the duration of
- // this test.
- tempDirName, errr := ioutil.TempDir("", "channeldb")
- if errr != nil {
- t.Fatalf("unable to create temp dir: %v", errr)
- }
- defer os.RemoveAll(tempDirName)
-
- // Next, open thereby creating channeldb for the first time.
- dbPath := filepath.Join(tempDirName, "cdb")
- backend, cleanup, err := kvdb.GetTestBackend(dbPath, "cdb")
- if err != nil {
- t.Fatalf("unable to get test db backend: %v", err)
- }
- defer cleanup()
-
- cdb, err := CreateWithBackend(backend)
- if err != nil {
- t.Fatalf("unable to create channeldb: %v", err)
- }
- if err := cdb.Close(); err != nil {
- t.Fatalf("unable to close channeldb: %v", err)
- }
-
- // The path should have been successfully created.
- if !fileExists(dbPath) {
- t.Fatalf("channeldb failed to create data directory")
- }
-
- // Now, reopen the same db in dry run migration mode. Since we have not
- // applied any migrations, this should ignore the flag and not fail.
- cdb, err = Open(dbPath, OptionDryRunMigration(true))
- if err != nil {
- t.Fatalf("unable to create channeldb: %v", err)
- }
- if err := cdb.Close(); err != nil {
- t.Fatalf("unable to close channeldb: %v", err)
- }
-}
-
-// TestWipe tests that the database wipe operation completes successfully
-// and that the buckets are deleted. It also checks that attempts to fetch
-// information while the buckets are not set return the correct errors.
-func TestWipe(t *testing.T) {
- t.Parallel()
-
- // First, create a temporary directory to be used for the duration of
- // this test.
- tempDirName, errr := ioutil.TempDir("", "channeldb")
- if errr != nil {
- t.Fatalf("unable to create temp dir: %v", errr)
- }
- defer os.RemoveAll(tempDirName)
-
- // Next, open thereby creating channeldb for the first time.
- dbPath := filepath.Join(tempDirName, "cdb")
- backend, cleanup, err := kvdb.GetTestBackend(dbPath, "cdb")
- if err != nil {
- t.Fatalf("unable to get test db backend: %v", err)
- }
- defer cleanup()
-
- cdb, err := CreateWithBackend(backend)
- if err != nil {
- t.Fatalf("unable to create channeldb: %v", err)
- }
- defer cdb.Close()
-
- if err := cdb.Wipe(); err != nil {
- t.Fatalf("unable to wipe channeldb: %v", err)
- }
- // Check correct errors are returned
- _, err = cdb.FetchAllOpenChannels()
- if !ErrNoActiveChannels.Is(err) {
- t.Fatalf("fetching open channels: expected '%v' instead got '%v'",
- ErrNoActiveChannels, err)
- }
- _, err = cdb.FetchClosedChannels(false)
- if !ErrNoClosedChannels.Is(err) {
- t.Fatalf("fetching closed channels: expected '%v' instead got '%v'",
- ErrNoClosedChannels, err)
- }
-}
-
-// TestFetchClosedChannelForID tests that we are able to properly retrieve a
-// ChannelCloseSummary from the DB given a ChannelID.
-func TestFetchClosedChannelForID(t *testing.T) {
- t.Parallel()
-
- const numChans = 101
-
- cdb, cleanUp, err := MakeTestDB()
- if err != nil {
- t.Fatalf("unable to make test database: %v", err)
- }
- defer cleanUp()
-
- // Create the test channel state, that we will mutate the index of the
- // funding point.
- state := createTestChannelState(t, cdb)
-
- // Now run through the number of channels, and modify the outpoint index
- // to create new channel IDs.
- for i := uint32(0); i < numChans; i++ {
- // Save the open channel to disk.
- state.FundingOutpoint.Index = i
-
- // Write the channel to disk in a pending state.
- createTestChannel(
- t, cdb,
- fundingPointOption(state.FundingOutpoint),
- openChannelOption(),
- )
-
- // Close the channel. To make sure we retrieve the correct
- // summary later, we make them differ in the SettledBalance.
- closeSummary := &ChannelCloseSummary{
- ChanPoint: state.FundingOutpoint,
- RemotePub: state.IdentityPub,
- SettledBalance: btcutil.Amount(500 + i),
- }
- if err := state.CloseChannel(closeSummary); err != nil {
- t.Fatalf("unable to close channel: %v", err)
- }
- }
-
- // Now run though them all again and make sure we are able to retrieve
- // summaries from the DB.
- for i := uint32(0); i < numChans; i++ {
- state.FundingOutpoint.Index = i
-
- // We calculate the ChannelID and use it to fetch the summary.
- cid := lnwire.NewChanIDFromOutPoint(&state.FundingOutpoint)
- fetchedSummary, err := cdb.FetchClosedChannelForID(cid)
- if err != nil {
- t.Fatalf("unable to fetch close summary: %v", err)
- }
-
- // Make sure we retrieved the correct one by checking the
- // SettledBalance.
- if fetchedSummary.SettledBalance != btcutil.Amount(500+i) {
- t.Fatalf("summaries don't match: expected %v got %v",
- btcutil.Amount(500+i),
- fetchedSummary.SettledBalance)
- }
- }
-
- // As a final test we make sure that we get ErrClosedChannelNotFound
- // for a ChannelID we didn't add to the DB.
- state.FundingOutpoint.Index++
- cid := lnwire.NewChanIDFromOutPoint(&state.FundingOutpoint)
- _, err = cdb.FetchClosedChannelForID(cid)
- if !ErrClosedChannelNotFound.Is(err) {
- t.Fatalf("expected ErrClosedChannelNotFound, instead got: %v", err)
- }
-}
-
-// TestAddrsForNode tests the we're able to properly obtain all the addresses
-// for a target node.
-func TestAddrsForNode(t *testing.T) {
- t.Parallel()
-
- cdb, cleanUp, err := MakeTestDB()
- if err != nil {
- t.Fatalf("unable to make test database: %v", err)
- }
- defer cleanUp()
-
- graph := cdb.ChannelGraph()
-
- // We'll make a test vertex to insert into the database, as the source
- // node, but this node will only have half the number of addresses it
- // usually does.
- testNode, err := createTestVertex(cdb)
- if err != nil {
- t.Fatalf("unable to create test node: %v", err)
- }
- testNode.Addresses = []net.Addr{testAddr}
- if err := graph.SetSourceNode(testNode); err != nil {
- t.Fatalf("unable to set source node: %v", err)
- }
-
- // Next, we'll make a link node with the same pubkey, but with an
- // additional address.
- nodePub, err := testNode.PubKey()
- if err != nil {
- t.Fatalf("unable to recv node pub: %v", err)
- }
- linkNode := cdb.NewLinkNode(
- protocol.MainNet, nodePub, anotherAddr,
- )
- if err := linkNode.Sync(); err != nil {
- t.Fatalf("unable to sync link node: %v", err)
- }
-
- // Now that we've created a link node, as well as a vertex for the
- // node, we'll query for all its addresses.
- nodeAddrs, err := cdb.AddrsForNode(nodePub)
- if err != nil {
- t.Fatalf("unable to obtain node addrs: %v", err)
- }
-
- expectedAddrs := make(map[string]struct{})
- expectedAddrs[testAddr.String()] = struct{}{}
- expectedAddrs[anotherAddr.String()] = struct{}{}
-
- // Finally, ensure that all the expected addresses are found.
- if len(nodeAddrs) != len(expectedAddrs) {
- t.Fatalf("expected %v addrs, got %v",
- len(expectedAddrs), len(nodeAddrs))
- }
- for _, addr := range nodeAddrs {
- if _, ok := expectedAddrs[addr.String()]; !ok {
- t.Fatalf("unexpected addr: %v", addr)
- }
- }
-}
-
-// TestFetchChannel tests that we're able to fetch an arbitrary channel from
-// disk.
-func TestFetchChannel(t *testing.T) {
- t.Parallel()
-
- cdb, cleanUp, err := MakeTestDB()
- if err != nil {
- t.Fatalf("unable to make test database: %v", err)
- }
- defer cleanUp()
-
- // Create an open channel.
- channelState := createTestChannel(t, cdb, openChannelOption())
-
- // Next, attempt to fetch the channel by its chan point.
- dbChannel, err := cdb.FetchChannel(channelState.FundingOutpoint)
- if err != nil {
- t.Fatalf("unable to fetch channel: %v", err)
- }
-
- // The decoded channel state should be identical to what we stored
- // above.
- if !reflect.DeepEqual(channelState, dbChannel) {
- t.Fatalf("channel state doesn't match:: %v vs %v",
- spew.Sdump(channelState), spew.Sdump(dbChannel))
- }
-
- // If we attempt to query for a non-exist ante channel, then we should
- // get an error.
- channelState2 := createTestChannelState(t, cdb)
- if err != nil {
- t.Fatalf("unable to create channel state: %v", err)
- }
- channelState2.FundingOutpoint.Index ^= 1
-
- _, err = cdb.FetchChannel(channelState2.FundingOutpoint)
- if err == nil {
- t.Fatalf("expected query to fail")
- }
-}
-
-func genRandomChannelShell() (*ChannelShell, er.R) {
- var testPriv [32]byte
- if _, err := rand.Read(testPriv[:]); err != nil {
- return nil, er.E(err)
- }
-
- _, pub := btcec.PrivKeyFromBytes(btcec.S256(), testPriv[:])
-
- var chanPoint wire.OutPoint
- if _, err := rand.Read(chanPoint.Hash[:]); err != nil {
- return nil, er.E(err)
- }
-
- pub.Curve = nil
-
- chanPoint.Index = uint32(rand.Intn(math.MaxUint16))
-
- chanStatus := ChanStatusDefault | ChanStatusRestored
-
- var shaChainPriv [32]byte
- if _, err := rand.Read(testPriv[:]); err != nil {
- return nil, er.E(err)
- }
- revRoot, err := chainhash.NewHash(shaChainPriv[:])
- if err != nil {
- return nil, err
- }
- shaChainProducer := shachain.NewRevocationProducer(*revRoot)
-
- return &ChannelShell{
- NodeAddrs: []net.Addr{&net.TCPAddr{
- IP: net.ParseIP("127.0.0.1"),
- Port: 18555,
- }},
- Chan: &OpenChannel{
- chanStatus: chanStatus,
- ChainHash: rev,
- FundingOutpoint: chanPoint,
- ShortChannelID: lnwire.NewShortChanIDFromInt(
- uint64(rand.Int63()),
- ),
- IdentityPub: pub,
- LocalChanCfg: ChannelConfig{
- ChannelConstraints: ChannelConstraints{
- CsvDelay: uint16(rand.Int63()),
- },
- PaymentBasePoint: keychain.KeyDescriptor{
- KeyLocator: keychain.KeyLocator{
- Family: keychain.KeyFamily(rand.Int63()),
- Index: uint32(rand.Int63()),
- },
- },
- },
- RemoteCurrentRevocation: pub,
- IsPending: false,
- RevocationStore: shachain.NewRevocationStore(),
- RevocationProducer: shaChainProducer,
- },
- }, nil
-}
-
-// TestRestoreChannelShells tests that we're able to insert a partially channel
-// populated to disk. This is useful for channel recovery purposes. We should
-// find the new channel shell on disk, and also the db should be populated with
-// an edge for that channel.
-func TestRestoreChannelShells(t *testing.T) {
- t.Parallel()
-
- cdb, cleanUp, err := MakeTestDB()
- if err != nil {
- t.Fatalf("unable to make test database: %v", err)
- }
- defer cleanUp()
-
- // First, we'll make our channel shell, it will only have the minimal
- // amount of information required for us to initiate the data loss
- // protection feature.
- channelShell, err := genRandomChannelShell()
- if err != nil {
- t.Fatalf("unable to gen channel shell: %v", err)
- }
-
- // With the channel shell constructed, we'll now insert it into the
- // database with the restoration method.
- if err := cdb.RestoreChannelShells(channelShell); err != nil {
- t.Fatalf("unable to restore channel shell: %v", err)
- }
-
- // Now that the channel has been inserted, we'll attempt to query for
- // it to ensure we can properly locate it via various means.
- //
- // First, we'll attempt to query for all channels that we have with the
- // node public key that was restored.
- nodeChans, err := cdb.FetchOpenChannels(channelShell.Chan.IdentityPub)
- if err != nil {
- t.Fatalf("unable find channel: %v", err)
- }
-
- // We should now find a single channel from the database.
- if len(nodeChans) != 1 {
- t.Fatalf("unable to find restored channel by node "+
- "pubkey: %v", err)
- }
-
- // Ensure that it isn't possible to modify the commitment state machine
- // of this restored channel.
- channel := nodeChans[0]
- err = channel.UpdateCommitment(nil, nil)
- if !ErrNoRestoredChannelMutation.Is(err) {
- t.Fatalf("able to mutate restored channel")
- }
- err = channel.AppendRemoteCommitChain(nil)
- if !ErrNoRestoredChannelMutation.Is(err) {
- t.Fatalf("able to mutate restored channel")
- }
- err = channel.AdvanceCommitChainTail(nil, nil)
- if !ErrNoRestoredChannelMutation.Is(err) {
- t.Fatalf("able to mutate restored channel")
- }
-
- // That single channel should have the proper channel point, and also
- // the expected set of flags to indicate that it was a restored
- // channel.
- if nodeChans[0].FundingOutpoint != channelShell.Chan.FundingOutpoint {
- t.Fatalf("wrong funding outpoint: expected %v, got %v",
- nodeChans[0].FundingOutpoint,
- channelShell.Chan.FundingOutpoint)
- }
- if !nodeChans[0].HasChanStatus(ChanStatusRestored) {
- t.Fatalf("node has wrong status flags: %v",
- nodeChans[0].chanStatus)
- }
-
- // We should also be able to find the channel if we query for it
- // directly.
- _, err = cdb.FetchChannel(channelShell.Chan.FundingOutpoint)
- if err != nil {
- t.Fatalf("unable to fetch channel: %v", err)
- }
-
- // We should also be able to find the link node that was inserted by
- // its public key.
- linkNode, err := cdb.FetchLinkNode(channelShell.Chan.IdentityPub)
- if err != nil {
- t.Fatalf("unable to fetch link node: %v", err)
- }
-
- // The node should have the same address, as specified in the channel
- // shell.
- if reflect.DeepEqual(linkNode.Addresses, channelShell.NodeAddrs) {
- t.Fatalf("addr mismach: expected %v, got %v",
- linkNode.Addresses, channelShell.NodeAddrs)
- }
-}
-
-// TestAbandonChannel tests that the AbandonChannel method is able to properly
-// remove a channel from the database and add a close channel summary. If
-// called after a channel has already been removed, the method shouldn't return
-// an error.
-func TestAbandonChannel(t *testing.T) {
- t.Parallel()
-
- cdb, cleanUp, err := MakeTestDB()
- if err != nil {
- t.Fatalf("unable to make test database: %v", err)
- }
- defer cleanUp()
-
- // If we attempt to abandon the state of a channel that doesn't exist
- // in the open or closed channel bucket, then we should receive an
- // error.
- err = cdb.AbandonChannel(&wire.OutPoint{}, 0)
- if err == nil {
- t.Fatalf("removing non-existent channel should have failed")
- }
-
- // We'll now create a new channel in a pending state to abandon
- // shortly.
- chanState := createTestChannel(t, cdb)
-
- // We should now be able to abandon the channel without any errors.
- closeHeight := uint32(11)
- err = cdb.AbandonChannel(&chanState.FundingOutpoint, closeHeight)
- if err != nil {
- t.Fatalf("unable to abandon channel: %v", err)
- }
-
- // At this point, the channel should no longer be found in the set of
- // open channels.
- _, err = cdb.FetchChannel(chanState.FundingOutpoint)
- if !ErrChannelNotFound.Is(err) {
- t.Fatalf("channel should not have been found: %v", err)
- }
-
- // However we should be able to retrieve a close channel summary for
- // the channel.
- _, err = cdb.FetchClosedChannel(&chanState.FundingOutpoint)
- if err != nil {
- t.Fatalf("unable to fetch closed channel: %v", err)
- }
-
- // Finally, if we attempt to abandon the channel again, we should get a
- // nil error as the channel has already been abandoned.
- err = cdb.AbandonChannel(&chanState.FundingOutpoint, closeHeight)
- if err != nil {
- t.Fatalf("unable to abandon channel: %v", err)
- }
-}
-
-// TestFetchChannels tests the filtering of open channels in fetchChannels.
-// It tests the case where no filters are provided (which is equivalent to
-// FetchAllOpenChannels) and every combination of pending and waiting close.
-func TestFetchChannels(t *testing.T) {
- // Create static channel IDs for each kind of channel retrieved by
- // fetchChannels so that the expected channel IDs can be set in tests.
- var (
- // Pending is a channel that is pending open, and has not had
- // a close initiated.
- pendingChan = lnwire.NewShortChanIDFromInt(1)
-
- // pendingWaitingClose is a channel that is pending open and
- // has has its closing transaction broadcast.
- pendingWaitingChan = lnwire.NewShortChanIDFromInt(2)
-
- // openChan is a channel that has confirmed on chain.
- openChan = lnwire.NewShortChanIDFromInt(3)
-
- // openWaitingChan is a channel that has confirmed on chain,
- // and it waiting for its close transaction to confirm.
- openWaitingChan = lnwire.NewShortChanIDFromInt(4)
- )
-
- tests := []struct {
- name string
- filters []fetchChannelsFilter
- expectedChannels map[lnwire.ShortChannelID]bool
- }{
- {
- name: "get all channels",
- filters: []fetchChannelsFilter{},
- expectedChannels: map[lnwire.ShortChannelID]bool{
- pendingChan: true,
- pendingWaitingChan: true,
- openChan: true,
- openWaitingChan: true,
- },
- },
- {
- name: "pending channels",
- filters: []fetchChannelsFilter{
- pendingChannelFilter(true),
- },
- expectedChannels: map[lnwire.ShortChannelID]bool{
- pendingChan: true,
- pendingWaitingChan: true,
- },
- },
- {
- name: "open channels",
- filters: []fetchChannelsFilter{
- pendingChannelFilter(false),
- },
- expectedChannels: map[lnwire.ShortChannelID]bool{
- openChan: true,
- openWaitingChan: true,
- },
- },
- {
- name: "waiting close channels",
- filters: []fetchChannelsFilter{
- waitingCloseFilter(true),
- },
- expectedChannels: map[lnwire.ShortChannelID]bool{
- pendingWaitingChan: true,
- openWaitingChan: true,
- },
- },
- {
- name: "not waiting close channels",
- filters: []fetchChannelsFilter{
- waitingCloseFilter(false),
- },
- expectedChannels: map[lnwire.ShortChannelID]bool{
- pendingChan: true,
- openChan: true,
- },
- },
- {
- name: "pending waiting",
- filters: []fetchChannelsFilter{
- pendingChannelFilter(true),
- waitingCloseFilter(true),
- },
- expectedChannels: map[lnwire.ShortChannelID]bool{
- pendingWaitingChan: true,
- },
- },
- {
- name: "pending, not waiting",
- filters: []fetchChannelsFilter{
- pendingChannelFilter(true),
- waitingCloseFilter(false),
- },
- expectedChannels: map[lnwire.ShortChannelID]bool{
- pendingChan: true,
- },
- },
- {
- name: "open waiting",
- filters: []fetchChannelsFilter{
- pendingChannelFilter(false),
- waitingCloseFilter(true),
- },
- expectedChannels: map[lnwire.ShortChannelID]bool{
- openWaitingChan: true,
- },
- },
- {
- name: "open, not waiting",
- filters: []fetchChannelsFilter{
- pendingChannelFilter(false),
- waitingCloseFilter(false),
- },
- expectedChannels: map[lnwire.ShortChannelID]bool{
- openChan: true,
- },
- },
- }
-
- for _, test := range tests {
- test := test
-
- t.Run(test.name, func(t *testing.T) {
- t.Parallel()
-
- cdb, cleanUp, err := MakeTestDB()
- if err != nil {
- t.Fatalf("unable to make test "+
- "database: %v", err)
- }
- defer cleanUp()
-
- // Create a pending channel that is not awaiting close.
- createTestChannel(
- t, cdb, channelIDOption(pendingChan),
- )
-
- // Create a pending channel which has has been marked as
- // broadcast, indicating that its closing transaction is
- // waiting to confirm.
- pendingClosing := createTestChannel(
- t, cdb,
- channelIDOption(pendingWaitingChan),
- )
-
- err = pendingClosing.MarkCoopBroadcasted(nil, true)
- if err != nil {
- t.Fatalf("unexpected error: %v", err)
- }
-
- // Create a open channel that is not awaiting close.
- createTestChannel(
- t, cdb,
- channelIDOption(openChan),
- openChannelOption(),
- )
-
- // Create a open channel which has has been marked as
- // broadcast, indicating that its closing transaction is
- // waiting to confirm.
- openClosing := createTestChannel(
- t, cdb,
- channelIDOption(openWaitingChan),
- openChannelOption(),
- )
- err = openClosing.MarkCoopBroadcasted(nil, true)
- if err != nil {
- t.Fatalf("unexpected error: %v", err)
- }
-
- channels, err := fetchChannels(cdb, test.filters...)
- if err != nil {
- t.Fatalf("unexpected error: %v", err)
- }
-
- if len(channels) != len(test.expectedChannels) {
- t.Fatalf("expected: %v channels, "+
- "got: %v", len(test.expectedChannels),
- len(channels))
- }
-
- for _, ch := range channels {
- _, ok := test.expectedChannels[ch.ShortChannelID]
- if !ok {
- t.Fatalf("fetch channels unexpected "+
- "channel: %v", ch.ShortChannelID)
- }
- }
- })
- }
-}
-
-// TestFetchHistoricalChannel tests lookup of historical channels.
-func TestFetchHistoricalChannel(t *testing.T) {
- cdb, cleanUp, err := MakeTestDB()
- if err != nil {
- t.Fatalf("unable to make test database: %v", err)
- }
- defer cleanUp()
-
- // Create a an open channel in the database.
- channel := createTestChannel(t, cdb, openChannelOption())
-
- // First, try to lookup a channel when the bucket does not
- // exist.
- _, err = cdb.FetchHistoricalChannel(&channel.FundingOutpoint)
- if !ErrNoHistoricalBucket.Is(err) {
- t.Fatalf("expected no bucket, got: %v", err)
- }
-
- // Close the channel so that it will be written to the historical
- // bucket. The values provided in the channel close summary are the
- // minimum required for this call to run without panicking.
- if err := channel.CloseChannel(&ChannelCloseSummary{
- ChanPoint: channel.FundingOutpoint,
- RemotePub: channel.IdentityPub,
- SettledBalance: btcutil.Amount(500),
- }); err != nil {
- t.Fatalf("unexpected error closing channel: %v", err)
- }
-
- histChannel, err := cdb.FetchHistoricalChannel(&channel.FundingOutpoint)
- if err != nil {
- t.Fatalf("unexepected error getting channel: %v", err)
- }
-
- // Set the db on our channel to nil so that we can check that all other
- // fields on the channel equal those on the historical channel.
- channel.Db = nil
-
- if !reflect.DeepEqual(histChannel, channel) {
- t.Fatalf("expected: %v, got: %v", channel, histChannel)
- }
-
- // Create an outpoint that will not be in the db and look it up.
- badOutpoint := &wire.OutPoint{
- Hash: channel.FundingOutpoint.Hash,
- Index: channel.FundingOutpoint.Index + 1,
- }
- _, err = cdb.FetchHistoricalChannel(badOutpoint)
- if !ErrChannelNotFound.Is(err) {
- t.Fatalf("expected chan not found, got: %v", err)
- }
-
-}
diff --git a/lnd/channeldb/doc.go b/lnd/channeldb/doc.go
deleted file mode 100644
index d03b3406..00000000
--- a/lnd/channeldb/doc.go
+++ /dev/null
@@ -1 +0,0 @@
-package channeldb
diff --git a/lnd/channeldb/duplicate_payments.go b/lnd/channeldb/duplicate_payments.go
deleted file mode 100644
index 87dea620..00000000
--- a/lnd/channeldb/duplicate_payments.go
+++ /dev/null
@@ -1,247 +0,0 @@
-package channeldb
-
-import (
- "bytes"
- "encoding/binary"
- "io"
- "time"
-
- "github.com/pkt-cash/pktd/btcec"
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/btcutil/util"
- "github.com/pkt-cash/pktd/lnd/channeldb/kvdb"
- "github.com/pkt-cash/pktd/lnd/lntypes"
- "github.com/pkt-cash/pktd/lnd/lnwire"
- "github.com/pkt-cash/pktd/lnd/routing/route"
-)
-
-var (
- // duplicatePaymentsBucket is the name of a optional sub-bucket within
- // the payment hash bucket, that is used to hold duplicate payments to a
- // payment hash. This is needed to support information from earlier
- // versions of lnd, where it was possible to pay to a payment hash more
- // than once.
- duplicatePaymentsBucket = []byte("payment-duplicate-bucket")
-
- // duplicatePaymentSettleInfoKey is a key used in the payment's
- // sub-bucket to store the settle info of the payment.
- duplicatePaymentSettleInfoKey = []byte("payment-settle-info")
-
- // duplicatePaymentAttemptInfoKey is a key used in the payment's
- // sub-bucket to store the info about the latest attempt that was done
- // for the payment in question.
- duplicatePaymentAttemptInfoKey = []byte("payment-attempt-info")
-
- // duplicatePaymentCreationInfoKey is a key used in the payment's
- // sub-bucket to store the creation info of the payment.
- duplicatePaymentCreationInfoKey = []byte("payment-creation-info")
-
- // duplicatePaymentFailInfoKey is a key used in the payment's sub-bucket
- // to store information about the reason a payment failed.
- duplicatePaymentFailInfoKey = []byte("payment-fail-info")
-
- // duplicatePaymentSequenceKey is a key used in the payment's sub-bucket
- // to store the sequence number of the payment.
- duplicatePaymentSequenceKey = []byte("payment-sequence-key")
-)
-
-// duplicateHTLCAttemptInfo contains static information about a specific HTLC
-// attempt for a payment. This information is used by the router to handle any
-// errors coming back after an attempt is made, and to query the switch about
-// the status of the attempt.
-type duplicateHTLCAttemptInfo struct {
- // attemptID is the unique ID used for this attempt.
- attemptID uint64
-
- // sessionKey is the ephemeral key used for this attempt.
- sessionKey *btcec.PrivateKey
-
- // route is the route attempted to send the HTLC.
- route route.Route
-}
-
-// fetchDuplicatePaymentStatus fetches the payment status of the payment. If the
-// payment isn't found, it will default to "StatusUnknown".
-func fetchDuplicatePaymentStatus(bucket kvdb.RBucket) PaymentStatus {
- if bucket.Get(duplicatePaymentSettleInfoKey) != nil {
- return StatusSucceeded
- }
-
- if bucket.Get(duplicatePaymentFailInfoKey) != nil {
- return StatusFailed
- }
-
- if bucket.Get(duplicatePaymentCreationInfoKey) != nil {
- return StatusInFlight
- }
-
- return StatusUnknown
-}
-
-func deserializeDuplicateHTLCAttemptInfo(r io.Reader) (
- *duplicateHTLCAttemptInfo, er.R) {
-
- a := &duplicateHTLCAttemptInfo{}
- err := ReadElements(r, &a.attemptID, &a.sessionKey)
- if err != nil {
- return nil, err
- }
- a.route, err = DeserializeRoute(r)
- if err != nil {
- return nil, err
- }
- return a, nil
-}
-
-func deserializeDuplicatePaymentCreationInfo(r io.Reader) (
- *PaymentCreationInfo, er.R) {
-
- var scratch [8]byte
-
- c := &PaymentCreationInfo{}
-
- if _, err := util.ReadFull(r, c.PaymentHash[:]); err != nil {
- return nil, err
- }
-
- if _, err := util.ReadFull(r, scratch[:]); err != nil {
- return nil, err
- }
- c.Value = lnwire.MilliSatoshi(byteOrder.Uint64(scratch[:]))
-
- if _, err := util.ReadFull(r, scratch[:]); err != nil {
- return nil, err
- }
- c.CreationTime = time.Unix(int64(byteOrder.Uint64(scratch[:])), 0)
-
- if _, err := util.ReadFull(r, scratch[:4]); err != nil {
- return nil, err
- }
-
- reqLen := byteOrder.Uint32(scratch[:4])
- payReq := make([]byte, reqLen)
- if reqLen > 0 {
- if _, err := util.ReadFull(r, payReq); err != nil {
- return nil, err
- }
- }
- c.PaymentRequest = payReq
-
- return c, nil
-}
-
-func fetchDuplicatePayment(bucket kvdb.RBucket) (*MPPayment, er.R) {
- seqBytes := bucket.Get(duplicatePaymentSequenceKey)
- if seqBytes == nil {
- return nil, er.Errorf("sequence number not found")
- }
-
- sequenceNum := binary.BigEndian.Uint64(seqBytes)
-
- // Get the payment status.
- paymentStatus := fetchDuplicatePaymentStatus(bucket)
-
- // Get the PaymentCreationInfo.
- b := bucket.Get(duplicatePaymentCreationInfoKey)
- if b == nil {
- return nil, er.Errorf("creation info not found")
- }
-
- r := bytes.NewReader(b)
- creationInfo, err := deserializeDuplicatePaymentCreationInfo(r)
- if err != nil {
- return nil, err
-
- }
-
- // Get failure reason if available.
- var failureReason *FailureReason
- b = bucket.Get(duplicatePaymentFailInfoKey)
- if b != nil {
- reason := FailureReason(b[0])
- failureReason = &reason
- }
-
- payment := &MPPayment{
- SequenceNum: sequenceNum,
- Info: creationInfo,
- FailureReason: failureReason,
- Status: paymentStatus,
- }
-
- // Get the HTLCAttemptInfo. It can be absent.
- b = bucket.Get(duplicatePaymentAttemptInfoKey)
- if b != nil {
- r = bytes.NewReader(b)
- attempt, err := deserializeDuplicateHTLCAttemptInfo(r)
- if err != nil {
- return nil, err
- }
-
- htlc := HTLCAttempt{
- HTLCAttemptInfo: HTLCAttemptInfo{
- AttemptID: attempt.attemptID,
- Route: attempt.route,
- SessionKey: attempt.sessionKey,
- },
- }
-
- // Get the payment preimage. This is only found for
- // successful payments.
- b = bucket.Get(duplicatePaymentSettleInfoKey)
- if b != nil {
- var preimg lntypes.Preimage
- copy(preimg[:], b)
-
- htlc.Settle = &HTLCSettleInfo{
- Preimage: preimg,
- SettleTime: time.Time{},
- }
- } else {
- // Otherwise the payment must have failed.
- htlc.Failure = &HTLCFailInfo{
- FailTime: time.Time{},
- }
- }
-
- payment.HTLCs = []HTLCAttempt{htlc}
- }
-
- return payment, nil
-}
-
-func fetchDuplicatePayments(paymentHashBucket kvdb.RBucket) ([]*MPPayment,
- er.R) {
-
- var payments []*MPPayment
-
- // For older versions of lnd, duplicate payments to a payment has was
- // possible. These will be found in a sub-bucket indexed by their
- // sequence number if available.
- dup := paymentHashBucket.NestedReadBucket(duplicatePaymentsBucket)
- if dup == nil {
- return nil, nil
- }
-
- err := dup.ForEach(func(k, v []byte) er.R {
- subBucket := dup.NestedReadBucket(k)
- if subBucket == nil {
- // We one bucket for each duplicate to be found.
- return er.Errorf("non bucket element" +
- "in duplicate bucket")
- }
-
- p, err := fetchDuplicatePayment(subBucket)
- if err != nil {
- return err
- }
-
- payments = append(payments, p)
- return nil
- })
- if err != nil {
- return nil, err
- }
-
- return payments, nil
-}
diff --git a/lnd/channeldb/error.go b/lnd/channeldb/error.go
deleted file mode 100644
index 6b21edf0..00000000
--- a/lnd/channeldb/error.go
+++ /dev/null
@@ -1,162 +0,0 @@
-package channeldb
-
-import (
- "fmt"
-
- "github.com/pkt-cash/pktd/btcutil/er"
-)
-
-var (
- Err = er.NewErrorType("lnd.channeldb")
- // ErrNoChanDBExists is returned when a channel bucket hasn't been
- // created.
- ErrNoChanDBExists = Err.CodeWithDetail("ErrNoChanDBExists",
- "channel db has not yet been created")
-
- // ErrNoHistoricalBucket is returned when the historical channel bucket
- // not been created yet.
- ErrNoHistoricalBucket = Err.CodeWithDetail("ErrNoHistoricalBucket",
- "historical channel bucket has not yet been created")
-
- // ErrDBReversion is returned when detecting an attempt to revert to a
- // prior database version.
- ErrDBReversion = Err.CodeWithDetail("ErrDBReversion",
- "channel db cannot revert to prior version")
-
- // ErrLinkNodesNotFound is returned when node info bucket hasn't been
- // created.
- ErrLinkNodesNotFound = Err.CodeWithDetail("ErrLinkNodesNotFound",
- "no link nodes exist")
-
- // ErrNoActiveChannels is returned when there is no active (open)
- // channels within the database.
- ErrNoActiveChannels = Err.CodeWithDetail("ErrNoActiveChannels",
- "no active channels exist")
-
- // ErrNoPastDeltas is returned when the channel delta bucket hasn't been
- // created.
- ErrNoPastDeltas = Err.CodeWithDetail("ErrNoPastDeltas",
- "channel has no recorded deltas")
-
- // ErrInvoiceNotFound is returned when a targeted invoice can't be
- // found.
- ErrInvoiceNotFound = Err.CodeWithDetail("ErrInvoiceNotFound",
- "unable to locate invoice")
-
- // ErrNoInvoicesCreated is returned when we don't have invoices in
- // our database to return.
- ErrNoInvoicesCreated = Err.CodeWithDetail("ErrNoInvoicesCreated",
- "there are no existing invoices")
-
- // ErrDuplicateInvoice is returned when an invoice with the target
- // payment hash already exists.
- ErrDuplicateInvoice = Err.CodeWithDetail("ErrDuplicateInvoice",
- "invoice with payment hash already exists")
-
- // ErrDuplicatePayAddr is returned when an invoice with the target
- // payment addr already exists.
- ErrDuplicatePayAddr = Err.CodeWithDetail("ErrDuplicatePayAddr",
- "invoice with payemnt addr already exists")
-
- // ErrInvRefEquivocation is returned when an InvoiceRef targets
- // multiple, distinct invoices.
- ErrInvRefEquivocation = Err.CodeWithDetail("ErrInvRefEquivocation", "inv ref matches multiple invoices")
-
- // ErrNoPaymentsCreated is returned when bucket of payments hasn't been
- // created.
- ErrNoPaymentsCreated = Err.CodeWithDetail("ErrNoPaymentsCreated",
- "there are no existing payments")
-
- // ErrNodeNotFound is returned when node bucket exists, but node with
- // specific identity can't be found.
- ErrNodeNotFound = Err.CodeWithDetail("ErrNodeNotFound",
- "link node with target identity not found")
-
- // ErrChannelNotFound is returned when we attempt to locate a channel
- // for a specific chain, but it is not found.
- ErrChannelNotFound = Err.CodeWithDetail("ErrChannelNotFound",
- "channel not found")
-
- // ErrMetaNotFound is returned when meta bucket hasn't been
- // created.
- ErrMetaNotFound = Err.CodeWithDetail("ErrMetaNotFound",
- "unable to locate meta information")
-
- // ErrGraphNotFound is returned when at least one of the components of
- // graph doesn't exist.
- ErrGraphNotFound = Err.CodeWithDetail("ErrGraphNotFound",
- "graph bucket not initialized")
-
- // ErrGraphNeverPruned is returned when graph was never pruned.
- ErrGraphNeverPruned = Err.CodeWithDetail("ErrGraphNeverPruned",
- "graph never pruned")
-
- // ErrSourceNodeNotSet is returned if the source node of the graph
- // hasn't been added The source node is the center node within a
- // star-graph.
- ErrSourceNodeNotSet = Err.CodeWithDetail("ErrSourceNodeNotSet",
- "source node does not exist")
-
- // ErrGraphNodesNotFound is returned in case none of the nodes has
- // been added in graph node bucket.
- ErrGraphNodesNotFound = Err.CodeWithDetail("ErrGraphNodesNotFound",
- "no graph nodes exist")
-
- // ErrGraphNoEdgesFound is returned in case of none of the channel/edges
- // has been added in graph edge bucket.
- ErrGraphNoEdgesFound = Err.CodeWithDetail("ErrGraphNoEdgesFound",
- "no graph edges exist")
-
- // ErrGraphNodeNotFound is returned when we're unable to find the target
- // node.
- ErrGraphNodeNotFound = Err.CodeWithDetail("ErrGraphNodeNotFound",
- "unable to find node")
-
- // ErrEdgeNotFound is returned when an edge for the target chanID
- // can't be found.
- ErrEdgeNotFound = Err.CodeWithDetail("ErrEdgeNotFound",
- "edge not found")
-
- // ErrZombieEdge is an error returned when we attempt to look up an edge
- // but it is marked as a zombie within the zombie index.
- ErrZombieEdge = Err.CodeWithDetail("ErrZombieEdge", "edge marked as zombie")
-
- // ErrEdgeAlreadyExist is returned when edge with specific
- // channel id can't be added because it already exist.
- ErrEdgeAlreadyExist = Err.CodeWithDetail("ErrEdgeAlreadyExist",
- "edge already exist")
-
- // ErrNodeAliasNotFound is returned when alias for node can't be found.
- ErrNodeAliasNotFound = Err.CodeWithDetail("ErrNodeAliasNotFound",
- "alias for node not found")
-
- // ErrUnknownAddressType is returned when a node's addressType is not
- // an expected value.
- ErrUnknownAddressType = Err.CodeWithDetail("ErrUnknownAddressType",
- "address type cannot be resolved")
-
- // ErrNoClosedChannels is returned when a node is queries for all the
- // channels it has closed, but it hasn't yet closed any channels.
- ErrNoClosedChannels = Err.CodeWithDetail("ErrNoClosedChannels",
- "no channel have been closed yet")
-
- // ErrNoForwardingEvents is returned in the case that a query fails due
- // to the log not having any recorded events.
- ErrNoForwardingEvents = Err.CodeWithDetail("ErrNoForwardingEvents",
- "no recorded forwarding events")
-
- // ErrEdgePolicyOptionalFieldNotFound is an error returned if a channel
- // policy field is not found in the db even though its message flags
- // indicate it should be.
- ErrEdgePolicyOptionalFieldNotFound = Err.CodeWithDetail("ErrEdgePolicyOptionalFieldNotFound",
- "optional field not present")
-
- // ErrChanAlreadyExists is return when the caller attempts to create a
- // channel with a channel point that is already present in the
- // database.
- ErrChanAlreadyExists = Err.CodeWithDetail("ErrChanAlreadyExists",
- "channel already exists")
-
- ErrTooManyExtraOpaqueBytes = Err.CodeWithDetail("ErrTooManyExtraOpaqueBytes",
- fmt.Sprintf("max allowed number of opaque bytes is %v", MaxAllowedExtraOpaqueBytes))
-)
diff --git a/lnd/channeldb/fees.go b/lnd/channeldb/fees.go
deleted file mode 100644
index d03b3406..00000000
--- a/lnd/channeldb/fees.go
+++ /dev/null
@@ -1 +0,0 @@
-package channeldb
diff --git a/lnd/channeldb/forwarding_log.go b/lnd/channeldb/forwarding_log.go
deleted file mode 100644
index aee3e78d..00000000
--- a/lnd/channeldb/forwarding_log.go
+++ /dev/null
@@ -1,342 +0,0 @@
-package channeldb
-
-import (
- "bytes"
- "io"
- "sort"
- "time"
-
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/lnd/channeldb/kvdb"
- "github.com/pkt-cash/pktd/lnd/lnwire"
- "github.com/pkt-cash/pktd/pktwallet/walletdb"
-)
-
-var (
- // forwardingLogBucket is the bucket that we'll use to store the
- // forwarding log. The forwarding log contains a time series database
- // of the forwarding history of a lightning daemon. Each key within the
- // bucket is a timestamp (in nano seconds since the unix epoch), and
- // the value a slice of a forwarding event for that timestamp.
- forwardingLogBucket = []byte("circuit-fwd-log")
-)
-
-const (
- // forwardingEventSize is the size of a forwarding event. The breakdown
- // is as follows:
- //
- // * 8 byte incoming chan ID || 8 byte outgoing chan ID || 8 byte value in
- // || 8 byte value out
- //
- // From the value in and value out, callers can easily compute the
- // total fee extract from a forwarding event.
- forwardingEventSize = 32
-
- // MaxResponseEvents is the max number of forwarding events that will
- // be returned by a single query response. This size was selected to
- // safely remain under gRPC's 4MiB message size response limit. As each
- // full forwarding event (including the timestamp) is 40 bytes, we can
- // safely return 50k entries in a single response.
- MaxResponseEvents = 50000
-)
-
-// ForwardingLog returns an instance of the ForwardingLog object backed by the
-// target database instance.
-func (d *DB) ForwardingLog() *ForwardingLog {
- return &ForwardingLog{
- db: d,
- }
-}
-
-// ForwardingLog is a time series database that logs the fulfilment of payment
-// circuits by a lightning network daemon. The log contains a series of
-// forwarding events which map a timestamp to a forwarding event. A forwarding
-// event describes which channels were used to create+settle a circuit, and the
-// amount involved. Subtracting the outgoing amount from the incoming amount
-// reveals the fee charged for the forwarding service.
-type ForwardingLog struct {
- db *DB
-}
-
-// ForwardingEvent is an event in the forwarding log's time series. Each
-// forwarding event logs the creation and tear-down of a payment circuit. A
-// circuit is created once an incoming HTLC has been fully forwarded, and
-// destroyed once the payment has been settled.
-type ForwardingEvent struct {
- // Timestamp is the settlement time of this payment circuit.
- Timestamp time.Time
-
- // IncomingChanID is the incoming channel ID of the payment circuit.
- IncomingChanID lnwire.ShortChannelID
-
- // OutgoingChanID is the outgoing channel ID of the payment circuit.
- OutgoingChanID lnwire.ShortChannelID
-
- // AmtIn is the amount of the incoming HTLC. Subtracting this from the
- // outgoing amount gives the total fees of this payment circuit.
- AmtIn lnwire.MilliSatoshi
-
- // AmtOut is the amount of the outgoing HTLC. Subtracting the incoming
- // amount from this gives the total fees for this payment circuit.
- AmtOut lnwire.MilliSatoshi
-}
-
-// encodeForwardingEvent writes out the target forwarding event to the passed
-// io.Writer, using the expected DB format. Note that the timestamp isn't
-// serialized as this will be the key value within the bucket.
-func encodeForwardingEvent(w io.Writer, f *ForwardingEvent) er.R {
- return WriteElements(
- w, f.IncomingChanID, f.OutgoingChanID, f.AmtIn, f.AmtOut,
- )
-}
-
-// decodeForwardingEvent attempts to decode the raw bytes of a serialized
-// forwarding event into the target ForwardingEvent. Note that the timestamp
-// won't be decoded, as the caller is expected to set this due to the bucket
-// structure of the forwarding log.
-func decodeForwardingEvent(r io.Reader, f *ForwardingEvent) er.R {
- return ReadElements(
- r, &f.IncomingChanID, &f.OutgoingChanID, &f.AmtIn, &f.AmtOut,
- )
-}
-
-// AddForwardingEvents adds a series of forwarding events to the database.
-// Before inserting, the set of events will be sorted according to their
-// timestamp. This ensures that all writes to disk are sequential.
-func (f *ForwardingLog) AddForwardingEvents(events []ForwardingEvent) er.R {
- // Before we create the database transaction, we'll ensure that the set
- // of forwarding events are properly sorted according to their
- // timestamp and that no duplicate timestamps exist to avoid collisions
- // in the key we are going to store the events under.
- makeUniqueTimestamps(events)
-
- var timestamp [8]byte
-
- return kvdb.Batch(f.db.Backend, func(tx kvdb.RwTx) er.R {
- // First, we'll fetch the bucket that stores our time series
- // log.
- logBucket, err := tx.CreateTopLevelBucket(
- forwardingLogBucket,
- )
- if err != nil {
- return err
- }
-
- // With the bucket obtained, we can now begin to write out the
- // series of events.
- for _, event := range events {
- err := storeEvent(logBucket, event, timestamp[:])
- if err != nil {
- return err
- }
- }
-
- return nil
- })
-}
-
-// storeEvent tries to store a forwarding event into the given bucket by trying
-// to avoid collisions. If a key for the event timestamp already exists in the
-// database, the timestamp is incremented in nanosecond intervals until a "free"
-// slot is found.
-func storeEvent(bucket walletdb.ReadWriteBucket, event ForwardingEvent,
- timestampScratchSpace []byte) er.R {
-
- // First, we'll serialize this timestamp into our
- // timestamp buffer.
- byteOrder.PutUint64(
- timestampScratchSpace, uint64(event.Timestamp.UnixNano()),
- )
-
- // Next we'll loop until we find a "free" slot in the bucket to store
- // the event under. This should almost never happen unless we're running
- // on a system that has a very bad system clock that doesn't properly
- // resolve to nanosecond scale. We try up to 100 times (which would come
- // to a maximum shift of 0.1 microsecond which is acceptable for most
- // use cases). If we don't find a free slot, we just give up and let
- // the collision happen. Something must be wrong with the data in that
- // case, even on a very fast machine forwarding payments _will_ take a
- // few microseconds at least so we should find a nanosecond slot
- // somewhere.
- const maxTries = 100
- tries := 0
- for tries < maxTries {
- val := bucket.Get(timestampScratchSpace)
- if val == nil {
- break
- }
-
- // Collision, try the next nanosecond timestamp.
- nextNano := event.Timestamp.UnixNano() + 1
- event.Timestamp = time.Unix(0, nextNano)
- byteOrder.PutUint64(timestampScratchSpace, uint64(nextNano))
- tries++
- }
-
- // With the key encoded, we'll then encode the event
- // into our buffer, then write it out to disk.
- var eventBytes [forwardingEventSize]byte
- eventBuf := bytes.NewBuffer(eventBytes[0:0:forwardingEventSize])
- err := encodeForwardingEvent(eventBuf, &event)
- if err != nil {
- return err
- }
- return bucket.Put(timestampScratchSpace, eventBuf.Bytes())
-}
-
-// ForwardingEventQuery represents a query to the forwarding log payment
-// circuit time series database. The query allows a caller to retrieve all
-// records for a particular time slice, offset in that time slice, limiting the
-// total number of responses returned.
-type ForwardingEventQuery struct {
- // StartTime is the start time of the time slice.
- StartTime time.Time
-
- // EndTime is the end time of the time slice.
- EndTime time.Time
-
- // IndexOffset is the offset within the time slice to start at. This
- // can be used to start the response at a particular record.
- IndexOffset uint32
-
- // NumMaxEvents is the max number of events to return.
- NumMaxEvents uint32
-}
-
-// ForwardingLogTimeSlice is the response to a forwarding query. It includes
-// the original query, the set events that match the query, and an integer
-// which represents the offset index of the last item in the set of retuned
-// events. This integer allows callers to resume their query using this offset
-// in the event that the query's response exceeds the max number of returnable
-// events.
-type ForwardingLogTimeSlice struct {
- ForwardingEventQuery
-
- // ForwardingEvents is the set of events in our time series that answer
- // the query embedded above.
- ForwardingEvents []ForwardingEvent
-
- // LastIndexOffset is the index of the last element in the set of
- // returned ForwardingEvents above. Callers can use this to resume
- // their query in the event that the time slice has too many events to
- // fit into a single response.
- LastIndexOffset uint32
-}
-
-// Query allows a caller to query the forwarding event time series for a
-// particular time slice. The caller can control the precise time as well as
-// the number of events to be returned.
-//
-// TODO(roasbeef): rename?
-func (f *ForwardingLog) Query(q ForwardingEventQuery) (ForwardingLogTimeSlice, er.R) {
- var resp ForwardingLogTimeSlice
-
- // If the user provided an index offset, then we'll not know how many
- // records we need to skip. We'll also keep track of the record offset
- // as that's part of the final return value.
- recordsToSkip := q.IndexOffset
- recordOffset := q.IndexOffset
-
- err := kvdb.View(f.db, func(tx kvdb.RTx) er.R {
- // If the bucket wasn't found, then there aren't any events to
- // be returned.
- logBucket := tx.ReadBucket(forwardingLogBucket)
- if logBucket == nil {
- return ErrNoForwardingEvents.Default()
- }
-
- // We'll be using a cursor to seek into the database, so we'll
- // populate byte slices that represent the start of the key
- // space we're interested in, and the end.
- var startTime, endTime [8]byte
- byteOrder.PutUint64(startTime[:], uint64(q.StartTime.UnixNano()))
- byteOrder.PutUint64(endTime[:], uint64(q.EndTime.UnixNano()))
-
- // If we know that a set of log events exists, then we'll begin
- // our seek through the log in order to satisfy the query.
- // We'll continue until either we reach the end of the range,
- // or reach our max number of events.
- logCursor := logBucket.ReadCursor()
- timestamp, events := logCursor.Seek(startTime[:])
- for ; timestamp != nil && bytes.Compare(timestamp, endTime[:]) <= 0; timestamp, events = logCursor.Next() {
- // If our current return payload exceeds the max number
- // of events, then we'll exit now.
- if uint32(len(resp.ForwardingEvents)) >= q.NumMaxEvents {
- return nil
- }
-
- // If we're not yet past the user defined offset, then
- // we'll continue to seek forward.
- if recordsToSkip > 0 {
- recordsToSkip--
- continue
- }
-
- currentTime := time.Unix(
- 0, int64(byteOrder.Uint64(timestamp)),
- )
-
- // At this point, we've skipped enough records to start
- // to collate our query. For each record, we'll
- // increment the final record offset so the querier can
- // utilize pagination to seek further.
- readBuf := bytes.NewReader(events)
- for readBuf.Len() != 0 {
- var event ForwardingEvent
- err := decodeForwardingEvent(readBuf, &event)
- if err != nil {
- return err
- }
-
- event.Timestamp = currentTime
- resp.ForwardingEvents = append(resp.ForwardingEvents, event)
-
- recordOffset++
- }
- }
-
- return nil
- }, func() {
- resp = ForwardingLogTimeSlice{
- ForwardingEventQuery: q,
- }
- })
- if err != nil && !ErrNoForwardingEvents.Is(err) {
- return ForwardingLogTimeSlice{}, err
- }
-
- resp.LastIndexOffset = recordOffset
-
- return resp, nil
-}
-
-// makeUniqueTimestamps takes a slice of forwarding events, sorts it by the
-// event timestamps and then makes sure there are no duplicates in the
-// timestamps. If duplicates are found, some of the timestamps are increased on
-// the nanosecond scale until only unique values remain. This is a fix to
-// address the problem that in some environments (looking at you, Windows) the
-// system clock has such a bad resolution that two serial invocations of
-// time.Now() might return the same timestamp, even if some time has elapsed
-// between the calls.
-func makeUniqueTimestamps(events []ForwardingEvent) {
- sort.Slice(events, func(i, j int) bool {
- return events[i].Timestamp.Before(events[j].Timestamp)
- })
-
- // Now that we know the events are sorted by timestamp, we can go
- // through the list and fix all duplicates until only unique values
- // remain.
- for outer := 0; outer < len(events)-1; outer++ {
- current := events[outer].Timestamp.UnixNano()
- next := events[outer+1].Timestamp.UnixNano()
-
- // We initially sorted the slice. So if the current is now
- // greater or equal to the next one, it's either because it's a
- // duplicate or because we increased the current in the last
- // iteration.
- if current >= next {
- next = current + 1
- events[outer+1].Timestamp = time.Unix(0, next)
- }
- }
-}
diff --git a/lnd/channeldb/forwarding_log_test.go b/lnd/channeldb/forwarding_log_test.go
deleted file mode 100644
index e41e984c..00000000
--- a/lnd/channeldb/forwarding_log_test.go
+++ /dev/null
@@ -1,383 +0,0 @@
-package channeldb
-
-import (
- "math/rand"
- "reflect"
- "testing"
- "time"
-
- "github.com/davecgh/go-spew/spew"
- "github.com/pkt-cash/pktd/lnd/lnwire"
- "github.com/stretchr/testify/assert"
-)
-
-// TestForwardingLogBasicStorageAndQuery tests that we're able to store and
-// then query for items that have previously been added to the event log.
-func TestForwardingLogBasicStorageAndQuery(t *testing.T) {
- t.Parallel()
-
- // First, we'll set up a test database, and use that to instantiate the
- // forwarding event log that we'll be using for the duration of the
- // test.
- db, cleanUp, err := MakeTestDB()
- if err != nil {
- t.Fatalf("unable to make test db: %v", err)
- }
- defer cleanUp()
-
- log := ForwardingLog{
- db: db,
- }
-
- initialTime := time.Unix(1234, 0)
- timestamp := time.Unix(1234, 0)
-
- // We'll create 100 random events, which each event being spaced 10
- // minutes after the prior event.
- numEvents := 100
- events := make([]ForwardingEvent, numEvents)
- for i := 0; i < numEvents; i++ {
- events[i] = ForwardingEvent{
- Timestamp: timestamp,
- IncomingChanID: lnwire.NewShortChanIDFromInt(uint64(rand.Int63())),
- OutgoingChanID: lnwire.NewShortChanIDFromInt(uint64(rand.Int63())),
- AmtIn: lnwire.MilliSatoshi(rand.Int63()),
- AmtOut: lnwire.MilliSatoshi(rand.Int63()),
- }
-
- timestamp = timestamp.Add(time.Minute * 10)
- }
-
- // Now that all of our set of events constructed, we'll add them to the
- // database in a batch manner.
- if err := log.AddForwardingEvents(events); err != nil {
- t.Fatalf("unable to add events: %v", err)
- }
-
- // With our events added we'll now construct a basic query to retrieve
- // all of the events.
- eventQuery := ForwardingEventQuery{
- StartTime: initialTime,
- EndTime: timestamp,
- IndexOffset: 0,
- NumMaxEvents: 1000,
- }
- timeSlice, err := log.Query(eventQuery)
- if err != nil {
- t.Fatalf("unable to query for events: %v", err)
- }
-
- // The set of returned events should match identically, as they should
- // be returned in sorted order.
- if !reflect.DeepEqual(events, timeSlice.ForwardingEvents) {
- t.Fatalf("event mismatch: expected %v vs %v",
- spew.Sdump(events), spew.Sdump(timeSlice.ForwardingEvents))
- }
-
- // The offset index of the final entry should be numEvents, so the
- // number of total events we've written.
- if timeSlice.LastIndexOffset != uint32(numEvents) {
- t.Fatalf("wrong final offset: expected %v, got %v",
- timeSlice.LastIndexOffset, numEvents)
- }
-}
-
-// TestForwardingLogQueryOptions tests that the query offset works properly. So
-// if we add a series of events, then we should be able to seek within the
-// timeslice accordingly. This exercises the index offset and num max event
-// field in the query, and also the last index offset field int he response.
-func TestForwardingLogQueryOptions(t *testing.T) {
- t.Parallel()
-
- // First, we'll set up a test database, and use that to instantiate the
- // forwarding event log that we'll be using for the duration of the
- // test.
- db, cleanUp, err := MakeTestDB()
- if err != nil {
- t.Fatalf("unable to make test db: %v", err)
- }
- defer cleanUp()
-
- log := ForwardingLog{
- db: db,
- }
-
- initialTime := time.Unix(1234, 0)
- endTime := time.Unix(1234, 0)
-
- // We'll create 20 random events, which each event being spaced 10
- // minutes after the prior event.
- numEvents := 20
- events := make([]ForwardingEvent, numEvents)
- for i := 0; i < numEvents; i++ {
- events[i] = ForwardingEvent{
- Timestamp: endTime,
- IncomingChanID: lnwire.NewShortChanIDFromInt(uint64(rand.Int63())),
- OutgoingChanID: lnwire.NewShortChanIDFromInt(uint64(rand.Int63())),
- AmtIn: lnwire.MilliSatoshi(rand.Int63()),
- AmtOut: lnwire.MilliSatoshi(rand.Int63()),
- }
-
- endTime = endTime.Add(time.Minute * 10)
- }
-
- // Now that all of our set of events constructed, we'll add them to the
- // database in a batch manner.
- if err := log.AddForwardingEvents(events); err != nil {
- t.Fatalf("unable to add events: %v", err)
- }
-
- // With all of our events added, we should be able to query for the
- // first 10 events using the max event query field.
- eventQuery := ForwardingEventQuery{
- StartTime: initialTime,
- EndTime: endTime,
- IndexOffset: 0,
- NumMaxEvents: 10,
- }
- timeSlice, err := log.Query(eventQuery)
- if err != nil {
- t.Fatalf("unable to query for events: %v", err)
- }
-
- // We should get exactly 10 events back.
- if len(timeSlice.ForwardingEvents) != 10 {
- t.Fatalf("wrong number of events: expected %v, got %v", 10,
- len(timeSlice.ForwardingEvents))
- }
-
- // The set of events returned should be the first 10 events that we
- // added.
- if !reflect.DeepEqual(events[:10], timeSlice.ForwardingEvents) {
- t.Fatalf("wrong response: expected %v, got %v",
- spew.Sdump(events[:10]),
- spew.Sdump(timeSlice.ForwardingEvents))
- }
-
- // The final offset should be the exact number of events returned.
- if timeSlice.LastIndexOffset != 10 {
- t.Fatalf("wrong index offset: expected %v, got %v", 10,
- timeSlice.LastIndexOffset)
- }
-
- // If we use the final offset to query again, then we should get 10
- // more events, that are the last 10 events we wrote.
- eventQuery.IndexOffset = 10
- timeSlice, err = log.Query(eventQuery)
- if err != nil {
- t.Fatalf("unable to query for events: %v", err)
- }
-
- // We should get exactly 10 events back once again.
- if len(timeSlice.ForwardingEvents) != 10 {
- t.Fatalf("wrong number of events: expected %v, got %v", 10,
- len(timeSlice.ForwardingEvents))
- }
-
- // The events that we got back should be the last 10 events that we
- // wrote out.
- if !reflect.DeepEqual(events[10:], timeSlice.ForwardingEvents) {
- t.Fatalf("wrong response: expected %v, got %v",
- spew.Sdump(events[10:]),
- spew.Sdump(timeSlice.ForwardingEvents))
- }
-
- // Finally, the last index offset should be 20, or the number of
- // records we've written out.
- if timeSlice.LastIndexOffset != 20 {
- t.Fatalf("wrong index offset: expected %v, got %v", 20,
- timeSlice.LastIndexOffset)
- }
-}
-
-// TestForwardingLogQueryLimit tests that we're able to properly limit the
-// number of events that are returned as part of a query.
-func TestForwardingLogQueryLimit(t *testing.T) {
- t.Parallel()
-
- // First, we'll set up a test database, and use that to instantiate the
- // forwarding event log that we'll be using for the duration of the
- // test.
- db, cleanUp, err := MakeTestDB()
- if err != nil {
- t.Fatalf("unable to make test db: %v", err)
- }
- defer cleanUp()
-
- log := ForwardingLog{
- db: db,
- }
-
- initialTime := time.Unix(1234, 0)
- endTime := time.Unix(1234, 0)
-
- // We'll create 200 random events, which each event being spaced 10
- // minutes after the prior event.
- numEvents := 200
- events := make([]ForwardingEvent, numEvents)
- for i := 0; i < numEvents; i++ {
- events[i] = ForwardingEvent{
- Timestamp: endTime,
- IncomingChanID: lnwire.NewShortChanIDFromInt(uint64(rand.Int63())),
- OutgoingChanID: lnwire.NewShortChanIDFromInt(uint64(rand.Int63())),
- AmtIn: lnwire.MilliSatoshi(rand.Int63()),
- AmtOut: lnwire.MilliSatoshi(rand.Int63()),
- }
-
- endTime = endTime.Add(time.Minute * 10)
- }
-
- // Now that all of our set of events constructed, we'll add them to the
- // database in a batch manner.
- if err := log.AddForwardingEvents(events); err != nil {
- t.Fatalf("unable to add events: %v", err)
- }
-
- // Once the events have been written out, we'll issue a query over the
- // entire range, but restrict the number of events to the first 100.
- eventQuery := ForwardingEventQuery{
- StartTime: initialTime,
- EndTime: endTime,
- IndexOffset: 0,
- NumMaxEvents: 100,
- }
- timeSlice, err := log.Query(eventQuery)
- if err != nil {
- t.Fatalf("unable to query for events: %v", err)
- }
-
- // We should get exactly 100 events back.
- if len(timeSlice.ForwardingEvents) != 100 {
- t.Fatalf("wrong number of events: expected %v, got %v", 10,
- len(timeSlice.ForwardingEvents))
- }
-
- // The set of events returned should be the first 100 events that we
- // added.
- if !reflect.DeepEqual(events[:100], timeSlice.ForwardingEvents) {
- t.Fatalf("wrong response: expected %v, got %v",
- spew.Sdump(events[:100]),
- spew.Sdump(timeSlice.ForwardingEvents))
- }
-
- // The final offset should be the exact number of events returned.
- if timeSlice.LastIndexOffset != 100 {
- t.Fatalf("wrong index offset: expected %v, got %v", 100,
- timeSlice.LastIndexOffset)
- }
-}
-
-// TestForwardingLogMakeUniqueTimestamps makes sure the function that creates
-// unique timestamps does it job correctly.
-func TestForwardingLogMakeUniqueTimestamps(t *testing.T) {
- t.Parallel()
-
- // Create a list of events where some of the timestamps collide. We
- // expect no existing timestamp to be overwritten, instead the "gaps"
- // between them should be filled.
- inputSlice := []ForwardingEvent{
- {Timestamp: time.Unix(0, 1001)},
- {Timestamp: time.Unix(0, 2001)},
- {Timestamp: time.Unix(0, 1001)},
- {Timestamp: time.Unix(0, 1002)},
- {Timestamp: time.Unix(0, 1004)},
- {Timestamp: time.Unix(0, 1004)},
- {Timestamp: time.Unix(0, 1007)},
- {Timestamp: time.Unix(0, 1001)},
- }
- expectedSlice := []ForwardingEvent{
- {Timestamp: time.Unix(0, 1001)},
- {Timestamp: time.Unix(0, 1002)},
- {Timestamp: time.Unix(0, 1003)},
- {Timestamp: time.Unix(0, 1004)},
- {Timestamp: time.Unix(0, 1005)},
- {Timestamp: time.Unix(0, 1006)},
- {Timestamp: time.Unix(0, 1007)},
- {Timestamp: time.Unix(0, 2001)},
- }
-
- makeUniqueTimestamps(inputSlice)
-
- for idx, in := range inputSlice {
- expect := expectedSlice[idx]
- assert.Equal(
- t, expect.Timestamp.UnixNano(), in.Timestamp.UnixNano(),
- )
- }
-}
-
-// TestForwardingLogStoreEvent makes sure forwarding events are stored without
-// colliding on duplicate timestamps.
-func TestForwardingLogStoreEvent(t *testing.T) {
- t.Parallel()
-
- // First, we'll set up a test database, and use that to instantiate the
- // forwarding event log that we'll be using for the duration of the
- // test.
- db, cleanUp, err := MakeTestDB()
- if err != nil {
- t.Fatalf("unable to make test db: %v", err)
- }
- defer cleanUp()
-
- log := ForwardingLog{
- db: db,
- }
-
- // We'll create 20 random events, with each event having a timestamp
- // with just one nanosecond apart.
- numEvents := 20
- events := make([]ForwardingEvent, numEvents)
- ts := time.Now().UnixNano()
- for i := 0; i < numEvents; i++ {
- events[i] = ForwardingEvent{
- Timestamp: time.Unix(0, ts+int64(i)),
- IncomingChanID: lnwire.NewShortChanIDFromInt(uint64(rand.Int63())),
- OutgoingChanID: lnwire.NewShortChanIDFromInt(uint64(rand.Int63())),
- AmtIn: lnwire.MilliSatoshi(rand.Int63()),
- AmtOut: lnwire.MilliSatoshi(rand.Int63()),
- }
- }
-
- // Now that all of our events are constructed, we'll add them to the
- // database in a batched manner.
- if err := log.AddForwardingEvents(events); err != nil {
- t.Fatalf("unable to add events: %v", err)
- }
-
- // Because timestamps are de-duplicated when adding them in a single
- // batch before they even hit the DB, we add the same events again but
- // in a new batch. They now have to be de-duplicated on the DB level.
- if err := log.AddForwardingEvents(events); err != nil {
- t.Fatalf("unable to add second batch of events: %v", err)
- }
-
- // With all of our events added, we should be able to query for all
- // events with a range of just 40 nanoseconds (2 times 20 events, all
- // spaced one nanosecond apart).
- eventQuery := ForwardingEventQuery{
- StartTime: time.Unix(0, ts),
- EndTime: time.Unix(0, ts+int64(numEvents*2)),
- IndexOffset: 0,
- NumMaxEvents: uint32(numEvents * 3),
- }
- timeSlice, err := log.Query(eventQuery)
- if err != nil {
- t.Fatalf("unable to query for events: %v", err)
- }
-
- // We should get exactly 40 events back.
- if len(timeSlice.ForwardingEvents) != numEvents*2 {
- t.Fatalf("wrong number of events: expected %v, got %v",
- numEvents*2, len(timeSlice.ForwardingEvents))
- }
-
- // The timestamps should be spaced out evenly and in order.
- for i := 0; i < numEvents*2; i++ {
- eventTs := timeSlice.ForwardingEvents[i].Timestamp.UnixNano()
- if eventTs != ts+int64(i) {
- t.Fatalf("unexpected timestamp of event %d: expected "+
- "%d, got %d", i, ts+int64(i), eventTs)
- }
- }
-}
diff --git a/lnd/channeldb/forwarding_package.go b/lnd/channeldb/forwarding_package.go
deleted file mode 100644
index 6594db2d..00000000
--- a/lnd/channeldb/forwarding_package.go
+++ /dev/null
@@ -1,929 +0,0 @@
-package channeldb
-
-import (
- "bytes"
- "encoding/binary"
- "fmt"
- "io"
-
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/btcutil/util"
- "github.com/pkt-cash/pktd/lnd/channeldb/kvdb"
- "github.com/pkt-cash/pktd/lnd/lnwire"
-)
-
-// ErrCorruptedFwdPkg signals that the on-disk structure of the forwarding
-// package has potentially been mangled.
-var ErrCorruptedFwdPkg = Err.CodeWithDetail("ErrCorruptedFwdPkg", "fwding package db has been corrupted")
-
-// FwdState is an enum used to describe the lifecycle of a FwdPkg.
-type FwdState byte
-
-const (
- // FwdStateLockedIn is the starting state for all forwarding packages.
- // Packages in this state have not yet committed to the exact set of
- // Adds to forward to the switch.
- FwdStateLockedIn FwdState = iota
-
- // FwdStateProcessed marks the state in which all Adds have been
- // locally processed and the forwarding decision to the switch has been
- // persisted.
- FwdStateProcessed
-
- // FwdStateCompleted signals that all Adds have been acked, and that all
- // settles and fails have been delivered to their sources. Packages in
- // this state can be removed permanently.
- FwdStateCompleted
-)
-
-var (
- // fwdPackagesKey is the root-level bucket that all forwarding packages
- // are written. This bucket is further subdivided based on the short
- // channel ID of each channel.
- fwdPackagesKey = []byte("fwd-packages")
-
- // addBucketKey is the bucket to which all Add log updates are written.
- addBucketKey = []byte("add-updates")
-
- // failSettleBucketKey is the bucket to which all Settle/Fail log
- // updates are written.
- failSettleBucketKey = []byte("fail-settle-updates")
-
- // fwdFilterKey is a key used to write the set of Adds that passed
- // validation and are to be forwarded to the switch.
- // NOTE: The presence of this key within a forwarding package indicates
- // that the package has reached FwdStateProcessed.
- fwdFilterKey = []byte("fwd-filter-key")
-
- // ackFilterKey is a key used to access the PkgFilter indicating which
- // Adds have received a Settle/Fail. This response may come from a
- // number of sources, including: exitHop settle/fails, switch failures,
- // chain arbiter interjections, as well as settle/fails from the
- // next hop in the route.
- ackFilterKey = []byte("ack-filter-key")
-
- // settleFailFilterKey is a key used to access the PkgFilter indicating
- // which Settles/Fails in have been received and processed by the link
- // that originally received the Add.
- settleFailFilterKey = []byte("settle-fail-filter-key")
-)
-
-// PkgFilter is used to compactly represent a particular subset of the Adds in a
-// forwarding package. Each filter is represented as a simple, statically-sized
-// bitvector, where the elements are intended to be the indices of the Adds as
-// they are written in the FwdPkg.
-type PkgFilter struct {
- count uint16
- filter []byte
-}
-
-// NewPkgFilter initializes an empty PkgFilter supporting `count` elements.
-func NewPkgFilter(count uint16) *PkgFilter {
- // We add 7 to ensure that the integer division yields properly rounded
- // values.
- filterLen := (count + 7) / 8
-
- return &PkgFilter{
- count: count,
- filter: make([]byte, filterLen),
- }
-}
-
-// Count returns the number of elements represented by this PkgFilter.
-func (f *PkgFilter) Count() uint16 {
- return f.count
-}
-
-// Set marks the `i`-th element as included by this filter.
-// NOTE: It is assumed that i is always less than count.
-func (f *PkgFilter) Set(i uint16) {
- byt := i / 8
- bit := i % 8
-
- // Set the i-th bit in the filter.
- // TODO(conner): ignore if > count to prevent panic?
- f.filter[byt] |= byte(1 << (7 - bit))
-}
-
-// Contains queries the filter for membership of index `i`.
-// NOTE: It is assumed that i is always less than count.
-func (f *PkgFilter) Contains(i uint16) bool {
- byt := i / 8
- bit := i % 8
-
- // Read the i-th bit in the filter.
- // TODO(conner): ignore if > count to prevent panic?
- return f.filter[byt]&(1<<(7-bit)) != 0
-}
-
-// Equal checks two PkgFilters for equality.
-func (f *PkgFilter) Equal(f2 *PkgFilter) bool {
- if f == f2 {
- return true
- }
- if f.count != f2.count {
- return false
- }
-
- return bytes.Equal(f.filter, f2.filter)
-}
-
-// IsFull returns true if every element in the filter has been Set, and false
-// otherwise.
-func (f *PkgFilter) IsFull() bool {
- // Batch validate bytes that are fully used.
- for i := uint16(0); i < f.count/8; i++ {
- if f.filter[i] != 0xFF {
- return false
- }
- }
-
- // If the count is not a multiple of 8, check that the filter contains
- // all remaining bits.
- rem := f.count % 8
- for idx := f.count - rem; idx < f.count; idx++ {
- if !f.Contains(idx) {
- return false
- }
- }
-
- return true
-}
-
-// Size returns number of bytes produced when the PkgFilter is serialized.
-func (f *PkgFilter) Size() uint16 {
- // 2 bytes for uint16 `count`, then round up number of bytes required to
- // represent `count` bits.
- return 2 + (f.count+7)/8
-}
-
-// Encode writes the filter to the provided io.Writer.
-func (f *PkgFilter) Encode(w io.Writer) er.R {
- if err := util.WriteBin(w, binary.BigEndian, f.count); err != nil {
- return err
- }
-
- _, err := util.Write(w, f.filter)
-
- return err
-}
-
-// Decode reads the filter from the provided io.Reader.
-func (f *PkgFilter) Decode(r io.Reader) er.R {
- if err := util.ReadBin(r, binary.BigEndian, &f.count); err != nil {
- return err
- }
-
- f.filter = make([]byte, f.Size()-2)
- _, err := util.ReadFull(r, f.filter)
-
- return err
-}
-
-// FwdPkg records all adds, settles, and fails that were locked in as a result
-// of the remote peer sending us a revocation. Each package is identified by
-// the short chanid and remote commitment height corresponding to the revocation
-// that locked in the HTLCs. For everything except a locally initiated payment,
-// settles and fails in a forwarding package must have a corresponding Add in
-// another package, and can be removed individually once the source link has
-// received the fail/settle.
-//
-// Adds cannot be removed, as we need to present the same batch of Adds to
-// properly handle replay protection. Instead, we use a PkgFilter to mark that
-// we have finished processing a particular Add. A FwdPkg should only be deleted
-// after the AckFilter is full and all settles and fails have been persistently
-// removed.
-type FwdPkg struct {
- // Source identifies the channel that wrote this forwarding package.
- Source lnwire.ShortChannelID
-
- // Height is the height of the remote commitment chain that locked in
- // this forwarding package.
- Height uint64
-
- // State signals the persistent condition of the package and directs how
- // to reprocess the package in the event of failures.
- State FwdState
-
- // Adds contains all add messages which need to be processed and
- // forwarded to the switch. Adds does not change over the life of a
- // forwarding package.
- Adds []LogUpdate
-
- // FwdFilter is a filter containing the indices of all Adds that were
- // forwarded to the switch.
- FwdFilter *PkgFilter
-
- // AckFilter is a filter containing the indices of all Adds for which
- // the source has received a settle or fail and is reflected in the next
- // commitment txn. A package should not be removed until IsFull()
- // returns true.
- AckFilter *PkgFilter
-
- // SettleFails contains all settle and fail messages that should be
- // forwarded to the switch.
- SettleFails []LogUpdate
-
- // SettleFailFilter is a filter containing the indices of all Settle or
- // Fails originating in this package that have been received and locked
- // into the incoming link's commitment state.
- SettleFailFilter *PkgFilter
-}
-
-// NewFwdPkg initializes a new forwarding package in FwdStateLockedIn. This
-// should be used to create a package at the time we receive a revocation.
-func NewFwdPkg(source lnwire.ShortChannelID, height uint64,
- addUpdates, settleFailUpdates []LogUpdate) *FwdPkg {
-
- nAddUpdates := uint16(len(addUpdates))
- nSettleFailUpdates := uint16(len(settleFailUpdates))
-
- return &FwdPkg{
- Source: source,
- Height: height,
- State: FwdStateLockedIn,
- Adds: addUpdates,
- FwdFilter: NewPkgFilter(nAddUpdates),
- AckFilter: NewPkgFilter(nAddUpdates),
- SettleFails: settleFailUpdates,
- SettleFailFilter: NewPkgFilter(nSettleFailUpdates),
- }
-}
-
-// ID returns an unique identifier for this package, used to ensure that sphinx
-// replay processing of this batch is idempotent.
-func (f *FwdPkg) ID() []byte {
- var id = make([]byte, 16)
- byteOrder.PutUint64(id[:8], f.Source.ToUint64())
- byteOrder.PutUint64(id[8:], f.Height)
- return id
-}
-
-// String returns a human-readable description of the forwarding package.
-func (f *FwdPkg) String() string {
- return fmt.Sprintf("%T(src=%v, height=%v, nadds=%v, nfailsettles=%v)",
- f, f.Source, f.Height, len(f.Adds), len(f.SettleFails))
-}
-
-// AddRef is used to identify a particular Add in a FwdPkg. The short channel ID
-// is assumed to be that of the packager.
-type AddRef struct {
- // Height is the remote commitment height that locked in the Add.
- Height uint64
-
- // Index is the index of the Add within the fwd pkg's Adds.
- //
- // NOTE: This index is static over the lifetime of a forwarding package.
- Index uint16
-}
-
-// Encode serializes the AddRef to the given io.Writer.
-func (a *AddRef) Encode(w io.Writer) er.R {
- if err := util.WriteBin(w, binary.BigEndian, a.Height); err != nil {
- return err
- }
-
- return util.WriteBin(w, binary.BigEndian, a.Index)
-}
-
-// Decode deserializes the AddRef from the given io.Reader.
-func (a *AddRef) Decode(r io.Reader) er.R {
- if err := util.ReadBin(r, binary.BigEndian, &a.Height); err != nil {
- return err
- }
-
- return util.ReadBin(r, binary.BigEndian, &a.Index)
-}
-
-// SettleFailRef is used to locate a Settle/Fail in another channel's FwdPkg. A
-// channel does not remove its own Settle/Fail htlcs, so the source is provided
-// to locate a db bucket belonging to another channel.
-type SettleFailRef struct {
- // Source identifies the outgoing link that locked in the settle or
- // fail. This is then used by the *incoming* link to find the settle
- // fail in another link's forwarding packages.
- Source lnwire.ShortChannelID
-
- // Height is the remote commitment height that locked in this
- // Settle/Fail.
- Height uint64
-
- // Index is the index of the Add with the fwd pkg's SettleFails.
- //
- // NOTE: This index is static over the lifetime of a forwarding package.
- Index uint16
-}
-
-// SettleFailAcker is a generic interface providing the ability to acknowledge
-// settle/fail HTLCs stored in forwarding packages.
-type SettleFailAcker interface {
- // AckSettleFails atomically updates the settle-fail filters in *other*
- // channels' forwarding packages.
- AckSettleFails(tx kvdb.RwTx, settleFailRefs ...SettleFailRef) er.R
-}
-
-// GlobalFwdPkgReader is an interface used to retrieve the forwarding packages
-// of any active channel.
-type GlobalFwdPkgReader interface {
- // LoadChannelFwdPkgs loads all known forwarding packages for the given
- // channel.
- LoadChannelFwdPkgs(tx kvdb.RTx,
- source lnwire.ShortChannelID) ([]*FwdPkg, er.R)
-}
-
-// FwdOperator defines the interfaces for managing forwarding packages that are
-// external to a particular channel. This interface is used by the switch to
-// read forwarding packages from arbitrary channels, and acknowledge settles and
-// fails for locally-sourced payments.
-type FwdOperator interface {
- // GlobalFwdPkgReader provides read access to all known forwarding
- // packages
- GlobalFwdPkgReader
-
- // SettleFailAcker grants the ability to acknowledge settles or fails
- // residing in arbitrary forwarding packages.
- SettleFailAcker
-}
-
-// SwitchPackager is a concrete implementation of the FwdOperator interface.
-// A SwitchPackager offers the ability to read any forwarding package, and ack
-// arbitrary settle and fail HTLCs.
-type SwitchPackager struct{}
-
-// NewSwitchPackager instantiates a new SwitchPackager.
-func NewSwitchPackager() *SwitchPackager {
- return &SwitchPackager{}
-}
-
-// AckSettleFails atomically updates the settle-fail filters in *other*
-// channels' forwarding packages, to mark that the switch has received a settle
-// or fail residing in the forwarding package of a link.
-func (*SwitchPackager) AckSettleFails(tx kvdb.RwTx,
- settleFailRefs ...SettleFailRef) er.R {
-
- return ackSettleFails(tx, settleFailRefs)
-}
-
-// LoadChannelFwdPkgs loads all forwarding packages for a particular channel.
-func (*SwitchPackager) LoadChannelFwdPkgs(tx kvdb.RTx,
- source lnwire.ShortChannelID) ([]*FwdPkg, er.R) {
-
- return loadChannelFwdPkgs(tx, source)
-}
-
-// FwdPackager supports all operations required to modify fwd packages, such as
-// creation, updates, reading, and removal. The interfaces are broken down in
-// this way to support future delegation of the subinterfaces.
-type FwdPackager interface {
- // AddFwdPkg serializes and writes a FwdPkg for this channel at the
- // remote commitment height included in the forwarding package.
- AddFwdPkg(tx kvdb.RwTx, fwdPkg *FwdPkg) er.R
-
- // SetFwdFilter looks up the forwarding package at the remote `height`
- // and sets the `fwdFilter`, marking the Adds for which:
- // 1) We are not the exit node
- // 2) Passed all validation
- // 3) Should be forwarded to the switch immediately after a failure
- SetFwdFilter(tx kvdb.RwTx, height uint64, fwdFilter *PkgFilter) er.R
-
- // AckAddHtlcs atomically updates the add filters in this channel's
- // forwarding packages to mark the resolution of an Add that was
- // received from the remote party.
- AckAddHtlcs(tx kvdb.RwTx, addRefs ...AddRef) er.R
-
- // SettleFailAcker allows a link to acknowledge settle/fail HTLCs
- // belonging to other channels.
- SettleFailAcker
-
- // LoadFwdPkgs loads all known forwarding packages owned by this
- // channel.
- LoadFwdPkgs(tx kvdb.RTx) ([]*FwdPkg, er.R)
-
- // RemovePkg deletes a forwarding package owned by this channel at
- // the provided remote `height`.
- RemovePkg(tx kvdb.RwTx, height uint64) er.R
-}
-
-// ChannelPackager is used by a channel to manage the lifecycle of its forwarding
-// packages. The packager is tied to a particular source channel ID, allowing it
-// to create and edit its own packages. Each packager also has the ability to
-// remove fail/settle htlcs that correspond to an add contained in one of
-// source's packages.
-type ChannelPackager struct {
- source lnwire.ShortChannelID
-}
-
-// NewChannelPackager creates a new packager for a single channel.
-func NewChannelPackager(source lnwire.ShortChannelID) *ChannelPackager {
- return &ChannelPackager{
- source: source,
- }
-}
-
-// AddFwdPkg writes a newly locked in forwarding package to disk.
-func (*ChannelPackager) AddFwdPkg(tx kvdb.RwTx, fwdPkg *FwdPkg) er.R {
- fwdPkgBkt, err := tx.CreateTopLevelBucket(fwdPackagesKey)
- if err != nil {
- return err
- }
-
- source := makeLogKey(fwdPkg.Source.ToUint64())
- sourceBkt, err := fwdPkgBkt.CreateBucketIfNotExists(source[:])
- if err != nil {
- return err
- }
-
- heightKey := makeLogKey(fwdPkg.Height)
- heightBkt, err := sourceBkt.CreateBucketIfNotExists(heightKey[:])
- if err != nil {
- return err
- }
-
- // Write ADD updates we received at this commit height.
- addBkt, err := heightBkt.CreateBucketIfNotExists(addBucketKey)
- if err != nil {
- return err
- }
-
- // Write SETTLE/FAIL updates we received at this commit height.
- failSettleBkt, err := heightBkt.CreateBucketIfNotExists(failSettleBucketKey)
- if err != nil {
- return err
- }
-
- for i := range fwdPkg.Adds {
- errr := putLogUpdate(addBkt, uint16(i), &fwdPkg.Adds[i])
- if errr != nil {
- return errr
- }
- }
-
- // Persist the initialized pkg filter, which will be used to determine
- // when we can remove this forwarding package from disk.
- var ackFilterBuf bytes.Buffer
- if err := fwdPkg.AckFilter.Encode(&ackFilterBuf); err != nil {
- return err
- }
-
- if err := heightBkt.Put(ackFilterKey, ackFilterBuf.Bytes()); err != nil {
- return err
- }
-
- for i := range fwdPkg.SettleFails {
- errr := putLogUpdate(failSettleBkt, uint16(i), &fwdPkg.SettleFails[i])
- if errr != nil {
- return errr
- }
- }
-
- var settleFailFilterBuf bytes.Buffer
- errr := fwdPkg.SettleFailFilter.Encode(&settleFailFilterBuf)
- if errr != nil {
- return errr
- }
-
- return heightBkt.Put(settleFailFilterKey, settleFailFilterBuf.Bytes())
-}
-
-// putLogUpdate writes an htlc to the provided `bkt`, using `index` as the key.
-func putLogUpdate(bkt kvdb.RwBucket, idx uint16, htlc *LogUpdate) er.R {
- var b bytes.Buffer
- if err := htlc.Encode(&b); err != nil {
- return err
- }
-
- return bkt.Put(uint16Key(idx), b.Bytes())
-}
-
-// LoadFwdPkgs scans the forwarding log for any packages that haven't been
-// processed, and returns their deserialized log updates in a map indexed by the
-// remote commitment height at which the updates were locked in.
-func (p *ChannelPackager) LoadFwdPkgs(tx kvdb.RTx) ([]*FwdPkg, er.R) {
- return loadChannelFwdPkgs(tx, p.source)
-}
-
-// loadChannelFwdPkgs loads all forwarding packages owned by `source`.
-func loadChannelFwdPkgs(tx kvdb.RTx, source lnwire.ShortChannelID) ([]*FwdPkg, er.R) {
- fwdPkgBkt := tx.ReadBucket(fwdPackagesKey)
- if fwdPkgBkt == nil {
- return nil, nil
- }
-
- sourceKey := makeLogKey(source.ToUint64())
- sourceBkt := fwdPkgBkt.NestedReadBucket(sourceKey[:])
- if sourceBkt == nil {
- return nil, nil
- }
-
- var heights []uint64
- if err := sourceBkt.ForEach(func(k, _ []byte) er.R {
- if len(k) != 8 {
- return ErrCorruptedFwdPkg.Default()
- }
-
- heights = append(heights, byteOrder.Uint64(k))
-
- return nil
- }); err != nil {
- return nil, err
- }
-
- // Load the forwarding package for each retrieved height.
- fwdPkgs := make([]*FwdPkg, 0, len(heights))
- for _, height := range heights {
- fwdPkg, err := loadFwdPkg(fwdPkgBkt, source, height)
- if err != nil {
- return nil, err
- }
-
- fwdPkgs = append(fwdPkgs, fwdPkg)
- }
-
- return fwdPkgs, nil
-}
-
-// loadFwPkg reads the packager's fwd pkg at a given height, and determines the
-// appropriate FwdState.
-func loadFwdPkg(fwdPkgBkt kvdb.RBucket, source lnwire.ShortChannelID,
- height uint64) (*FwdPkg, er.R) {
-
- sourceKey := makeLogKey(source.ToUint64())
- sourceBkt := fwdPkgBkt.NestedReadBucket(sourceKey[:])
- if sourceBkt == nil {
- return nil, ErrCorruptedFwdPkg.Default()
- }
-
- heightKey := makeLogKey(height)
- heightBkt := sourceBkt.NestedReadBucket(heightKey[:])
- if heightBkt == nil {
- return nil, ErrCorruptedFwdPkg.Default()
- }
-
- // Load ADDs from disk.
- addBkt := heightBkt.NestedReadBucket(addBucketKey)
- if addBkt == nil {
- return nil, ErrCorruptedFwdPkg.Default()
- }
-
- adds, err := loadHtlcs(addBkt)
- if err != nil {
- return nil, err
- }
-
- // Load ack filter from disk.
- ackFilterBytes := heightBkt.Get(ackFilterKey)
- if ackFilterBytes == nil {
- return nil, ErrCorruptedFwdPkg.Default()
- }
- ackFilterReader := bytes.NewReader(ackFilterBytes)
-
- ackFilter := &PkgFilter{}
- if err := ackFilter.Decode(ackFilterReader); err != nil {
- return nil, err
- }
-
- // Load SETTLE/FAILs from disk.
- failSettleBkt := heightBkt.NestedReadBucket(failSettleBucketKey)
- if failSettleBkt == nil {
- return nil, ErrCorruptedFwdPkg.Default()
- }
-
- failSettles, err := loadHtlcs(failSettleBkt)
- if err != nil {
- return nil, err
- }
-
- // Load settle fail filter from disk.
- settleFailFilterBytes := heightBkt.Get(settleFailFilterKey)
- if settleFailFilterBytes == nil {
- return nil, ErrCorruptedFwdPkg.Default()
- }
- settleFailFilterReader := bytes.NewReader(settleFailFilterBytes)
-
- settleFailFilter := &PkgFilter{}
- if err := settleFailFilter.Decode(settleFailFilterReader); err != nil {
- return nil, err
- }
-
- // Initialize the fwding package, which always starts in the
- // FwdStateLockedIn. We can determine what state the package was left in
- // by examining constraints on the information loaded from disk.
- fwdPkg := &FwdPkg{
- Source: source,
- State: FwdStateLockedIn,
- Height: height,
- Adds: adds,
- AckFilter: ackFilter,
- SettleFails: failSettles,
- SettleFailFilter: settleFailFilter,
- }
-
- // Check to see if we have written the set exported filter adds to
- // disk. If we haven't, processing of this package was never started, or
- // failed during the last attempt.
- fwdFilterBytes := heightBkt.Get(fwdFilterKey)
- if fwdFilterBytes == nil {
- nAdds := uint16(len(adds))
- fwdPkg.FwdFilter = NewPkgFilter(nAdds)
- return fwdPkg, nil
- }
-
- fwdFilterReader := bytes.NewReader(fwdFilterBytes)
- fwdPkg.FwdFilter = &PkgFilter{}
- if err := fwdPkg.FwdFilter.Decode(fwdFilterReader); err != nil {
- return nil, err
- }
-
- // Otherwise, a complete round of processing was completed, and we
- // advance the package to FwdStateProcessed.
- fwdPkg.State = FwdStateProcessed
-
- // If every add, settle, and fail has been fully acknowledged, we can
- // safely set the package's state to FwdStateCompleted, signalling that
- // it can be garbage collected.
- if fwdPkg.AckFilter.IsFull() && fwdPkg.SettleFailFilter.IsFull() {
- fwdPkg.State = FwdStateCompleted
- }
-
- return fwdPkg, nil
-}
-
-// loadHtlcs retrieves all serialized htlcs in a bucket, returning
-// them in order of the indexes they were written under.
-func loadHtlcs(bkt kvdb.RBucket) ([]LogUpdate, er.R) {
- var htlcs []LogUpdate
- if err := bkt.ForEach(func(_, v []byte) er.R {
- var htlc LogUpdate
- if err := htlc.Decode(bytes.NewReader(v)); err != nil {
- return err
- }
-
- htlcs = append(htlcs, htlc)
-
- return nil
- }); err != nil {
- return nil, err
- }
-
- return htlcs, nil
-}
-
-// SetFwdFilter writes the set of indexes corresponding to Adds at the
-// `height` that are to be forwarded to the switch. Calling this method causes
-// the forwarding package at `height` to be in FwdStateProcessed. We write this
-// forwarding decision so that we always arrive at the same behavior for HTLCs
-// leaving this channel. After a restart, we skip validation of these Adds,
-// since they are assumed to have already been validated, and make the switch or
-// outgoing link responsible for handling replays.
-func (p *ChannelPackager) SetFwdFilter(tx kvdb.RwTx, height uint64,
- fwdFilter *PkgFilter) er.R {
-
- fwdPkgBkt := tx.ReadWriteBucket(fwdPackagesKey)
- if fwdPkgBkt == nil {
- return ErrCorruptedFwdPkg.Default()
- }
-
- source := makeLogKey(p.source.ToUint64())
- sourceBkt := fwdPkgBkt.NestedReadWriteBucket(source[:])
- if sourceBkt == nil {
- return ErrCorruptedFwdPkg.Default()
- }
-
- heightKey := makeLogKey(height)
- heightBkt := sourceBkt.NestedReadWriteBucket(heightKey[:])
- if heightBkt == nil {
- return ErrCorruptedFwdPkg.Default()
- }
-
- // If the fwd filter has already been written, we return early to avoid
- // modifying the persistent state.
- forwardedAddsBytes := heightBkt.Get(fwdFilterKey)
- if forwardedAddsBytes != nil {
- return nil
- }
-
- // Otherwise we serialize and write the provided fwd filter.
- var b bytes.Buffer
- if err := fwdFilter.Encode(&b); err != nil {
- return err
- }
-
- return heightBkt.Put(fwdFilterKey, b.Bytes())
-}
-
-// AckAddHtlcs accepts a list of references to add htlcs, and updates the
-// AckAddFilter of those forwarding packages to indicate that a settle or fail
-// has been received in response to the add.
-func (p *ChannelPackager) AckAddHtlcs(tx kvdb.RwTx, addRefs ...AddRef) er.R {
- if len(addRefs) == 0 {
- return nil
- }
-
- fwdPkgBkt := tx.ReadWriteBucket(fwdPackagesKey)
- if fwdPkgBkt == nil {
- return ErrCorruptedFwdPkg.Default()
- }
-
- sourceKey := makeLogKey(p.source.ToUint64())
- sourceBkt := fwdPkgBkt.NestedReadWriteBucket(sourceKey[:])
- if sourceBkt == nil {
- return ErrCorruptedFwdPkg.Default()
- }
-
- // Organize the forward references such that we just get a single slice
- // of indexes for each unique height.
- heightDiffs := make(map[uint64][]uint16)
- for _, addRef := range addRefs {
- heightDiffs[addRef.Height] = append(
- heightDiffs[addRef.Height],
- addRef.Index,
- )
- }
-
- // Load each height bucket once and remove all acked htlcs at that
- // height.
- for height, indexes := range heightDiffs {
- err := ackAddHtlcsAtHeight(sourceBkt, height, indexes)
- if err != nil {
- return err
- }
- }
-
- return nil
-}
-
-// ackAddHtlcsAtHeight updates the AddAckFilter of a single forwarding package
-// with a list of indexes, writing the resulting filter back in its place.
-func ackAddHtlcsAtHeight(sourceBkt kvdb.RwBucket, height uint64,
- indexes []uint16) er.R {
-
- heightKey := makeLogKey(height)
- heightBkt := sourceBkt.NestedReadWriteBucket(heightKey[:])
- if heightBkt == nil {
- // If the height bucket isn't found, this could be because the
- // forwarding package was already removed. We'll return nil to
- // signal that the operation is successful, as there is nothing
- // to ack.
- return nil
- }
-
- // Load ack filter from disk.
- ackFilterBytes := heightBkt.Get(ackFilterKey)
- if ackFilterBytes == nil {
- return ErrCorruptedFwdPkg.Default()
- }
-
- ackFilter := &PkgFilter{}
- ackFilterReader := bytes.NewReader(ackFilterBytes)
- if err := ackFilter.Decode(ackFilterReader); err != nil {
- return err
- }
-
- // Update the ack filter for this height.
- for _, index := range indexes {
- ackFilter.Set(index)
- }
-
- // Write the resulting filter to disk.
- var ackFilterBuf bytes.Buffer
- if err := ackFilter.Encode(&ackFilterBuf); err != nil {
- return err
- }
-
- return heightBkt.Put(ackFilterKey, ackFilterBuf.Bytes())
-}
-
-// AckSettleFails persistently acknowledges settles or fails from a remote forwarding
-// package. This should only be called after the source of the Add has locked in
-// the settle/fail, or it becomes otherwise safe to forgo retransmitting the
-// settle/fail after a restart.
-func (p *ChannelPackager) AckSettleFails(tx kvdb.RwTx, settleFailRefs ...SettleFailRef) er.R {
- return ackSettleFails(tx, settleFailRefs)
-}
-
-// ackSettleFails persistently acknowledges a batch of settle fail references.
-func ackSettleFails(tx kvdb.RwTx, settleFailRefs []SettleFailRef) er.R {
- if len(settleFailRefs) == 0 {
- return nil
- }
-
- fwdPkgBkt := tx.ReadWriteBucket(fwdPackagesKey)
- if fwdPkgBkt == nil {
- return ErrCorruptedFwdPkg.Default()
- }
-
- // Organize the forward references such that we just get a single slice
- // of indexes for each unique destination-height pair.
- destHeightDiffs := make(map[lnwire.ShortChannelID]map[uint64][]uint16)
- for _, settleFailRef := range settleFailRefs {
- destHeights, ok := destHeightDiffs[settleFailRef.Source]
- if !ok {
- destHeights = make(map[uint64][]uint16)
- destHeightDiffs[settleFailRef.Source] = destHeights
- }
-
- destHeights[settleFailRef.Height] = append(
- destHeights[settleFailRef.Height],
- settleFailRef.Index,
- )
- }
-
- // With the references organized by destination and height, we now load
- // each remote bucket, and update the settle fail filter for any
- // settle/fail htlcs.
- for dest, destHeights := range destHeightDiffs {
- destKey := makeLogKey(dest.ToUint64())
- destBkt := fwdPkgBkt.NestedReadWriteBucket(destKey[:])
- if destBkt == nil {
- // If the destination bucket is not found, this is
- // likely the result of the destination channel being
- // closed and having it's forwarding packages wiped. We
- // won't treat this as an error, because the response
- // will no longer be retransmitted internally.
- continue
- }
-
- for height, indexes := range destHeights {
- err := ackSettleFailsAtHeight(destBkt, height, indexes)
- if err != nil {
- return err
- }
- }
- }
-
- return nil
-}
-
-// ackSettleFailsAtHeight given a destination bucket, acks the provided indexes
-// at particular a height by updating the settle fail filter.
-func ackSettleFailsAtHeight(destBkt kvdb.RwBucket, height uint64,
- indexes []uint16) er.R {
-
- heightKey := makeLogKey(height)
- heightBkt := destBkt.NestedReadWriteBucket(heightKey[:])
- if heightBkt == nil {
- // If the height bucket isn't found, this could be because the
- // forwarding package was already removed. We'll return nil to
- // signal that the operation is as there is nothing to ack.
- return nil
- }
-
- // Load ack filter from disk.
- settleFailFilterBytes := heightBkt.Get(settleFailFilterKey)
- if settleFailFilterBytes == nil {
- return ErrCorruptedFwdPkg.Default()
- }
-
- settleFailFilter := &PkgFilter{}
- settleFailFilterReader := bytes.NewReader(settleFailFilterBytes)
- if err := settleFailFilter.Decode(settleFailFilterReader); err != nil {
- return err
- }
-
- // Update the ack filter for this height.
- for _, index := range indexes {
- settleFailFilter.Set(index)
- }
-
- // Write the resulting filter to disk.
- var settleFailFilterBuf bytes.Buffer
- if err := settleFailFilter.Encode(&settleFailFilterBuf); err != nil {
- return err
- }
-
- return heightBkt.Put(settleFailFilterKey, settleFailFilterBuf.Bytes())
-}
-
-// RemovePkg deletes the forwarding package at the given height from the
-// packager's source bucket.
-func (p *ChannelPackager) RemovePkg(tx kvdb.RwTx, height uint64) er.R {
- fwdPkgBkt := tx.ReadWriteBucket(fwdPackagesKey)
- if fwdPkgBkt == nil {
- return nil
- }
-
- sourceBytes := makeLogKey(p.source.ToUint64())
- sourceBkt := fwdPkgBkt.NestedReadWriteBucket(sourceBytes[:])
- if sourceBkt == nil {
- return ErrCorruptedFwdPkg.Default()
- }
-
- heightKey := makeLogKey(height)
-
- return sourceBkt.DeleteNestedBucket(heightKey[:])
-}
-
-// uint16Key writes the provided 16-bit unsigned integer to a 2-byte slice.
-func uint16Key(i uint16) []byte {
- key := make([]byte, 2)
- byteOrder.PutUint16(key, i)
- return key
-}
-
-// Compile-time constraint to ensure that ChannelPackager implements the public
-// FwdPackager interface.
-var _ FwdPackager = (*ChannelPackager)(nil)
-
-// Compile-time constraint to ensure that SwitchPackager implements the public
-// FwdOperator interface.
-var _ FwdOperator = (*SwitchPackager)(nil)
diff --git a/lnd/channeldb/forwarding_package_test.go b/lnd/channeldb/forwarding_package_test.go
deleted file mode 100644
index 1500a201..00000000
--- a/lnd/channeldb/forwarding_package_test.go
+++ /dev/null
@@ -1,818 +0,0 @@
-package channeldb_test
-
-import (
- "bytes"
- "io/ioutil"
- "path/filepath"
- "runtime"
- "testing"
-
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/lnd/channeldb"
- "github.com/pkt-cash/pktd/lnd/channeldb/kvdb"
- "github.com/pkt-cash/pktd/lnd/lnwire"
- "github.com/pkt-cash/pktd/wire"
-)
-
-// TestPkgFilterBruteForce tests the behavior of a pkg filter up to size 1000,
-// which is greater than the number of HTLCs we permit on a commitment txn.
-// This should encapsulate every potential filter used in practice.
-func TestPkgFilterBruteForce(t *testing.T) {
- t.Parallel()
-
- checkPkgFilterRange(t, 1000)
-}
-
-// checkPkgFilterRange verifies the behavior of a pkg filter when doing a linear
-// insertion of `high` elements. This is primarily to test that IsFull functions
-// properly for all relevant sizes of `high`.
-func checkPkgFilterRange(t *testing.T, high int) {
- for i := uint16(0); i < uint16(high); i++ {
- f := channeldb.NewPkgFilter(i)
-
- if f.Count() != i {
- t.Fatalf("pkg filter count=%d is actually %d",
- i, f.Count())
- }
- checkPkgFilterEncodeDecode(t, i, f)
-
- for j := uint16(0); j < i; j++ {
- if f.Contains(j) {
- t.Fatalf("pkg filter count=%d contains %d "+
- "before being added", i, j)
- }
-
- f.Set(j)
- checkPkgFilterEncodeDecode(t, i, f)
-
- if !f.Contains(j) {
- t.Fatalf("pkg filter count=%d missing %d "+
- "after being added", i, j)
- }
-
- if j < i-1 && f.IsFull() {
- t.Fatalf("pkg filter count=%d already full", i)
- }
- }
-
- if !f.IsFull() {
- t.Fatalf("pkg filter count=%d not full", i)
- }
- checkPkgFilterEncodeDecode(t, i, f)
- }
-}
-
-// TestPkgFilterRand uses a random permutation to verify the proper behavior of
-// the pkg filter if the entries are not inserted in-order.
-func TestPkgFilterRand(t *testing.T) {
- t.Parallel()
-
- checkPkgFilterRand(t, 3, 17)
-}
-
-// checkPkgFilterRand checks the behavior of a pkg filter by randomly inserting
-// indices and asserting the invariants. The order in which indices are inserted
-// is parameterized by a base `b` coprime to `p`, and using modular
-// exponentiation to generate all elements in [1,p).
-func checkPkgFilterRand(t *testing.T, b, p uint16) {
- f := channeldb.NewPkgFilter(p)
- var j = b
- for i := uint16(1); i < p; i++ {
- if f.Contains(j) {
- t.Fatalf("pkg filter contains %d-%d "+
- "before being added", i, j)
- }
-
- f.Set(j)
- checkPkgFilterEncodeDecode(t, i, f)
-
- if !f.Contains(j) {
- t.Fatalf("pkg filter missing %d-%d "+
- "after being added", i, j)
- }
-
- if i < p-1 && f.IsFull() {
- t.Fatalf("pkg filter %d already full", i)
- }
- checkPkgFilterEncodeDecode(t, i, f)
-
- j = (b * j) % p
- }
-
- // Set 0 independently, since it will never be emitted by the generator.
- f.Set(0)
- checkPkgFilterEncodeDecode(t, p, f)
-
- if !f.IsFull() {
- t.Fatalf("pkg filter count=%d not full", p)
- }
- checkPkgFilterEncodeDecode(t, p, f)
-}
-
-// checkPkgFilterEncodeDecode tests the serialization of a pkg filter by:
-// 1) writing it to a buffer
-// 2) verifying the number of bytes written matches the filter's Size()
-// 3) reconstructing the filter decoding the bytes
-// 4) checking that the two filters are the same according to Equal
-func checkPkgFilterEncodeDecode(t *testing.T, i uint16, f *channeldb.PkgFilter) {
- var b bytes.Buffer
- if err := f.Encode(&b); err != nil {
- t.Fatalf("unable to serialize pkg filter: %v", err)
- }
-
- // +2 for uint16 length
- size := uint16(len(b.Bytes()))
- if size != f.Size() {
- t.Fatalf("pkg filter count=%d serialized size differs, "+
- "Size(): %d, len(bytes): %v", i, f.Size(), size)
- }
-
- reader := bytes.NewReader(b.Bytes())
-
- f2 := &channeldb.PkgFilter{}
- if err := f2.Decode(reader); err != nil {
- t.Fatalf("unable to deserialize pkg filter: %v", err)
- }
-
- if !f.Equal(f2) {
- t.Fatalf("pkg filter count=%v does is not equal "+
- "after deserialization, want: %v, got %v",
- i, f, f2)
- }
-}
-
-var (
- chanID = lnwire.NewChanIDFromOutPoint(&wire.OutPoint{})
-
- adds = []channeldb.LogUpdate{
- {
- LogIndex: 0,
- UpdateMsg: &lnwire.UpdateAddHTLC{
- ChanID: chanID,
- ID: 1,
- Amount: 100,
- Expiry: 1000,
- PaymentHash: [32]byte{0},
- },
- },
- {
- LogIndex: 1,
- UpdateMsg: &lnwire.UpdateAddHTLC{
- ChanID: chanID,
- ID: 1,
- Amount: 101,
- Expiry: 1001,
- PaymentHash: [32]byte{1},
- },
- },
- }
-
- settleFails = []channeldb.LogUpdate{
- {
- LogIndex: 2,
- UpdateMsg: &lnwire.UpdateFulfillHTLC{
- ChanID: chanID,
- ID: 0,
- PaymentPreimage: [32]byte{0},
- },
- },
- {
- LogIndex: 3,
- UpdateMsg: &lnwire.UpdateFailHTLC{
- ChanID: chanID,
- ID: 1,
- Reason: []byte{},
- },
- },
- }
-)
-
-// TestPackagerEmptyFwdPkg checks that the state transitions exhibited by a
-// forwarding package that contains no adds, fails or settles. We expect that
-// the fwdpkg reaches FwdStateCompleted immediately after writing the forwarding
-// decision via SetFwdFilter.
-func TestPackagerEmptyFwdPkg(t *testing.T) {
- t.Parallel()
-
- db := makeFwdPkgDB(t, "")
-
- shortChanID := lnwire.NewShortChanIDFromInt(1)
- packager := channeldb.NewChannelPackager(shortChanID)
-
- // To begin, there should be no forwarding packages on disk.
- fwdPkgs := loadFwdPkgs(t, db, packager)
- if len(fwdPkgs) != 0 {
- t.Fatalf("no forwarding packages should exist, found %d", len(fwdPkgs))
- }
-
- // Next, create and write a new forwarding package with no htlcs.
- fwdPkg := channeldb.NewFwdPkg(shortChanID, 0, nil, nil)
-
- if err := kvdb.Update(db, func(tx kvdb.RwTx) er.R {
- return packager.AddFwdPkg(tx, fwdPkg)
- }, func() {}); err != nil {
- t.Fatalf("unable to add fwd pkg: %v", err)
- }
-
- // There should now be one fwdpkg on disk. Since no forwarding decision
- // has been written, we expect it to be FwdStateLockedIn. With no HTLCs,
- // the ack filter will have no elements, and should always return true.
- fwdPkgs = loadFwdPkgs(t, db, packager)
- if len(fwdPkgs) != 1 {
- t.Fatalf("expected 1 fwdpkg, instead found %d", len(fwdPkgs))
- }
- assertFwdPkgState(t, fwdPkgs[0], channeldb.FwdStateLockedIn)
- assertFwdPkgNumAddsSettleFails(t, fwdPkgs[0], 0, 0)
- assertAckFilterIsFull(t, fwdPkgs[0], true)
-
- // Now, write the forwarding decision. In this case, its just an empty
- // fwd filter.
- if err := kvdb.Update(db, func(tx kvdb.RwTx) er.R {
- return packager.SetFwdFilter(tx, fwdPkg.Height, fwdPkg.FwdFilter)
- }, func() {}); err != nil {
- t.Fatalf("unable to set fwdfiter: %v", err)
- }
-
- // We should still have one package on disk. Since the forwarding
- // decision has been written, it will minimally be in FwdStateProcessed.
- // However with no htlcs, it should leap frog to FwdStateCompleted.
- fwdPkgs = loadFwdPkgs(t, db, packager)
- if len(fwdPkgs) != 1 {
- t.Fatalf("expected 1 fwdpkg, instead found %d", len(fwdPkgs))
- }
- assertFwdPkgState(t, fwdPkgs[0], channeldb.FwdStateCompleted)
- assertFwdPkgNumAddsSettleFails(t, fwdPkgs[0], 0, 0)
- assertAckFilterIsFull(t, fwdPkgs[0], true)
-
- // Lastly, remove the completed forwarding package from disk.
- if err := kvdb.Update(db, func(tx kvdb.RwTx) er.R {
- return packager.RemovePkg(tx, fwdPkg.Height)
- }, func() {}); err != nil {
- t.Fatalf("unable to remove fwdpkg: %v", err)
- }
-
- // Check that the fwd package was actually removed.
- fwdPkgs = loadFwdPkgs(t, db, packager)
- if len(fwdPkgs) != 0 {
- t.Fatalf("no forwarding packages should exist, found %d", len(fwdPkgs))
- }
-}
-
-// TestPackagerOnlyAdds checks that the fwdpkg does not reach FwdStateCompleted
-// as soon as all the adds in the package have been acked using AckAddHtlcs.
-func TestPackagerOnlyAdds(t *testing.T) {
- t.Parallel()
-
- db := makeFwdPkgDB(t, "")
-
- shortChanID := lnwire.NewShortChanIDFromInt(1)
- packager := channeldb.NewChannelPackager(shortChanID)
-
- // To begin, there should be no forwarding packages on disk.
- fwdPkgs := loadFwdPkgs(t, db, packager)
- if len(fwdPkgs) != 0 {
- t.Fatalf("no forwarding packages should exist, found %d", len(fwdPkgs))
- }
-
- // Next, create and write a new forwarding package that only has add
- // htlcs.
- fwdPkg := channeldb.NewFwdPkg(shortChanID, 0, adds, nil)
-
- nAdds := len(adds)
-
- if err := kvdb.Update(db, func(tx kvdb.RwTx) er.R {
- return packager.AddFwdPkg(tx, fwdPkg)
- }, func() {}); err != nil {
- t.Fatalf("unable to add fwd pkg: %v", err)
- }
-
- // There should now be one fwdpkg on disk. Since no forwarding decision
- // has been written, we expect it to be FwdStateLockedIn. The package
- // has unacked add HTLCs, so the ack filter should not be full.
- fwdPkgs = loadFwdPkgs(t, db, packager)
- if len(fwdPkgs) != 1 {
- t.Fatalf("expected 1 fwdpkg, instead found %d", len(fwdPkgs))
- }
- assertFwdPkgState(t, fwdPkgs[0], channeldb.FwdStateLockedIn)
- assertFwdPkgNumAddsSettleFails(t, fwdPkgs[0], nAdds, 0)
- assertAckFilterIsFull(t, fwdPkgs[0], false)
-
- // Now, write the forwarding decision. Since we have not explicitly
- // added any adds to the fwdfilter, this would indicate that all of the
- // adds were 1) settled locally by this link (exit hop), or 2) the htlc
- // was failed locally.
- if err := kvdb.Update(db, func(tx kvdb.RwTx) er.R {
- return packager.SetFwdFilter(tx, fwdPkg.Height, fwdPkg.FwdFilter)
- }, func() {}); err != nil {
- t.Fatalf("unable to set fwdfiter: %v", err)
- }
-
- for i := range adds {
- // We should still have one package on disk. Since the forwarding
- // decision has been written, it will minimally be in FwdStateProcessed.
- // However not allf of the HTLCs have been acked, so should not
- // have advanced further.
- fwdPkgs = loadFwdPkgs(t, db, packager)
- if len(fwdPkgs) != 1 {
- t.Fatalf("expected 1 fwdpkg, instead found %d", len(fwdPkgs))
- }
- assertFwdPkgState(t, fwdPkgs[0], channeldb.FwdStateProcessed)
- assertFwdPkgNumAddsSettleFails(t, fwdPkgs[0], nAdds, 0)
- assertAckFilterIsFull(t, fwdPkgs[0], false)
-
- addRef := channeldb.AddRef{
- Height: fwdPkg.Height,
- Index: uint16(i),
- }
-
- if err := kvdb.Update(db, func(tx kvdb.RwTx) er.R {
- return packager.AckAddHtlcs(tx, addRef)
- }, func() {}); err != nil {
- t.Fatalf("unable to ack add htlc: %v", err)
- }
- }
-
- // We should still have one package on disk. Now that all adds have been
- // acked, the ack filter should return true and the package should be
- // FwdStateCompleted since there are no other settle/fail packets.
- fwdPkgs = loadFwdPkgs(t, db, packager)
- if len(fwdPkgs) != 1 {
- t.Fatalf("expected 1 fwdpkg, instead found %d", len(fwdPkgs))
- }
- assertFwdPkgState(t, fwdPkgs[0], channeldb.FwdStateCompleted)
- assertFwdPkgNumAddsSettleFails(t, fwdPkgs[0], nAdds, 0)
- assertAckFilterIsFull(t, fwdPkgs[0], true)
-
- // Lastly, remove the completed forwarding package from disk.
- if err := kvdb.Update(db, func(tx kvdb.RwTx) er.R {
- return packager.RemovePkg(tx, fwdPkg.Height)
- }, func() {}); err != nil {
- t.Fatalf("unable to remove fwdpkg: %v", err)
- }
-
- // Check that the fwd package was actually removed.
- fwdPkgs = loadFwdPkgs(t, db, packager)
- if len(fwdPkgs) != 0 {
- t.Fatalf("no forwarding packages should exist, found %d", len(fwdPkgs))
- }
-}
-
-// TestPackagerOnlySettleFails asserts that the fwdpkg remains in
-// FwdStateProcessed after writing the forwarding decision when there are no
-// adds in the fwdpkg. We expect this because an empty FwdFilter will always
-// return true, but we are still waiting for the remaining fails and settles to
-// be deleted.
-func TestPackagerOnlySettleFails(t *testing.T) {
- t.Parallel()
-
- db := makeFwdPkgDB(t, "")
-
- shortChanID := lnwire.NewShortChanIDFromInt(1)
- packager := channeldb.NewChannelPackager(shortChanID)
-
- // To begin, there should be no forwarding packages on disk.
- fwdPkgs := loadFwdPkgs(t, db, packager)
- if len(fwdPkgs) != 0 {
- t.Fatalf("no forwarding packages should exist, found %d", len(fwdPkgs))
- }
-
- // Next, create and write a new forwarding package that only has add
- // htlcs.
- fwdPkg := channeldb.NewFwdPkg(shortChanID, 0, nil, settleFails)
-
- nSettleFails := len(settleFails)
-
- if err := kvdb.Update(db, func(tx kvdb.RwTx) er.R {
- return packager.AddFwdPkg(tx, fwdPkg)
- }, func() {}); err != nil {
- t.Fatalf("unable to add fwd pkg: %v", err)
- }
-
- // There should now be one fwdpkg on disk. Since no forwarding decision
- // has been written, we expect it to be FwdStateLockedIn. The package
- // has unacked add HTLCs, so the ack filter should not be full.
- fwdPkgs = loadFwdPkgs(t, db, packager)
- if len(fwdPkgs) != 1 {
- t.Fatalf("expected 1 fwdpkg, instead found %d", len(fwdPkgs))
- }
- assertFwdPkgState(t, fwdPkgs[0], channeldb.FwdStateLockedIn)
- assertFwdPkgNumAddsSettleFails(t, fwdPkgs[0], 0, nSettleFails)
- assertAckFilterIsFull(t, fwdPkgs[0], true)
-
- // Now, write the forwarding decision. Since we have not explicitly
- // added any adds to the fwdfilter, this would indicate that all of the
- // adds were 1) settled locally by this link (exit hop), or 2) the htlc
- // was failed locally.
- if err := kvdb.Update(db, func(tx kvdb.RwTx) er.R {
- return packager.SetFwdFilter(tx, fwdPkg.Height, fwdPkg.FwdFilter)
- }, func() {}); err != nil {
- t.Fatalf("unable to set fwdfiter: %v", err)
- }
-
- for i := range settleFails {
- // We should still have one package on disk. Since the
- // forwarding decision has been written, it will minimally be in
- // FwdStateProcessed. However, not all of the HTLCs have been
- // acked, so should not have advanced further.
- fwdPkgs = loadFwdPkgs(t, db, packager)
- if len(fwdPkgs) != 1 {
- t.Fatalf("expected 1 fwdpkg, instead found %d", len(fwdPkgs))
- }
- assertFwdPkgState(t, fwdPkgs[0], channeldb.FwdStateProcessed)
- assertFwdPkgNumAddsSettleFails(t, fwdPkgs[0], 0, nSettleFails)
- assertSettleFailFilterIsFull(t, fwdPkgs[0], false)
- assertAckFilterIsFull(t, fwdPkgs[0], true)
-
- failSettleRef := channeldb.SettleFailRef{
- Source: shortChanID,
- Height: fwdPkg.Height,
- Index: uint16(i),
- }
-
- if err := kvdb.Update(db, func(tx kvdb.RwTx) er.R {
- return packager.AckSettleFails(tx, failSettleRef)
- }, func() {}); err != nil {
- t.Fatalf("unable to ack add htlc: %v", err)
- }
- }
-
- // We should still have one package on disk. Now that all settles and
- // fails have been removed, package should be FwdStateCompleted since
- // there are no other add packets.
- fwdPkgs = loadFwdPkgs(t, db, packager)
- if len(fwdPkgs) != 1 {
- t.Fatalf("expected 1 fwdpkg, instead found %d", len(fwdPkgs))
- }
- assertFwdPkgState(t, fwdPkgs[0], channeldb.FwdStateCompleted)
- assertFwdPkgNumAddsSettleFails(t, fwdPkgs[0], 0, nSettleFails)
- assertSettleFailFilterIsFull(t, fwdPkgs[0], true)
- assertAckFilterIsFull(t, fwdPkgs[0], true)
-
- // Lastly, remove the completed forwarding package from disk.
- if err := kvdb.Update(db, func(tx kvdb.RwTx) er.R {
- return packager.RemovePkg(tx, fwdPkg.Height)
- }, func() {}); err != nil {
- t.Fatalf("unable to remove fwdpkg: %v", err)
- }
-
- // Check that the fwd package was actually removed.
- fwdPkgs = loadFwdPkgs(t, db, packager)
- if len(fwdPkgs) != 0 {
- t.Fatalf("no forwarding packages should exist, found %d", len(fwdPkgs))
- }
-}
-
-// TestPackagerAddsThenSettleFails writes a fwdpkg containing both adds and
-// settle/fails, then checks the behavior when the adds are acked before any of
-// the settle fails. Here we expect pkg to remain in FwdStateProcessed while the
-// remainder of the fail/settles are being deleted.
-func TestPackagerAddsThenSettleFails(t *testing.T) {
- t.Parallel()
-
- db := makeFwdPkgDB(t, "")
-
- shortChanID := lnwire.NewShortChanIDFromInt(1)
- packager := channeldb.NewChannelPackager(shortChanID)
-
- // To begin, there should be no forwarding packages on disk.
- fwdPkgs := loadFwdPkgs(t, db, packager)
- if len(fwdPkgs) != 0 {
- t.Fatalf("no forwarding packages should exist, found %d", len(fwdPkgs))
- }
-
- // Next, create and write a new forwarding package that only has add
- // htlcs.
- fwdPkg := channeldb.NewFwdPkg(shortChanID, 0, adds, settleFails)
-
- nAdds := len(adds)
- nSettleFails := len(settleFails)
-
- if err := kvdb.Update(db, func(tx kvdb.RwTx) er.R {
- return packager.AddFwdPkg(tx, fwdPkg)
- }, func() {}); err != nil {
- t.Fatalf("unable to add fwd pkg: %v", err)
- }
-
- // There should now be one fwdpkg on disk. Since no forwarding decision
- // has been written, we expect it to be FwdStateLockedIn. The package
- // has unacked add HTLCs, so the ack filter should not be full.
- fwdPkgs = loadFwdPkgs(t, db, packager)
- if len(fwdPkgs) != 1 {
- t.Fatalf("expected 1 fwdpkg, instead found %d", len(fwdPkgs))
- }
- assertFwdPkgState(t, fwdPkgs[0], channeldb.FwdStateLockedIn)
- assertFwdPkgNumAddsSettleFails(t, fwdPkgs[0], nAdds, nSettleFails)
- assertAckFilterIsFull(t, fwdPkgs[0], false)
-
- // Now, write the forwarding decision. Since we have not explicitly
- // added any adds to the fwdfilter, this would indicate that all of the
- // adds were 1) settled locally by this link (exit hop), or 2) the htlc
- // was failed locally.
- if err := kvdb.Update(db, func(tx kvdb.RwTx) er.R {
- return packager.SetFwdFilter(tx, fwdPkg.Height, fwdPkg.FwdFilter)
- }, func() {}); err != nil {
- t.Fatalf("unable to set fwdfiter: %v", err)
- }
-
- for i := range adds {
- // We should still have one package on disk. Since the forwarding
- // decision has been written, it will minimally be in FwdStateProcessed.
- // However not allf of the HTLCs have been acked, so should not
- // have advanced further.
- fwdPkgs = loadFwdPkgs(t, db, packager)
- if len(fwdPkgs) != 1 {
- t.Fatalf("expected 1 fwdpkg, instead found %d", len(fwdPkgs))
- }
- assertFwdPkgState(t, fwdPkgs[0], channeldb.FwdStateProcessed)
- assertFwdPkgNumAddsSettleFails(t, fwdPkgs[0], nAdds, nSettleFails)
- assertSettleFailFilterIsFull(t, fwdPkgs[0], false)
- assertAckFilterIsFull(t, fwdPkgs[0], false)
-
- addRef := channeldb.AddRef{
- Height: fwdPkg.Height,
- Index: uint16(i),
- }
-
- if err := kvdb.Update(db, func(tx kvdb.RwTx) er.R {
- return packager.AckAddHtlcs(tx, addRef)
- }, func() {}); err != nil {
- t.Fatalf("unable to ack add htlc: %v", err)
- }
- }
-
- for i := range settleFails {
- // We should still have one package on disk. Since the
- // forwarding decision has been written, it will minimally be in
- // FwdStateProcessed. However not allf of the HTLCs have been
- // acked, so should not have advanced further.
- fwdPkgs = loadFwdPkgs(t, db, packager)
- if len(fwdPkgs) != 1 {
- t.Fatalf("expected 1 fwdpkg, instead found %d", len(fwdPkgs))
- }
- assertFwdPkgState(t, fwdPkgs[0], channeldb.FwdStateProcessed)
- assertFwdPkgNumAddsSettleFails(t, fwdPkgs[0], nAdds, nSettleFails)
- assertSettleFailFilterIsFull(t, fwdPkgs[0], false)
- assertAckFilterIsFull(t, fwdPkgs[0], true)
-
- failSettleRef := channeldb.SettleFailRef{
- Source: shortChanID,
- Height: fwdPkg.Height,
- Index: uint16(i),
- }
-
- if err := kvdb.Update(db, func(tx kvdb.RwTx) er.R {
- return packager.AckSettleFails(tx, failSettleRef)
- }, func() {}); err != nil {
- t.Fatalf("unable to remove settle/fail htlc: %v", err)
- }
- }
-
- // We should still have one package on disk. Now that all settles and
- // fails have been removed, package should be FwdStateCompleted since
- // there are no other add packets.
- fwdPkgs = loadFwdPkgs(t, db, packager)
- if len(fwdPkgs) != 1 {
- t.Fatalf("expected 1 fwdpkg, instead found %d", len(fwdPkgs))
- }
- assertFwdPkgState(t, fwdPkgs[0], channeldb.FwdStateCompleted)
- assertFwdPkgNumAddsSettleFails(t, fwdPkgs[0], nAdds, nSettleFails)
- assertSettleFailFilterIsFull(t, fwdPkgs[0], true)
- assertAckFilterIsFull(t, fwdPkgs[0], true)
-
- // Lastly, remove the completed forwarding package from disk.
- if err := kvdb.Update(db, func(tx kvdb.RwTx) er.R {
- return packager.RemovePkg(tx, fwdPkg.Height)
- }, func() {}); err != nil {
- t.Fatalf("unable to remove fwdpkg: %v", err)
- }
-
- // Check that the fwd package was actually removed.
- fwdPkgs = loadFwdPkgs(t, db, packager)
- if len(fwdPkgs) != 0 {
- t.Fatalf("no forwarding packages should exist, found %d", len(fwdPkgs))
- }
-}
-
-// TestPackagerSettleFailsThenAdds writes a fwdpkg with both adds and
-// settle/fails, then checks the behavior when the settle/fails are removed
-// before any of the adds have been acked. This should cause the fwdpkg to
-// remain in FwdStateProcessed until the final ack is recorded, at which point
-// it should be promoted directly to FwdStateCompleted.since all adds have been
-// removed.
-func TestPackagerSettleFailsThenAdds(t *testing.T) {
- t.Parallel()
-
- db := makeFwdPkgDB(t, "")
-
- shortChanID := lnwire.NewShortChanIDFromInt(1)
- packager := channeldb.NewChannelPackager(shortChanID)
-
- // To begin, there should be no forwarding packages on disk.
- fwdPkgs := loadFwdPkgs(t, db, packager)
- if len(fwdPkgs) != 0 {
- t.Fatalf("no forwarding packages should exist, found %d", len(fwdPkgs))
- }
-
- // Next, create and write a new forwarding package that has both add
- // and settle/fail htlcs.
- fwdPkg := channeldb.NewFwdPkg(shortChanID, 0, adds, settleFails)
-
- nAdds := len(adds)
- nSettleFails := len(settleFails)
-
- if err := kvdb.Update(db, func(tx kvdb.RwTx) er.R {
- return packager.AddFwdPkg(tx, fwdPkg)
- }, func() {}); err != nil {
- t.Fatalf("unable to add fwd pkg: %v", err)
- }
-
- // There should now be one fwdpkg on disk. Since no forwarding decision
- // has been written, we expect it to be FwdStateLockedIn. The package
- // has unacked add HTLCs, so the ack filter should not be full.
- fwdPkgs = loadFwdPkgs(t, db, packager)
- if len(fwdPkgs) != 1 {
- t.Fatalf("expected 1 fwdpkg, instead found %d", len(fwdPkgs))
- }
- assertFwdPkgState(t, fwdPkgs[0], channeldb.FwdStateLockedIn)
- assertFwdPkgNumAddsSettleFails(t, fwdPkgs[0], nAdds, nSettleFails)
- assertAckFilterIsFull(t, fwdPkgs[0], false)
-
- // Now, write the forwarding decision. Since we have not explicitly
- // added any adds to the fwdfilter, this would indicate that all of the
- // adds were 1) settled locally by this link (exit hop), or 2) the htlc
- // was failed locally.
- if err := kvdb.Update(db, func(tx kvdb.RwTx) er.R {
- return packager.SetFwdFilter(tx, fwdPkg.Height, fwdPkg.FwdFilter)
- }, func() {}); err != nil {
- t.Fatalf("unable to set fwdfiter: %v", err)
- }
-
- // Simulate another channel deleting the settle/fails it received from
- // the original fwd pkg.
- // TODO(conner): use different packager/s?
- for i := range settleFails {
- // We should still have one package on disk. Since the
- // forwarding decision has been written, it will minimally be in
- // FwdStateProcessed. However none all of the add HTLCs have
- // been acked, so should not have advanced further.
- fwdPkgs = loadFwdPkgs(t, db, packager)
- if len(fwdPkgs) != 1 {
- t.Fatalf("expected 1 fwdpkg, instead found %d", len(fwdPkgs))
- }
- assertFwdPkgState(t, fwdPkgs[0], channeldb.FwdStateProcessed)
- assertFwdPkgNumAddsSettleFails(t, fwdPkgs[0], nAdds, nSettleFails)
- assertSettleFailFilterIsFull(t, fwdPkgs[0], false)
- assertAckFilterIsFull(t, fwdPkgs[0], false)
-
- failSettleRef := channeldb.SettleFailRef{
- Source: shortChanID,
- Height: fwdPkg.Height,
- Index: uint16(i),
- }
-
- if err := kvdb.Update(db, func(tx kvdb.RwTx) er.R {
- return packager.AckSettleFails(tx, failSettleRef)
- }, func() {}); err != nil {
- t.Fatalf("unable to remove settle/fail htlc: %v", err)
- }
- }
-
- // Now simulate this channel receiving a fail/settle for the adds in the
- // fwdpkg.
- for i := range adds {
- // Again, we should still have one package on disk and be in
- // FwdStateProcessed. This should not change until all of the
- // add htlcs have been acked.
- fwdPkgs = loadFwdPkgs(t, db, packager)
- if len(fwdPkgs) != 1 {
- t.Fatalf("expected 1 fwdpkg, instead found %d", len(fwdPkgs))
- }
- assertFwdPkgState(t, fwdPkgs[0], channeldb.FwdStateProcessed)
- assertFwdPkgNumAddsSettleFails(t, fwdPkgs[0], nAdds, nSettleFails)
- assertSettleFailFilterIsFull(t, fwdPkgs[0], true)
- assertAckFilterIsFull(t, fwdPkgs[0], false)
-
- addRef := channeldb.AddRef{
- Height: fwdPkg.Height,
- Index: uint16(i),
- }
-
- if err := kvdb.Update(db, func(tx kvdb.RwTx) er.R {
- return packager.AckAddHtlcs(tx, addRef)
- }, func() {}); err != nil {
- t.Fatalf("unable to ack add htlc: %v", err)
- }
- }
-
- // We should still have one package on disk. Now that all settles and
- // fails have been removed, package should be FwdStateCompleted since
- // there are no other add packets.
- fwdPkgs = loadFwdPkgs(t, db, packager)
- if len(fwdPkgs) != 1 {
- t.Fatalf("expected 1 fwdpkg, instead found %d", len(fwdPkgs))
- }
- assertFwdPkgState(t, fwdPkgs[0], channeldb.FwdStateCompleted)
- assertFwdPkgNumAddsSettleFails(t, fwdPkgs[0], nAdds, nSettleFails)
- assertSettleFailFilterIsFull(t, fwdPkgs[0], true)
- assertAckFilterIsFull(t, fwdPkgs[0], true)
-
- // Lastly, remove the completed forwarding package from disk.
- if err := kvdb.Update(db, func(tx kvdb.RwTx) er.R {
- return packager.RemovePkg(tx, fwdPkg.Height)
- }, func() {}); err != nil {
- t.Fatalf("unable to remove fwdpkg: %v", err)
- }
-
- // Check that the fwd package was actually removed.
- fwdPkgs = loadFwdPkgs(t, db, packager)
- if len(fwdPkgs) != 0 {
- t.Fatalf("no forwarding packages should exist, found %d", len(fwdPkgs))
- }
-}
-
-// assertFwdPkgState checks the current state of a fwdpkg meets our
-// expectations.
-func assertFwdPkgState(t *testing.T, fwdPkg *channeldb.FwdPkg,
- state channeldb.FwdState) {
- _, _, line, _ := runtime.Caller(1)
- if fwdPkg.State != state {
- t.Fatalf("line %d: expected fwdpkg in state %v, found %v",
- line, state, fwdPkg.State)
- }
-}
-
-// assertFwdPkgNumAddsSettleFails checks that the number of adds and
-// settle/fail log updates are correct.
-func assertFwdPkgNumAddsSettleFails(t *testing.T, fwdPkg *channeldb.FwdPkg,
- expectedNumAdds, expectedNumSettleFails int) {
- _, _, line, _ := runtime.Caller(1)
- if len(fwdPkg.Adds) != expectedNumAdds {
- t.Fatalf("line %d: expected fwdpkg to have %d adds, found %d",
- line, expectedNumAdds, len(fwdPkg.Adds))
- }
-
- if len(fwdPkg.SettleFails) != expectedNumSettleFails {
- t.Fatalf("line %d: expected fwdpkg to have %d settle/fails, found %d",
- line, expectedNumSettleFails, len(fwdPkg.SettleFails))
- }
-}
-
-// assertAckFilterIsFull checks whether or not a fwdpkg's ack filter matches our
-// expected full-ness.
-func assertAckFilterIsFull(t *testing.T, fwdPkg *channeldb.FwdPkg, expected bool) {
- _, _, line, _ := runtime.Caller(1)
- if fwdPkg.AckFilter.IsFull() != expected {
- t.Fatalf("line %d: expected fwdpkg ack filter IsFull to be %v, "+
- "found %v", line, expected, fwdPkg.AckFilter.IsFull())
- }
-}
-
-// assertSettleFailFilterIsFull checks whether or not a fwdpkg's settle fail
-// filter matches our expected full-ness.
-func assertSettleFailFilterIsFull(t *testing.T, fwdPkg *channeldb.FwdPkg, expected bool) {
- _, _, line, _ := runtime.Caller(1)
- if fwdPkg.SettleFailFilter.IsFull() != expected {
- t.Fatalf("line %d: expected fwdpkg settle/fail filter IsFull to be %v, "+
- "found %v", line, expected, fwdPkg.SettleFailFilter.IsFull())
- }
-}
-
-// loadFwdPkgs is a helper method that reads all forwarding packages for a
-// particular packager.
-func loadFwdPkgs(t *testing.T, db kvdb.Backend,
- packager channeldb.FwdPackager) []*channeldb.FwdPkg {
-
- var fwdPkgs []*channeldb.FwdPkg
- if err := kvdb.View(db, func(tx kvdb.RTx) er.R {
- var err er.R
- fwdPkgs, err = packager.LoadFwdPkgs(tx)
- return err
- }, func() {
- fwdPkgs = nil
- }); err != nil {
- t.Fatalf("unable to load fwd pkgs: %v", err)
- }
-
- return fwdPkgs
-}
-
-// makeFwdPkgDB initializes a test database for forwarding packages. If the
-// provided path is an empty, it will create a temp dir/file to use.
-func makeFwdPkgDB(t *testing.T, path string) kvdb.Backend { // nolint:unparam
- if path == "" {
- var err error
- path, err = ioutil.TempDir("", "fwdpkgdb")
- if err != nil {
- t.Fatalf("unable to create temp path: %v", err)
- }
-
- path = filepath.Join(path, "fwdpkg.db")
- }
-
- bdb, err := kvdb.Create(kvdb.BoltBackendName, path, true)
- if err != nil {
- t.Fatalf("unable to open boltdb: %v", err)
- }
-
- return bdb
-}
diff --git a/lnd/channeldb/graph.go b/lnd/channeldb/graph.go
deleted file mode 100644
index 5f6b16d3..00000000
--- a/lnd/channeldb/graph.go
+++ /dev/null
@@ -1,4120 +0,0 @@
-package channeldb
-
-import (
- "bytes"
- "crypto/sha256"
- "encoding/binary"
- "fmt"
- "image/color"
- "io"
- "math"
- "net"
- "sync"
- "time"
-
- "github.com/pkt-cash/pktd/btcec"
- "github.com/pkt-cash/pktd/btcutil"
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/btcutil/util"
- "github.com/pkt-cash/pktd/chaincfg/chainhash"
- "github.com/pkt-cash/pktd/lnd/channeldb/kvdb"
- "github.com/pkt-cash/pktd/lnd/lnwire"
- "github.com/pkt-cash/pktd/lnd/routing/route"
- "github.com/pkt-cash/pktd/pktlog/log"
- "github.com/pkt-cash/pktd/txscript/opcode"
- "github.com/pkt-cash/pktd/txscript/scriptbuilder"
- "github.com/pkt-cash/pktd/wire"
-)
-
-var (
- // nodeBucket is a bucket which houses all the vertices or nodes within
- // the channel graph. This bucket has a single-sub bucket which adds an
- // additional index from pubkey -> alias. Within the top-level of this
- // bucket, the key space maps a node's compressed public key to the
- // serialized information for that node. Additionally, there's a
- // special key "source" which stores the pubkey of the source node. The
- // source node is used as the starting point for all graph/queries and
- // traversals. The graph is formed as a star-graph with the source node
- // at the center.
- //
- // maps: pubKey -> nodeInfo
- // maps: source -> selfPubKey
- nodeBucket = []byte("graph-node")
-
- // nodeUpdateIndexBucket is a sub-bucket of the nodeBucket. This bucket
- // will be used to quickly look up the "freshness" of a node's last
- // update to the network. The bucket only contains keys, and no values,
- // it's mapping:
- //
- // maps: updateTime || nodeID -> nil
- nodeUpdateIndexBucket = []byte("graph-node-update-index")
-
- // sourceKey is a special key that resides within the nodeBucket. The
- // sourceKey maps a key to the public key of the "self node".
- sourceKey = []byte("source")
-
- // aliasIndexBucket is a sub-bucket that's nested within the main
- // nodeBucket. This bucket maps the public key of a node to its
- // current alias. This bucket is provided as it can be used within a
- // future UI layer to add an additional degree of confirmation.
- aliasIndexBucket = []byte("alias")
-
- // edgeBucket is a bucket which houses all of the edge or channel
- // information within the channel graph. This bucket essentially acts
- // as an adjacency list, which in conjunction with a range scan, can be
- // used to iterate over all the incoming and outgoing edges for a
- // particular node. Key in the bucket use a prefix scheme which leads
- // with the node's public key and sends with the compact edge ID.
- // For each chanID, there will be two entries within the bucket, as the
- // graph is directed: nodes may have different policies w.r.t to fees
- // for their respective directions.
- //
- // maps: pubKey || chanID -> channel edge policy for node
- edgeBucket = []byte("graph-edge")
-
- // unknownPolicy is represented as an empty slice. It is
- // used as the value in edgeBucket for unknown channel edge policies.
- // Unknown policies are still stored in the database to enable efficient
- // lookup of incoming channel edges.
- unknownPolicy = []byte{}
-
- // chanStart is an array of all zero bytes which is used to perform
- // range scans within the edgeBucket to obtain all of the outgoing
- // edges for a particular node.
- chanStart [8]byte
-
- // edgeIndexBucket is an index which can be used to iterate all edges
- // in the bucket, grouping them according to their in/out nodes.
- // Additionally, the items in this bucket also contain the complete
- // edge information for a channel. The edge information includes the
- // capacity of the channel, the nodes that made the channel, etc. This
- // bucket resides within the edgeBucket above. Creation of an edge
- // proceeds in two phases: first the edge is added to the edge index,
- // afterwards the edgeBucket can be updated with the latest details of
- // the edge as they are announced on the network.
- //
- // maps: chanID -> pubKey1 || pubKey2 || restofEdgeInfo
- edgeIndexBucket = []byte("edge-index")
-
- // edgeUpdateIndexBucket is a sub-bucket of the main edgeBucket. This
- // bucket contains an index which allows us to gauge the "freshness" of
- // a channel's last updates.
- //
- // maps: updateTime || chanID -> nil
- edgeUpdateIndexBucket = []byte("edge-update-index")
-
- // channelPointBucket maps a channel's full outpoint (txid:index) to
- // its short 8-byte channel ID. This bucket resides within the
- // edgeBucket above, and can be used to quickly remove an edge due to
- // the outpoint being spent, or to query for existence of a channel.
- //
- // maps: outPoint -> chanID
- channelPointBucket = []byte("chan-index")
-
- // zombieBucket is a sub-bucket of the main edgeBucket bucket
- // responsible for maintaining an index of zombie channels. Each entry
- // exists within the bucket as follows:
- //
- // maps: chanID -> pubKey1 || pubKey2
- //
- // The chanID represents the channel ID of the edge that is marked as a
- // zombie and is used as the key, which maps to the public keys of the
- // edge's participants.
- zombieBucket = []byte("zombie-index")
-
- // disabledEdgePolicyBucket is a sub-bucket of the main edgeBucket bucket
- // responsible for maintaining an index of disabled edge policies. Each
- // entry exists within the bucket as follows:
- //
- // maps: -> []byte{}
- //
- // The chanID represents the channel ID of the edge and the direction is
- // one byte representing the direction of the edge. The main purpose of
- // this index is to allow pruning disabled channels in a fast way without
- // the need to iterate all over the graph.
- disabledEdgePolicyBucket = []byte("disabled-edge-policy-index")
-
- // graphMetaBucket is a top-level bucket which stores various meta-deta
- // related to the on-disk channel graph. Data stored in this bucket
- // includes the block to which the graph has been synced to, the total
- // number of channels, etc.
- graphMetaBucket = []byte("graph-meta")
-
- // pruneLogBucket is a bucket within the graphMetaBucket that stores
- // a mapping from the block height to the hash for the blocks used to
- // prune the graph.
- // Once a new block is discovered, any channels that have been closed
- // (by spending the outpoint) can safely be removed from the graph, and
- // the block is added to the prune log. We need to keep such a log for
- // the case where a reorg happens, and we must "rewind" the state of the
- // graph by removing channels that were previously confirmed. In such a
- // case we'll remove all entries from the prune log with a block height
- // that no longer exists.
- pruneLogBucket = []byte("prune-log")
-)
-
-const (
- // MaxAllowedExtraOpaqueBytes is the largest amount of opaque bytes that
- // we'll permit to be written to disk. We limit this as otherwise, it
- // would be possible for a node to create a ton of updates and slowly
- // fill our disk, and also waste bandwidth due to relaying.
- MaxAllowedExtraOpaqueBytes = 10000
-
- // feeRateParts is the total number of parts used to express fee rates.
- feeRateParts = 1e6
-)
-
-// ChannelGraph is a persistent, on-disk graph representation of the Lightning
-// Network. This struct can be used to implement path finding algorithms on top
-// of, and also to update a node's view based on information received from the
-// p2p network. Internally, the graph is stored using a modified adjacency list
-// representation with some added object interaction possible with each
-// serialized edge/node. The graph is stored is directed, meaning that are two
-// edges stored for each channel: an inbound/outbound edge for each node pair.
-// Nodes, edges, and edge information can all be added to the graph
-// independently. Edge removal results in the deletion of all edge information
-// for that edge.
-type ChannelGraph struct {
- db *DB
-
- cacheMu sync.RWMutex
- rejectCache *rejectCache
- chanCache *channelCache
-}
-
-// newChannelGraph allocates a new ChannelGraph backed by a DB instance. The
-// returned instance has its own unique reject cache and channel cache.
-func newChannelGraph(db *DB, rejectCacheSize, chanCacheSize int) *ChannelGraph {
- return &ChannelGraph{
- db: db,
- rejectCache: newRejectCache(rejectCacheSize),
- chanCache: newChannelCache(chanCacheSize),
- }
-}
-
-// Database returns a pointer to the underlying database.
-func (c *ChannelGraph) Database() *DB {
- return c.db
-}
-
-// ForEachChannel iterates through all the channel edges stored within the
-// graph and invokes the passed callback for each edge. The callback takes two
-// edges as since this is a directed graph, both the in/out edges are visited.
-// If the callback returns an error, then the transaction is aborted and the
-// iteration stops early.
-//
-// NOTE: If an edge can't be found, or wasn't advertised, then a nil pointer
-// for that particular channel edge routing policy will be passed into the
-// callback.
-func (c *ChannelGraph) ForEachChannel(cb func(*ChannelEdgeInfo, *ChannelEdgePolicy, *ChannelEdgePolicy) er.R) er.R {
- // TODO(roasbeef): ptr map to reduce # of allocs? no duplicates
-
- return kvdb.View(c.db, func(tx kvdb.RTx) er.R {
- // First, grab the node bucket. This will be used to populate
- // the Node pointers in each edge read from disk.
- nodes := tx.ReadBucket(nodeBucket)
- if nodes == nil {
- return ErrGraphNotFound.Default()
- }
-
- // Next, grab the edge bucket which stores the edges, and also
- // the index itself so we can group the directed edges together
- // logically.
- edges := tx.ReadBucket(edgeBucket)
- if edges == nil {
- return ErrGraphNoEdgesFound.Default()
- }
- edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
- if edgeIndex == nil {
- return ErrGraphNoEdgesFound.Default()
- }
-
- // For each edge pair within the edge index, we fetch each edge
- // itself and also the node information in order to fully
- // populated the object.
- return edgeIndex.ForEach(func(chanID, edgeInfoBytes []byte) er.R {
- infoReader := bytes.NewReader(edgeInfoBytes)
- edgeInfo, err := deserializeChanEdgeInfo(infoReader)
- if err != nil {
- return err
- }
- edgeInfo.db = c.db
-
- edge1, edge2, err := fetchChanEdgePolicies(
- edgeIndex, edges, nodes, chanID, c.db,
- )
- if err != nil {
- return err
- }
-
- // With both edges read, execute the call back. IF this
- // function returns an error then the transaction will
- // be aborted.
- return cb(&edgeInfo, edge1, edge2)
- })
- }, func() {})
-}
-
-// ForEachNodeChannel iterates through all channels of a given node, executing the
-// passed callback with an edge info structure and the policies of each end
-// of the channel. The first edge policy is the outgoing edge *to* the
-// the connecting node, while the second is the incoming edge *from* the
-// connecting node. If the callback returns an error, then the iteration is
-// halted with the error propagated back up to the caller.
-//
-// Unknown policies are passed into the callback as nil values.
-//
-// If the caller wishes to re-use an existing boltdb transaction, then it
-// should be passed as the first argument. Otherwise the first argument should
-// be nil and a fresh transaction will be created to execute the graph
-// traversal.
-func (c *ChannelGraph) ForEachNodeChannel(tx kvdb.RTx, nodePub []byte,
- cb func(kvdb.RTx, *ChannelEdgeInfo, *ChannelEdgePolicy,
- *ChannelEdgePolicy) er.R) er.R {
-
- db := c.db
-
- return nodeTraversal(tx, nodePub, db, cb)
-}
-
-// DisabledChannelIDs returns the channel ids of disabled channels.
-// A channel is disabled when two of the associated ChanelEdgePolicies
-// have their disabled bit on.
-func (c *ChannelGraph) DisabledChannelIDs() ([]uint64, er.R) {
- var disabledChanIDs []uint64
- var chanEdgeFound map[uint64]struct{}
-
- err := kvdb.View(c.db, func(tx kvdb.RTx) er.R {
- edges := tx.ReadBucket(edgeBucket)
- if edges == nil {
- return ErrGraphNoEdgesFound.Default()
- }
-
- disabledEdgePolicyIndex := edges.NestedReadBucket(
- disabledEdgePolicyBucket,
- )
- if disabledEdgePolicyIndex == nil {
- return nil
- }
-
- // We iterate over all disabled policies and we add each channel that
- // has more than one disabled policy to disabledChanIDs array.
- return disabledEdgePolicyIndex.ForEach(func(k, v []byte) er.R {
- chanID := byteOrder.Uint64(k[:8])
- _, edgeFound := chanEdgeFound[chanID]
- if edgeFound {
- delete(chanEdgeFound, chanID)
- disabledChanIDs = append(disabledChanIDs, chanID)
- return nil
- }
-
- chanEdgeFound[chanID] = struct{}{}
- return nil
- })
- }, func() {
- disabledChanIDs = nil
- chanEdgeFound = make(map[uint64]struct{})
- })
- if err != nil {
- return nil, err
- }
-
- return disabledChanIDs, nil
-}
-
-// ForEachNode iterates through all the stored vertices/nodes in the graph,
-// executing the passed callback with each node encountered. If the callback
-// returns an error, then the transaction is aborted and the iteration stops
-// early.
-//
-// TODO(roasbeef): add iterator interface to allow for memory efficient graph
-// traversal when graph gets mega
-func (c *ChannelGraph) ForEachNode(cb func(kvdb.RTx, *LightningNode) er.R) er.R { // nolint:interfacer
- traversal := func(tx kvdb.RTx) er.R {
- // First grab the nodes bucket which stores the mapping from
- // pubKey to node information.
- nodes := tx.ReadBucket(nodeBucket)
- if nodes == nil {
- return ErrGraphNotFound.Default()
- }
-
- return nodes.ForEach(func(pubKey, nodeBytes []byte) er.R {
- // If this is the source key, then we skip this
- // iteration as the value for this key is a pubKey
- // rather than raw node information.
- if bytes.Equal(pubKey, sourceKey) || len(pubKey) != 33 {
- return nil
- }
-
- nodeReader := bytes.NewReader(nodeBytes)
- node, err := deserializeLightningNode(nodeReader)
- if err != nil {
- return err
- }
- node.db = c.db
-
- // Execute the callback, the transaction will abort if
- // this returns an error.
- return cb(tx, &node)
- })
- }
-
- return kvdb.View(c.db, traversal, func() {})
-}
-
-// SourceNode returns the source node of the graph. The source node is treated
-// as the center node within a star-graph. This method may be used to kick off
-// a path finding algorithm in order to explore the reachability of another
-// node based off the source node.
-func (c *ChannelGraph) SourceNode() (*LightningNode, er.R) {
- var source *LightningNode
- err := kvdb.View(c.db, func(tx kvdb.RTx) er.R {
- // First grab the nodes bucket which stores the mapping from
- // pubKey to node information.
- nodes := tx.ReadBucket(nodeBucket)
- if nodes == nil {
- return ErrGraphNotFound.Default()
- }
-
- node, err := c.sourceNode(nodes)
- if err != nil {
- return err
- }
- source = node
-
- return nil
- }, func() {
- source = nil
- })
- if err != nil {
- return nil, err
- }
-
- return source, nil
-}
-
-// sourceNode uses an existing database transaction and returns the source node
-// of the graph. The source node is treated as the center node within a
-// star-graph. This method may be used to kick off a path finding algorithm in
-// order to explore the reachability of another node based off the source node.
-func (c *ChannelGraph) sourceNode(nodes kvdb.RBucket) (*LightningNode, er.R) {
- selfPub := nodes.Get(sourceKey)
- if selfPub == nil {
- return nil, ErrSourceNodeNotSet.Default()
- }
-
- // With the pubKey of the source node retrieved, we're able to
- // fetch the full node information.
- node, err := fetchLightningNode(nodes, selfPub)
- if err != nil {
- return nil, err
- }
- node.db = c.db
-
- return &node, nil
-}
-
-// SetSourceNode sets the source node within the graph database. The source
-// node is to be used as the center of a star-graph within path finding
-// algorithms.
-func (c *ChannelGraph) SetSourceNode(node *LightningNode) er.R {
- nodePubBytes := node.PubKeyBytes[:]
-
- return kvdb.Update(c.db, func(tx kvdb.RwTx) er.R {
- // First grab the nodes bucket which stores the mapping from
- // pubKey to node information.
- nodes, err := tx.CreateTopLevelBucket(nodeBucket)
- if err != nil {
- return err
- }
-
- // Next we create the mapping from source to the targeted
- // public key.
- if err := nodes.Put(sourceKey, nodePubBytes); err != nil {
- return err
- }
-
- // Finally, we commit the information of the lightning node
- // itself.
- return addLightningNode(tx, node)
- }, func() {})
-}
-
-// AddLightningNode adds a vertex/node to the graph database. If the node is not
-// in the database from before, this will add a new, unconnected one to the
-// graph. If it is present from before, this will update that node's
-// information. Note that this method is expected to only be called to update
-// an already present node from a node announcement, or to insert a node found
-// in a channel update.
-//
-// TODO(roasbeef): also need sig of announcement
-func (c *ChannelGraph) AddLightningNode(node *LightningNode) er.R {
- return kvdb.Update(c.db, func(tx kvdb.RwTx) er.R {
- return addLightningNode(tx, node)
- }, func() {})
-}
-
-func addLightningNode(tx kvdb.RwTx, node *LightningNode) er.R {
- nodes, err := tx.CreateTopLevelBucket(nodeBucket)
- if err != nil {
- return err
- }
-
- aliases, err := nodes.CreateBucketIfNotExists(aliasIndexBucket)
- if err != nil {
- return err
- }
-
- updateIndex, err := nodes.CreateBucketIfNotExists(
- nodeUpdateIndexBucket,
- )
- if err != nil {
- return err
- }
-
- return putLightningNode(nodes, aliases, updateIndex, node)
-}
-
-// LookupAlias attempts to return the alias as advertised by the target node.
-// TODO(roasbeef): currently assumes that aliases are unique...
-func (c *ChannelGraph) LookupAlias(pub *btcec.PublicKey) (string, er.R) {
- var alias string
-
- err := kvdb.View(c.db, func(tx kvdb.RTx) er.R {
- nodes := tx.ReadBucket(nodeBucket)
- if nodes == nil {
- return ErrGraphNodesNotFound.Default()
- }
-
- aliases := nodes.NestedReadBucket(aliasIndexBucket)
- if aliases == nil {
- return ErrGraphNodesNotFound.Default()
- }
-
- nodePub := pub.SerializeCompressed()
- a := aliases.Get(nodePub)
- if a == nil {
- return ErrNodeAliasNotFound.Default()
- }
-
- // TODO(roasbeef): should actually be using the utf-8
- // package...
- alias = string(a)
- return nil
- }, func() {
- alias = ""
- })
- if err != nil {
- return "", err
- }
-
- return alias, nil
-}
-
-// DeleteLightningNode starts a new database transaction to remove a vertex/node
-// from the database according to the node's public key.
-func (c *ChannelGraph) DeleteLightningNode(nodePub route.Vertex) er.R {
- // TODO(roasbeef): ensure dangling edges are removed...
- return kvdb.Update(c.db, func(tx kvdb.RwTx) er.R {
- nodes := tx.ReadWriteBucket(nodeBucket)
- if nodes == nil {
- return ErrGraphNodeNotFound.Default()
- }
-
- return c.deleteLightningNode(nodes, nodePub[:])
- }, func() {})
-}
-
-// deleteLightningNode uses an existing database transaction to remove a
-// vertex/node from the database according to the node's public key.
-func (c *ChannelGraph) deleteLightningNode(nodes kvdb.RwBucket,
- compressedPubKey []byte) er.R {
-
- aliases := nodes.NestedReadWriteBucket(aliasIndexBucket)
- if aliases == nil {
- return ErrGraphNodesNotFound.Default()
- }
-
- if err := aliases.Delete(compressedPubKey); err != nil {
- return err
- }
-
- // Before we delete the node, we'll fetch its current state so we can
- // determine when its last update was to clear out the node update
- // index.
- node, err := fetchLightningNode(nodes, compressedPubKey)
- if err != nil {
- return err
- }
-
- if err := nodes.Delete(compressedPubKey); err != nil {
-
- return err
- }
-
- // Finally, we'll delete the index entry for the node within the
- // nodeUpdateIndexBucket as this node is no longer active, so we don't
- // need to track its last update.
- nodeUpdateIndex := nodes.NestedReadWriteBucket(nodeUpdateIndexBucket)
- if nodeUpdateIndex == nil {
- return ErrGraphNodesNotFound.Default()
- }
-
- // In order to delete the entry, we'll need to reconstruct the key for
- // its last update.
- updateUnix := uint64(node.LastUpdate.Unix())
- var indexKey [8 + 33]byte
- byteOrder.PutUint64(indexKey[:8], updateUnix)
- copy(indexKey[8:], compressedPubKey)
-
- return nodeUpdateIndex.Delete(indexKey[:])
-}
-
-// AddChannelEdge adds a new (undirected, blank) edge to the graph database. An
-// undirected edge from the two target nodes are created. The information
-// stored denotes the static attributes of the channel, such as the channelID,
-// the keys involved in creation of the channel, and the set of features that
-// the channel supports. The chanPoint and chanID are used to uniquely identify
-// the edge globally within the database.
-func (c *ChannelGraph) AddChannelEdge(edge *ChannelEdgeInfo) er.R {
- c.cacheMu.Lock()
- defer c.cacheMu.Unlock()
-
- err := kvdb.Update(c.db, func(tx kvdb.RwTx) er.R {
- return c.addChannelEdge(tx, edge)
- }, func() {})
- if err != nil {
- return err
- }
-
- c.rejectCache.remove(edge.ChannelID)
- c.chanCache.remove(edge.ChannelID)
-
- return nil
-}
-
-// addChannelEdge is the private form of AddChannelEdge that allows callers to
-// utilize an existing db transaction.
-func (c *ChannelGraph) addChannelEdge(tx kvdb.RwTx, edge *ChannelEdgeInfo) er.R {
- // Construct the channel's primary key which is the 8-byte channel ID.
- var chanKey [8]byte
- binary.BigEndian.PutUint64(chanKey[:], edge.ChannelID)
-
- nodes, err := tx.CreateTopLevelBucket(nodeBucket)
- if err != nil {
- return err
- }
- edges, err := tx.CreateTopLevelBucket(edgeBucket)
- if err != nil {
- return err
- }
- edgeIndex, err := edges.CreateBucketIfNotExists(edgeIndexBucket)
- if err != nil {
- return err
- }
- chanIndex, err := edges.CreateBucketIfNotExists(channelPointBucket)
- if err != nil {
- return err
- }
-
- // First, attempt to check if this edge has already been created. If
- // so, then we can exit early as this method is meant to be idempotent.
- if edgeInfo := edgeIndex.Get(chanKey[:]); edgeInfo != nil {
- return ErrEdgeAlreadyExist.Default()
- }
-
- // Before we insert the channel into the database, we'll ensure that
- // both nodes already exist in the channel graph. If either node
- // doesn't, then we'll insert a "shell" node that just includes its
- // public key, so subsequent validation and queries can work properly.
- _, node1Err := fetchLightningNode(nodes, edge.NodeKey1Bytes[:])
- switch {
- case ErrGraphNodeNotFound.Is(node1Err):
- node1Shell := LightningNode{
- PubKeyBytes: edge.NodeKey1Bytes,
- HaveNodeAnnouncement: false,
- }
- err := addLightningNode(tx, &node1Shell)
- if err != nil {
- return er.Errorf("unable to create shell node "+
- "for: %x", edge.NodeKey1Bytes)
-
- }
- case node1Err != nil:
- return err
- }
-
- _, node2Err := fetchLightningNode(nodes, edge.NodeKey2Bytes[:])
- switch {
- case ErrGraphNodeNotFound.Is(node2Err):
- node2Shell := LightningNode{
- PubKeyBytes: edge.NodeKey2Bytes,
- HaveNodeAnnouncement: false,
- }
- err := addLightningNode(tx, &node2Shell)
- if err != nil {
- return er.Errorf("unable to create shell node "+
- "for: %x", edge.NodeKey2Bytes)
-
- }
- case node2Err != nil:
- return err
- }
-
- // If the edge hasn't been created yet, then we'll first add it to the
- // edge index in order to associate the edge between two nodes and also
- // store the static components of the channel.
- if err := putChanEdgeInfo(edgeIndex, edge, chanKey); err != nil {
- return err
- }
-
- // Mark edge policies for both sides as unknown. This is to enable
- // efficient incoming channel lookup for a node.
- for _, key := range []*[33]byte{&edge.NodeKey1Bytes,
- &edge.NodeKey2Bytes} {
-
- err := putChanEdgePolicyUnknown(edges, edge.ChannelID,
- key[:])
- if err != nil {
- return err
- }
- }
-
- // Finally we add it to the channel index which maps channel points
- // (outpoints) to the shorter channel ID's.
- var b bytes.Buffer
- if err := writeOutpoint(&b, &edge.ChannelPoint); err != nil {
- return err
- }
- return chanIndex.Put(b.Bytes(), chanKey[:])
-}
-
-// HasChannelEdge returns true if the database knows of a channel edge with the
-// passed channel ID, and false otherwise. If an edge with that ID is found
-// within the graph, then two time stamps representing the last time the edge
-// was updated for both directed edges are returned along with the boolean. If
-// it is not found, then the zombie index is checked and its result is returned
-// as the second boolean.
-func (c *ChannelGraph) HasChannelEdge(
- chanID uint64) (time.Time, time.Time, bool, bool, er.R) {
-
- var (
- upd1Time time.Time
- upd2Time time.Time
- exists bool
- isZombie bool
- )
-
- // We'll query the cache with the shared lock held to allow multiple
- // readers to access values in the cache concurrently if they exist.
- c.cacheMu.RLock()
- if entry, ok := c.rejectCache.get(chanID); ok {
- c.cacheMu.RUnlock()
- upd1Time = time.Unix(entry.upd1Time, 0)
- upd2Time = time.Unix(entry.upd2Time, 0)
- exists, isZombie = entry.flags.unpack()
- return upd1Time, upd2Time, exists, isZombie, nil
- }
- c.cacheMu.RUnlock()
-
- c.cacheMu.Lock()
- defer c.cacheMu.Unlock()
-
- // The item was not found with the shared lock, so we'll acquire the
- // exclusive lock and check the cache again in case another method added
- // the entry to the cache while no lock was held.
- if entry, ok := c.rejectCache.get(chanID); ok {
- upd1Time = time.Unix(entry.upd1Time, 0)
- upd2Time = time.Unix(entry.upd2Time, 0)
- exists, isZombie = entry.flags.unpack()
- return upd1Time, upd2Time, exists, isZombie, nil
- }
-
- if err := kvdb.View(c.db, func(tx kvdb.RTx) er.R {
- edges := tx.ReadBucket(edgeBucket)
- if edges == nil {
- return ErrGraphNoEdgesFound.Default()
- }
- edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
- if edgeIndex == nil {
- return ErrGraphNoEdgesFound.Default()
- }
-
- var channelID [8]byte
- byteOrder.PutUint64(channelID[:], chanID)
-
- // If the edge doesn't exist, then we'll also check our zombie
- // index.
- if edgeIndex.Get(channelID[:]) == nil {
- exists = false
- zombieIndex := edges.NestedReadBucket(zombieBucket)
- if zombieIndex != nil {
- isZombie, _, _ = isZombieEdge(
- zombieIndex, chanID,
- )
- }
-
- return nil
- }
-
- exists = true
- isZombie = false
-
- // If the channel has been found in the graph, then retrieve
- // the edges itself so we can return the last updated
- // timestamps.
- nodes := tx.ReadBucket(nodeBucket)
- if nodes == nil {
- return ErrGraphNodeNotFound.Default()
- }
-
- e1, e2, err := fetchChanEdgePolicies(edgeIndex, edges, nodes,
- channelID[:], c.db)
- if err != nil {
- return err
- }
-
- // As we may have only one of the edges populated, only set the
- // update time if the edge was found in the database.
- if e1 != nil {
- upd1Time = e1.LastUpdate
- }
- if e2 != nil {
- upd2Time = e2.LastUpdate
- }
-
- return nil
- }, func() {}); err != nil {
- return time.Time{}, time.Time{}, exists, isZombie, err
- }
-
- c.rejectCache.insert(chanID, rejectCacheEntry{
- upd1Time: upd1Time.Unix(),
- upd2Time: upd2Time.Unix(),
- flags: packRejectFlags(exists, isZombie),
- })
-
- return upd1Time, upd2Time, exists, isZombie, nil
-}
-
-// UpdateChannelEdge retrieves and update edge of the graph database. Method
-// only reserved for updating an edge info after its already been created.
-// In order to maintain this constraints, we return an error in the scenario
-// that an edge info hasn't yet been created yet, but someone attempts to update
-// it.
-func (c *ChannelGraph) UpdateChannelEdge(edge *ChannelEdgeInfo) er.R {
- // Construct the channel's primary key which is the 8-byte channel ID.
- var chanKey [8]byte
- binary.BigEndian.PutUint64(chanKey[:], edge.ChannelID)
-
- return kvdb.Update(c.db, func(tx kvdb.RwTx) er.R {
- edges := tx.ReadWriteBucket(edgeBucket)
- if edge == nil {
- return ErrEdgeNotFound.Default()
- }
-
- edgeIndex := edges.NestedReadWriteBucket(edgeIndexBucket)
- if edgeIndex == nil {
- return ErrEdgeNotFound.Default()
- }
-
- if edgeInfo := edgeIndex.Get(chanKey[:]); edgeInfo == nil {
- return ErrEdgeNotFound.Default()
- }
-
- return putChanEdgeInfo(edgeIndex, edge, chanKey)
- }, func() {})
-}
-
-const (
- // pruneTipBytes is the total size of the value which stores a prune
- // entry of the graph in the prune log. The "prune tip" is the last
- // entry in the prune log, and indicates if the channel graph is in
- // sync with the current UTXO state. The structure of the value
- // is: blockHash, taking 32 bytes total.
- pruneTipBytes = 32
-)
-
-// PruneGraph prunes newly closed channels from the channel graph in response
-// to a new block being solved on the network. Any transactions which spend the
-// funding output of any known channels within he graph will be deleted.
-// Additionally, the "prune tip", or the last block which has been used to
-// prune the graph is stored so callers can ensure the graph is fully in sync
-// with the current UTXO state. A slice of channels that have been closed by
-// the target block are returned if the function succeeds without error.
-func (c *ChannelGraph) PruneGraph(spentOutputs []*wire.OutPoint,
- blockHash *chainhash.Hash, blockHeight uint32) ([]*ChannelEdgeInfo, er.R) {
-
- c.cacheMu.Lock()
- defer c.cacheMu.Unlock()
-
- var chansClosed []*ChannelEdgeInfo
-
- err := kvdb.Update(c.db, func(tx kvdb.RwTx) er.R {
- // First grab the edges bucket which houses the information
- // we'd like to delete
- edges, err := tx.CreateTopLevelBucket(edgeBucket)
- if err != nil {
- return err
- }
-
- // Next grab the two edge indexes which will also need to be updated.
- edgeIndex, err := edges.CreateBucketIfNotExists(edgeIndexBucket)
- if err != nil {
- return err
- }
- chanIndex, err := edges.CreateBucketIfNotExists(channelPointBucket)
- if err != nil {
- return err
- }
- nodes := tx.ReadWriteBucket(nodeBucket)
- if nodes == nil {
- return ErrSourceNodeNotSet.Default()
- }
- zombieIndex, err := edges.CreateBucketIfNotExists(zombieBucket)
- if err != nil {
- return err
- }
-
- // For each of the outpoints that have been spent within the
- // block, we attempt to delete them from the graph as if that
- // outpoint was a channel, then it has now been closed.
- for _, chanPoint := range spentOutputs {
- // TODO(roasbeef): load channel bloom filter, continue
- // if NOT if filter
-
- var opBytes bytes.Buffer
- if err := writeOutpoint(&opBytes, chanPoint); err != nil {
- return err
- }
-
- // First attempt to see if the channel exists within
- // the database, if not, then we can exit early.
- chanID := chanIndex.Get(opBytes.Bytes())
- if chanID == nil {
- continue
- }
-
- // However, if it does, then we'll read out the full
- // version so we can add it to the set of deleted
- // channels.
- edgeInfo, err := fetchChanEdgeInfo(edgeIndex, chanID)
- if err != nil {
- return err
- }
-
- // Attempt to delete the channel, an ErrEdgeNotFound
- // will be returned if that outpoint isn't known to be
- // a channel. If no error is returned, then a channel
- // was successfully pruned.
- err = delChannelEdge(
- edges, edgeIndex, chanIndex, zombieIndex, nodes,
- chanID, false,
- )
- if err != nil && !ErrEdgeNotFound.Is(err) {
- return err
- }
-
- chansClosed = append(chansClosed, &edgeInfo)
- }
-
- metaBucket, err := tx.CreateTopLevelBucket(graphMetaBucket)
- if err != nil {
- return err
- }
-
- pruneBucket, err := metaBucket.CreateBucketIfNotExists(pruneLogBucket)
- if err != nil {
- return err
- }
-
- // With the graph pruned, add a new entry to the prune log,
- // which can be used to check if the graph is fully synced with
- // the current UTXO state.
- var blockHeightBytes [4]byte
- byteOrder.PutUint32(blockHeightBytes[:], blockHeight)
-
- var newTip [pruneTipBytes]byte
- copy(newTip[:], blockHash[:])
-
- err = pruneBucket.Put(blockHeightBytes[:], newTip[:])
- if err != nil {
- return err
- }
-
- // Now that the graph has been pruned, we'll also attempt to
- // prune any nodes that have had a channel closed within the
- // latest block.
- return c.pruneGraphNodes(nodes, edgeIndex)
- }, func() {
- chansClosed = nil
- })
- if err != nil {
- return nil, err
- }
-
- for _, channel := range chansClosed {
- c.rejectCache.remove(channel.ChannelID)
- c.chanCache.remove(channel.ChannelID)
- }
-
- return chansClosed, nil
-}
-
-// PruneGraphNodes is a garbage collection method which attempts to prune out
-// any nodes from the channel graph that are currently unconnected. This ensure
-// that we only maintain a graph of reachable nodes. In the event that a pruned
-// node gains more channels, it will be re-added back to the graph.
-func (c *ChannelGraph) PruneGraphNodes() er.R {
- return kvdb.Update(c.db, func(tx kvdb.RwTx) er.R {
- nodes := tx.ReadWriteBucket(nodeBucket)
- if nodes == nil {
- return ErrGraphNodesNotFound.Default()
- }
- edges := tx.ReadWriteBucket(edgeBucket)
- if edges == nil {
- return ErrGraphNotFound.Default()
- }
- edgeIndex := edges.NestedReadWriteBucket(edgeIndexBucket)
- if edgeIndex == nil {
- return ErrGraphNoEdgesFound.Default()
- }
-
- return c.pruneGraphNodes(nodes, edgeIndex)
- }, func() {})
-}
-
-// pruneGraphNodes attempts to remove any nodes from the graph who have had a
-// channel closed within the current block. If the node still has existing
-// channels in the graph, this will act as a no-op.
-func (c *ChannelGraph) pruneGraphNodes(nodes kvdb.RwBucket,
- edgeIndex kvdb.RwBucket) er.R {
-
- log.Trace("Pruning nodes from graph with no open channels")
-
- // We'll retrieve the graph's source node to ensure we don't remove it
- // even if it no longer has any open channels.
- sourceNode, err := c.sourceNode(nodes)
- if err != nil {
- return err
- }
-
- // We'll use this map to keep count the number of references to a node
- // in the graph. A node should only be removed once it has no more
- // references in the graph.
- nodeRefCounts := make(map[[33]byte]int)
- err = nodes.ForEach(func(pubKey, nodeBytes []byte) er.R {
- // If this is the source key, then we skip this
- // iteration as the value for this key is a pubKey
- // rather than raw node information.
- if bytes.Equal(pubKey, sourceKey) || len(pubKey) != 33 {
- return nil
- }
-
- var nodePub [33]byte
- copy(nodePub[:], pubKey)
- nodeRefCounts[nodePub] = 0
-
- return nil
- })
- if err != nil {
- return err
- }
-
- // To ensure we never delete the source node, we'll start off by
- // bumping its ref count to 1.
- nodeRefCounts[sourceNode.PubKeyBytes] = 1
-
- // Next, we'll run through the edgeIndex which maps a channel ID to the
- // edge info. We'll use this scan to populate our reference count map
- // above.
- err = edgeIndex.ForEach(func(chanID, edgeInfoBytes []byte) er.R {
- // The first 66 bytes of the edge info contain the pubkeys of
- // the nodes that this edge attaches. We'll extract them, and
- // add them to the ref count map.
- var node1, node2 [33]byte
- copy(node1[:], edgeInfoBytes[:33])
- copy(node2[:], edgeInfoBytes[33:])
-
- // With the nodes extracted, we'll increase the ref count of
- // each of the nodes.
- nodeRefCounts[node1]++
- nodeRefCounts[node2]++
-
- return nil
- })
- if err != nil {
- return err
- }
-
- // Finally, we'll make a second pass over the set of nodes, and delete
- // any nodes that have a ref count of zero.
- var numNodesPruned int
- for nodePubKey, refCount := range nodeRefCounts {
- // If the ref count of the node isn't zero, then we can safely
- // skip it as it still has edges to or from it within the
- // graph.
- if refCount != 0 {
- continue
- }
-
- // If we reach this point, then there are no longer any edges
- // that connect this node, so we can delete it.
- if err := c.deleteLightningNode(nodes, nodePubKey[:]); err != nil {
- log.Warnf("Unable to prune node %x from the "+
- "graph: %v", nodePubKey, err)
- continue
- }
-
- log.Infof("Pruned unconnected node %x from channel graph",
- nodePubKey[:])
-
- numNodesPruned++
- }
-
- if numNodesPruned > 0 {
- log.Infof("Pruned %v unconnected nodes from the channel graph",
- numNodesPruned)
- }
-
- return nil
-}
-
-// DisconnectBlockAtHeight is used to indicate that the block specified
-// by the passed height has been disconnected from the main chain. This
-// will "rewind" the graph back to the height below, deleting channels
-// that are no longer confirmed from the graph. The prune log will be
-// set to the last prune height valid for the remaining chain.
-// Channels that were removed from the graph resulting from the
-// disconnected block are returned.
-func (c *ChannelGraph) DisconnectBlockAtHeight(height uint32) ([]*ChannelEdgeInfo,
- er.R) {
-
- // Every channel having a ShortChannelID starting at 'height'
- // will no longer be confirmed.
- startShortChanID := lnwire.ShortChannelID{
- BlockHeight: height,
- }
-
- // Delete everything after this height from the db.
- endShortChanID := lnwire.ShortChannelID{
- BlockHeight: math.MaxUint32 & 0x00ffffff,
- TxIndex: math.MaxUint32 & 0x00ffffff,
- TxPosition: math.MaxUint16,
- }
- // The block height will be the 3 first bytes of the channel IDs.
- var chanIDStart [8]byte
- byteOrder.PutUint64(chanIDStart[:], startShortChanID.ToUint64())
- var chanIDEnd [8]byte
- byteOrder.PutUint64(chanIDEnd[:], endShortChanID.ToUint64())
-
- c.cacheMu.Lock()
- defer c.cacheMu.Unlock()
-
- // Keep track of the channels that are removed from the graph.
- var removedChans []*ChannelEdgeInfo
-
- if err := kvdb.Update(c.db, func(tx kvdb.RwTx) er.R {
- edges, err := tx.CreateTopLevelBucket(edgeBucket)
- if err != nil {
- return err
- }
- edgeIndex, err := edges.CreateBucketIfNotExists(edgeIndexBucket)
- if err != nil {
- return err
- }
- chanIndex, err := edges.CreateBucketIfNotExists(channelPointBucket)
- if err != nil {
- return err
- }
- zombieIndex, err := edges.CreateBucketIfNotExists(zombieBucket)
- if err != nil {
- return err
- }
- nodes, err := tx.CreateTopLevelBucket(nodeBucket)
- if err != nil {
- return err
- }
-
- // Scan from chanIDStart to chanIDEnd, deleting every
- // found edge.
- // NOTE: we must delete the edges after the cursor loop, since
- // modifying the bucket while traversing is not safe.
- var keys [][]byte
- cursor := edgeIndex.ReadWriteCursor()
- for k, v := cursor.Seek(chanIDStart[:]); k != nil &&
- bytes.Compare(k, chanIDEnd[:]) <= 0; k, v = cursor.Next() {
-
- edgeInfoReader := bytes.NewReader(v)
- edgeInfo, err := deserializeChanEdgeInfo(edgeInfoReader)
- if err != nil {
- return err
- }
-
- keys = append(keys, k)
- removedChans = append(removedChans, &edgeInfo)
- }
-
- for _, k := range keys {
- err := delChannelEdge(
- edges, edgeIndex, chanIndex, zombieIndex, nodes,
- k, false,
- )
- if err != nil && ErrEdgeNotFound.Is(err) {
- return err
- }
- }
-
- // Delete all the entries in the prune log having a height
- // greater or equal to the block disconnected.
- metaBucket, err := tx.CreateTopLevelBucket(graphMetaBucket)
- if err != nil {
- return err
- }
-
- pruneBucket, err := metaBucket.CreateBucketIfNotExists(pruneLogBucket)
- if err != nil {
- return err
- }
-
- var pruneKeyStart [4]byte
- byteOrder.PutUint32(pruneKeyStart[:], height)
-
- var pruneKeyEnd [4]byte
- byteOrder.PutUint32(pruneKeyEnd[:], math.MaxUint32)
-
- // To avoid modifying the bucket while traversing, we delete
- // the keys in a second loop.
- var pruneKeys [][]byte
- pruneCursor := pruneBucket.ReadWriteCursor()
- for k, _ := pruneCursor.Seek(pruneKeyStart[:]); k != nil &&
- bytes.Compare(k, pruneKeyEnd[:]) <= 0; k, _ = pruneCursor.Next() {
-
- pruneKeys = append(pruneKeys, k)
- }
-
- for _, k := range pruneKeys {
- if err := pruneBucket.Delete(k); err != nil {
- return err
- }
- }
-
- return nil
- }, func() {
- removedChans = nil
- }); err != nil {
- return nil, err
- }
-
- for _, channel := range removedChans {
- c.rejectCache.remove(channel.ChannelID)
- c.chanCache.remove(channel.ChannelID)
- }
-
- return removedChans, nil
-}
-
-// PruneTip returns the block height and hash of the latest block that has been
-// used to prune channels in the graph. Knowing the "prune tip" allows callers
-// to tell if the graph is currently in sync with the current best known UTXO
-// state.
-func (c *ChannelGraph) PruneTip() (*chainhash.Hash, uint32, er.R) {
- var (
- tipHash chainhash.Hash
- tipHeight uint32
- )
-
- err := kvdb.View(c.db, func(tx kvdb.RTx) er.R {
- graphMeta := tx.ReadBucket(graphMetaBucket)
- if graphMeta == nil {
- return ErrGraphNotFound.Default()
- }
- pruneBucket := graphMeta.NestedReadBucket(pruneLogBucket)
- if pruneBucket == nil {
- return ErrGraphNeverPruned.Default()
- }
-
- pruneCursor := pruneBucket.ReadCursor()
-
- // The prune key with the largest block height will be our
- // prune tip.
- k, v := pruneCursor.Last()
- if k == nil {
- return ErrGraphNeverPruned.Default()
- }
-
- // Once we have the prune tip, the value will be the block hash,
- // and the key the block height.
- copy(tipHash[:], v[:])
- tipHeight = byteOrder.Uint32(k[:])
-
- return nil
- }, func() {})
- if err != nil {
- return nil, 0, err
- }
-
- return &tipHash, tipHeight, nil
-}
-
-// DeleteChannelEdges removes edges with the given channel IDs from the database
-// and marks them as zombies. This ensures that we're unable to re-add it to our
-// database once again. If an edge does not exist within the database, then
-// ErrEdgeNotFound will be returned.
-func (c *ChannelGraph) DeleteChannelEdges(chanIDs ...uint64) er.R {
- // TODO(roasbeef): possibly delete from node bucket if node has no more
- // channels
- // TODO(roasbeef): don't delete both edges?
-
- c.cacheMu.Lock()
- defer c.cacheMu.Unlock()
-
- err := kvdb.Update(c.db, func(tx kvdb.RwTx) er.R {
- edges := tx.ReadWriteBucket(edgeBucket)
- if edges == nil {
- return ErrEdgeNotFound.Default()
- }
- edgeIndex := edges.NestedReadWriteBucket(edgeIndexBucket)
- if edgeIndex == nil {
- return ErrEdgeNotFound.Default()
- }
- chanIndex := edges.NestedReadWriteBucket(channelPointBucket)
- if chanIndex == nil {
- return ErrEdgeNotFound.Default()
- }
- nodes := tx.ReadWriteBucket(nodeBucket)
- if nodes == nil {
- return ErrGraphNodeNotFound.Default()
- }
- zombieIndex, err := edges.CreateBucketIfNotExists(zombieBucket)
- if err != nil {
- return err
- }
-
- var rawChanID [8]byte
- for _, chanID := range chanIDs {
- byteOrder.PutUint64(rawChanID[:], chanID)
- err := delChannelEdge(
- edges, edgeIndex, chanIndex, zombieIndex, nodes,
- rawChanID[:], true,
- )
- if err != nil {
- return err
- }
- }
-
- return nil
- }, func() {})
- if err != nil {
- return err
- }
-
- for _, chanID := range chanIDs {
- c.rejectCache.remove(chanID)
- c.chanCache.remove(chanID)
- }
-
- return nil
-}
-
-// ChannelID attempt to lookup the 8-byte compact channel ID which maps to the
-// passed channel point (outpoint). If the passed channel doesn't exist within
-// the database, then ErrEdgeNotFound is returned.
-func (c *ChannelGraph) ChannelID(chanPoint *wire.OutPoint) (uint64, er.R) {
- var chanID uint64
- if err := kvdb.View(c.db, func(tx kvdb.RTx) er.R {
- var err er.R
- chanID, err = getChanID(tx, chanPoint)
- return err
- }, func() {
- chanID = 0
- }); err != nil {
- return 0, err
- }
-
- return chanID, nil
-}
-
-// getChanID returns the assigned channel ID for a given channel point.
-func getChanID(tx kvdb.RTx, chanPoint *wire.OutPoint) (uint64, er.R) {
- var b bytes.Buffer
- if err := writeOutpoint(&b, chanPoint); err != nil {
- return 0, err
- }
-
- edges := tx.ReadBucket(edgeBucket)
- if edges == nil {
- return 0, ErrGraphNoEdgesFound.Default()
- }
- chanIndex := edges.NestedReadBucket(channelPointBucket)
- if chanIndex == nil {
- return 0, ErrGraphNoEdgesFound.Default()
- }
-
- chanIDBytes := chanIndex.Get(b.Bytes())
- if chanIDBytes == nil {
- return 0, ErrEdgeNotFound.Default()
- }
-
- chanID := byteOrder.Uint64(chanIDBytes)
-
- return chanID, nil
-}
-
-// TODO(roasbeef): allow updates to use Batch?
-
-// HighestChanID returns the "highest" known channel ID in the channel graph.
-// This represents the "newest" channel from the PoV of the chain. This method
-// can be used by peers to quickly determine if they're graphs are in sync.
-func (c *ChannelGraph) HighestChanID() (uint64, er.R) {
- var cid uint64
-
- err := kvdb.View(c.db, func(tx kvdb.RTx) er.R {
- edges := tx.ReadBucket(edgeBucket)
- if edges == nil {
- return ErrGraphNoEdgesFound.Default()
- }
- edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
- if edgeIndex == nil {
- return ErrGraphNoEdgesFound.Default()
- }
-
- // In order to find the highest chan ID, we'll fetch a cursor
- // and use that to seek to the "end" of our known rage.
- cidCursor := edgeIndex.ReadCursor()
-
- lastChanID, _ := cidCursor.Last()
-
- // If there's no key, then this means that we don't actually
- // know of any channels, so we'll return a predicable error.
- if lastChanID == nil {
- return ErrGraphNoEdgesFound.Default()
- }
-
- // Otherwise, we'll de serialize the channel ID and return it
- // to the caller.
- cid = byteOrder.Uint64(lastChanID)
- return nil
- }, func() {
- cid = 0
- })
- if err != nil && !ErrGraphNoEdgesFound.Is(err) {
- return 0, err
- }
-
- return cid, nil
-}
-
-// ChannelEdge represents the complete set of information for a channel edge in
-// the known channel graph. This struct couples the core information of the
-// edge as well as each of the known advertised edge policies.
-type ChannelEdge struct {
- // Info contains all the static information describing the channel.
- Info *ChannelEdgeInfo
-
- // Policy1 points to the "first" edge policy of the channel containing
- // the dynamic information required to properly route through the edge.
- Policy1 *ChannelEdgePolicy
-
- // Policy2 points to the "second" edge policy of the channel containing
- // the dynamic information required to properly route through the edge.
- Policy2 *ChannelEdgePolicy
-}
-
-// ChanUpdatesInHorizon returns all the known channel edges which have at least
-// one edge that has an update timestamp within the specified horizon.
-func (c *ChannelGraph) ChanUpdatesInHorizon(startTime, endTime time.Time) ([]ChannelEdge, er.R) {
- // To ensure we don't return duplicate ChannelEdges, we'll use an
- // additional map to keep track of the edges already seen to prevent
- // re-adding it.
- var edgesSeen map[uint64]struct{}
- var edgesToCache map[uint64]ChannelEdge
- var edgesInHorizon []ChannelEdge
-
- c.cacheMu.Lock()
- defer c.cacheMu.Unlock()
-
- var hits int
- err := kvdb.View(c.db, func(tx kvdb.RTx) er.R {
- edges := tx.ReadBucket(edgeBucket)
- if edges == nil {
- return ErrGraphNoEdgesFound.Default()
- }
- edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
- if edgeIndex == nil {
- return ErrGraphNoEdgesFound.Default()
- }
- edgeUpdateIndex := edges.NestedReadBucket(edgeUpdateIndexBucket)
- if edgeUpdateIndex == nil {
- return ErrGraphNoEdgesFound.Default()
- }
-
- nodes := tx.ReadBucket(nodeBucket)
- if nodes == nil {
- return ErrGraphNodesNotFound.Default()
- }
-
- // We'll now obtain a cursor to perform a range query within
- // the index to find all channels within the horizon.
- updateCursor := edgeUpdateIndex.ReadCursor()
-
- var startTimeBytes, endTimeBytes [8 + 8]byte
- byteOrder.PutUint64(
- startTimeBytes[:8], uint64(startTime.Unix()),
- )
- byteOrder.PutUint64(
- endTimeBytes[:8], uint64(endTime.Unix()),
- )
-
- // With our start and end times constructed, we'll step through
- // the index collecting the info and policy of each update of
- // each channel that has a last update within the time range.
- for indexKey, _ := updateCursor.Seek(startTimeBytes[:]); indexKey != nil &&
- bytes.Compare(indexKey, endTimeBytes[:]) <= 0; indexKey, _ = updateCursor.Next() {
-
- // We have a new eligible entry, so we'll slice of the
- // chan ID so we can query it in the DB.
- chanID := indexKey[8:]
-
- // If we've already retrieved the info and policies for
- // this edge, then we can skip it as we don't need to do
- // so again.
- chanIDInt := byteOrder.Uint64(chanID)
- if _, ok := edgesSeen[chanIDInt]; ok {
- continue
- }
-
- if channel, ok := c.chanCache.get(chanIDInt); ok {
- hits++
- edgesSeen[chanIDInt] = struct{}{}
- edgesInHorizon = append(edgesInHorizon, channel)
- continue
- }
-
- // First, we'll fetch the static edge information.
- edgeInfo, err := fetchChanEdgeInfo(edgeIndex, chanID)
- if err != nil {
- chanID := byteOrder.Uint64(chanID)
- return er.Errorf("unable to fetch info for "+
- "edge with chan_id=%v: %v", chanID, err)
- }
- edgeInfo.db = c.db
-
- // With the static information obtained, we'll now
- // fetch the dynamic policy info.
- edge1, edge2, err := fetchChanEdgePolicies(
- edgeIndex, edges, nodes, chanID, c.db,
- )
- if err != nil {
- chanID := byteOrder.Uint64(chanID)
- return er.Errorf("unable to fetch policies "+
- "for edge with chan_id=%v: %v", chanID,
- err)
- }
-
- // Finally, we'll collate this edge with the rest of
- // edges to be returned.
- edgesSeen[chanIDInt] = struct{}{}
- channel := ChannelEdge{
- Info: &edgeInfo,
- Policy1: edge1,
- Policy2: edge2,
- }
- edgesInHorizon = append(edgesInHorizon, channel)
- edgesToCache[chanIDInt] = channel
- }
-
- return nil
- }, func() {
- edgesSeen = make(map[uint64]struct{})
- edgesToCache = make(map[uint64]ChannelEdge)
- edgesInHorizon = nil
- })
- switch {
- case ErrGraphNoEdgesFound.Is(err):
- fallthrough
- case ErrGraphNodesNotFound.Is(err):
- break
-
- case err != nil:
- return nil, err
- }
-
- // Insert any edges loaded from disk into the cache.
- for chanid, channel := range edgesToCache {
- c.chanCache.insert(chanid, channel)
- }
-
- log.Debugf("ChanUpdatesInHorizon hit percentage: %f (%d/%d)",
- float64(hits)/float64(len(edgesInHorizon)), hits,
- len(edgesInHorizon))
-
- return edgesInHorizon, nil
-}
-
-// NodeUpdatesInHorizon returns all the known lightning node which have an
-// update timestamp within the passed range. This method can be used by two
-// nodes to quickly determine if they have the same set of up to date node
-// announcements.
-func (c *ChannelGraph) NodeUpdatesInHorizon(startTime, endTime time.Time) ([]LightningNode, er.R) {
- var nodesInHorizon []LightningNode
-
- err := kvdb.View(c.db, func(tx kvdb.RTx) er.R {
- nodes := tx.ReadBucket(nodeBucket)
- if nodes == nil {
- return ErrGraphNodesNotFound.Default()
- }
-
- nodeUpdateIndex := nodes.NestedReadBucket(nodeUpdateIndexBucket)
- if nodeUpdateIndex == nil {
- return ErrGraphNodesNotFound.Default()
- }
-
- // We'll now obtain a cursor to perform a range query within
- // the index to find all node announcements within the horizon.
- updateCursor := nodeUpdateIndex.ReadCursor()
-
- var startTimeBytes, endTimeBytes [8 + 33]byte
- byteOrder.PutUint64(
- startTimeBytes[:8], uint64(startTime.Unix()),
- )
- byteOrder.PutUint64(
- endTimeBytes[:8], uint64(endTime.Unix()),
- )
-
- // With our start and end times constructed, we'll step through
- // the index collecting info for each node within the time
- // range.
- for indexKey, _ := updateCursor.Seek(startTimeBytes[:]); indexKey != nil &&
- bytes.Compare(indexKey, endTimeBytes[:]) <= 0; indexKey, _ = updateCursor.Next() {
-
- nodePub := indexKey[8:]
- node, err := fetchLightningNode(nodes, nodePub)
- if err != nil {
- return err
- }
- node.db = c.db
-
- nodesInHorizon = append(nodesInHorizon, node)
- }
-
- return nil
- }, func() {
- nodesInHorizon = nil
- })
- switch {
- case ErrGraphNoEdgesFound.Is(err):
- fallthrough
- case ErrGraphNodesNotFound.Is(err):
- break
-
- case err != nil:
- return nil, err
- }
-
- return nodesInHorizon, nil
-}
-
-// FilterKnownChanIDs takes a set of channel IDs and return the subset of chan
-// ID's that we don't know and are not known zombies of the passed set. In other
-// words, we perform a set difference of our set of chan ID's and the ones
-// passed in. This method can be used by callers to determine the set of
-// channels another peer knows of that we don't.
-func (c *ChannelGraph) FilterKnownChanIDs(chanIDs []uint64) ([]uint64, er.R) {
- var newChanIDs []uint64
-
- err := kvdb.View(c.db, func(tx kvdb.RTx) er.R {
- edges := tx.ReadBucket(edgeBucket)
- if edges == nil {
- return ErrGraphNoEdgesFound.Default()
- }
- edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
- if edgeIndex == nil {
- return ErrGraphNoEdgesFound.Default()
- }
-
- // Fetch the zombie index, it may not exist if no edges have
- // ever been marked as zombies. If the index has been
- // initialized, we will use it later to skip known zombie edges.
- zombieIndex := edges.NestedReadBucket(zombieBucket)
-
- // We'll run through the set of chanIDs and collate only the
- // set of channel that are unable to be found within our db.
- var cidBytes [8]byte
- for _, cid := range chanIDs {
- byteOrder.PutUint64(cidBytes[:], cid)
-
- // If the edge is already known, skip it.
- if v := edgeIndex.Get(cidBytes[:]); v != nil {
- continue
- }
-
- // If the edge is a known zombie, skip it.
- if zombieIndex != nil {
- isZombie, _, _ := isZombieEdge(zombieIndex, cid)
- if isZombie {
- continue
- }
- }
-
- newChanIDs = append(newChanIDs, cid)
- }
-
- return nil
- }, func() {
- newChanIDs = nil
- })
- switch {
- // If we don't know of any edges yet, then we'll return the entire set
- // of chan IDs specified.
- case ErrGraphNoEdgesFound.Is(err):
- return chanIDs, nil
-
- case err != nil:
- return nil, err
- }
-
- return newChanIDs, nil
-}
-
-// FilterChannelRange returns the channel ID's of all known channels which were
-// mined in a block height within the passed range. This method can be used to
-// quickly share with a peer the set of channels we know of within a particular
-// range to catch them up after a period of time offline.
-func (c *ChannelGraph) FilterChannelRange(startHeight, endHeight uint32) ([]uint64, er.R) {
- var chanIDs []uint64
-
- startChanID := &lnwire.ShortChannelID{
- BlockHeight: startHeight,
- }
-
- endChanID := lnwire.ShortChannelID{
- BlockHeight: endHeight,
- TxIndex: math.MaxUint32 & 0x00ffffff,
- TxPosition: math.MaxUint16,
- }
-
- // As we need to perform a range scan, we'll convert the starting and
- // ending height to their corresponding values when encoded using short
- // channel ID's.
- var chanIDStart, chanIDEnd [8]byte
- byteOrder.PutUint64(chanIDStart[:], startChanID.ToUint64())
- byteOrder.PutUint64(chanIDEnd[:], endChanID.ToUint64())
-
- err := kvdb.View(c.db, func(tx kvdb.RTx) er.R {
- edges := tx.ReadBucket(edgeBucket)
- if edges == nil {
- return ErrGraphNoEdgesFound.Default()
- }
- edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
- if edgeIndex == nil {
- return ErrGraphNoEdgesFound.Default()
- }
-
- cursor := edgeIndex.ReadCursor()
-
- // We'll now iterate through the database, and find each
- // channel ID that resides within the specified range.
- var cid uint64
- for k, _ := cursor.Seek(chanIDStart[:]); k != nil &&
- bytes.Compare(k, chanIDEnd[:]) <= 0; k, _ = cursor.Next() {
-
- // This channel ID rests within the target range, so
- // we'll convert it into an integer and add it to our
- // returned set.
- cid = byteOrder.Uint64(k)
- chanIDs = append(chanIDs, cid)
- }
-
- return nil
- }, func() {
- chanIDs = nil
- })
-
- switch {
- // If we don't know of any channels yet, then there's nothing to
- // filter, so we'll return an empty slice.
- case ErrGraphNoEdgesFound.Is(err):
- return chanIDs, nil
-
- case err != nil:
- return nil, err
- }
-
- return chanIDs, nil
-}
-
-// FetchChanInfos returns the set of channel edges that correspond to the passed
-// channel ID's. If an edge is the query is unknown to the database, it will
-// skipped and the result will contain only those edges that exist at the time
-// of the query. This can be used to respond to peer queries that are seeking to
-// fill in gaps in their view of the channel graph.
-func (c *ChannelGraph) FetchChanInfos(chanIDs []uint64) ([]ChannelEdge, er.R) {
- // TODO(roasbeef): sort cids?
-
- var (
- chanEdges []ChannelEdge
- cidBytes [8]byte
- )
-
- err := kvdb.View(c.db, func(tx kvdb.RTx) er.R {
- edges := tx.ReadBucket(edgeBucket)
- if edges == nil {
- return ErrGraphNoEdgesFound.Default()
- }
- edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
- if edgeIndex == nil {
- return ErrGraphNoEdgesFound.Default()
- }
- nodes := tx.ReadBucket(nodeBucket)
- if nodes == nil {
- return ErrGraphNotFound.Default()
- }
-
- for _, cid := range chanIDs {
- byteOrder.PutUint64(cidBytes[:], cid)
-
- // First, we'll fetch the static edge information. If
- // the edge is unknown, we will skip the edge and
- // continue gathering all known edges.
- edgeInfo, err := fetchChanEdgeInfo(
- edgeIndex, cidBytes[:],
- )
- switch {
- case ErrEdgeNotFound.Is(err):
- continue
- case err != nil:
- return err
- }
- edgeInfo.db = c.db
-
- // With the static information obtained, we'll now
- // fetch the dynamic policy info.
- edge1, edge2, err := fetchChanEdgePolicies(
- edgeIndex, edges, nodes, cidBytes[:], c.db,
- )
- if err != nil {
- return err
- }
-
- chanEdges = append(chanEdges, ChannelEdge{
- Info: &edgeInfo,
- Policy1: edge1,
- Policy2: edge2,
- })
- }
- return nil
- }, func() {
- chanEdges = nil
- })
- if err != nil {
- return nil, err
- }
-
- return chanEdges, nil
-}
-
-func delEdgeUpdateIndexEntry(edgesBucket kvdb.RwBucket, chanID uint64,
- edge1, edge2 *ChannelEdgePolicy) er.R {
-
- // First, we'll fetch the edge update index bucket which currently
- // stores an entry for the channel we're about to delete.
- updateIndex := edgesBucket.NestedReadWriteBucket(edgeUpdateIndexBucket)
- if updateIndex == nil {
- // No edges in bucket, return early.
- return nil
- }
-
- // Now that we have the bucket, we'll attempt to construct a template
- // for the index key: updateTime || chanid.
- var indexKey [8 + 8]byte
- byteOrder.PutUint64(indexKey[8:], chanID)
-
- // With the template constructed, we'll attempt to delete an entry that
- // would have been created by both edges: we'll alternate the update
- // times, as one may had overridden the other.
- if edge1 != nil {
- byteOrder.PutUint64(indexKey[:8], uint64(edge1.LastUpdate.Unix()))
- if err := updateIndex.Delete(indexKey[:]); err != nil {
- return err
- }
- }
-
- // We'll also attempt to delete the entry that may have been created by
- // the second edge.
- if edge2 != nil {
- byteOrder.PutUint64(indexKey[:8], uint64(edge2.LastUpdate.Unix()))
- if err := updateIndex.Delete(indexKey[:]); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func delChannelEdge(edges, edgeIndex, chanIndex, zombieIndex,
- nodes kvdb.RwBucket, chanID []byte, isZombie bool) er.R {
-
- edgeInfo, err := fetchChanEdgeInfo(edgeIndex, chanID)
- if err != nil {
- return err
- }
-
- // We'll also remove the entry in the edge update index bucket before
- // we delete the edges themselves so we can access their last update
- // times.
- cid := byteOrder.Uint64(chanID)
- edge1, edge2, err := fetchChanEdgePolicies(
- edgeIndex, edges, nodes, chanID, nil,
- )
- if err != nil {
- return err
- }
- err = delEdgeUpdateIndexEntry(edges, cid, edge1, edge2)
- if err != nil {
- return err
- }
-
- // The edge key is of the format pubKey || chanID. First we construct
- // the latter half, populating the channel ID.
- var edgeKey [33 + 8]byte
- copy(edgeKey[33:], chanID)
-
- // With the latter half constructed, copy over the first public key to
- // delete the edge in this direction, then the second to delete the
- // edge in the opposite direction.
- copy(edgeKey[:33], edgeInfo.NodeKey1Bytes[:])
- if edges.Get(edgeKey[:]) != nil {
- if err := edges.Delete(edgeKey[:]); err != nil {
- return err
- }
- }
- copy(edgeKey[:33], edgeInfo.NodeKey2Bytes[:])
- if edges.Get(edgeKey[:]) != nil {
- if err := edges.Delete(edgeKey[:]); err != nil {
- return err
- }
- }
-
- // As part of deleting the edge we also remove all disabled entries
- // from the edgePolicyDisabledIndex bucket. We do that for both directions.
- updateEdgePolicyDisabledIndex(edges, cid, false, false)
- updateEdgePolicyDisabledIndex(edges, cid, true, false)
-
- // With the edge data deleted, we can purge the information from the two
- // edge indexes.
- if err := edgeIndex.Delete(chanID); err != nil {
- return err
- }
- var b bytes.Buffer
- if err := writeOutpoint(&b, &edgeInfo.ChannelPoint); err != nil {
- return err
- }
- if err := chanIndex.Delete(b.Bytes()); err != nil {
- return err
- }
-
- // Finally, we'll mark the edge as a zombie within our index if it's
- // being removed due to the channel becoming a zombie. We do this to
- // ensure we don't store unnecessary data for spent channels.
- if !isZombie {
- return nil
- }
-
- return markEdgeZombie(
- zombieIndex, byteOrder.Uint64(chanID), edgeInfo.NodeKey1Bytes,
- edgeInfo.NodeKey2Bytes,
- )
-}
-
-// UpdateEdgePolicy updates the edge routing policy for a single directed edge
-// within the database for the referenced channel. The `flags` attribute within
-// the ChannelEdgePolicy determines which of the directed edges are being
-// updated. If the flag is 1, then the first node's information is being
-// updated, otherwise it's the second node's information. The node ordering is
-// determined by the lexicographical ordering of the identity public keys of
-// the nodes on either side of the channel.
-func (c *ChannelGraph) UpdateEdgePolicy(edge *ChannelEdgePolicy) er.R {
- c.cacheMu.Lock()
- defer c.cacheMu.Unlock()
-
- var isUpdate1 bool
- err := kvdb.Update(c.db, func(tx kvdb.RwTx) er.R {
- var err er.R
- isUpdate1, err = updateEdgePolicy(tx, edge)
- return err
- }, func() {
- isUpdate1 = false
- })
- if err != nil {
- return err
- }
-
- // If an entry for this channel is found in reject cache, we'll modify
- // the entry with the updated timestamp for the direction that was just
- // written. If the edge doesn't exist, we'll load the cache entry lazily
- // during the next query for this edge.
- if entry, ok := c.rejectCache.get(edge.ChannelID); ok {
- if isUpdate1 {
- entry.upd1Time = edge.LastUpdate.Unix()
- } else {
- entry.upd2Time = edge.LastUpdate.Unix()
- }
- c.rejectCache.insert(edge.ChannelID, entry)
- }
-
- // If an entry for this channel is found in channel cache, we'll modify
- // the entry with the updated policy for the direction that was just
- // written. If the edge doesn't exist, we'll defer loading the info and
- // policies and lazily read from disk during the next query.
- if channel, ok := c.chanCache.get(edge.ChannelID); ok {
- if isUpdate1 {
- channel.Policy1 = edge
- } else {
- channel.Policy2 = edge
- }
- c.chanCache.insert(edge.ChannelID, channel)
- }
-
- return nil
-}
-
-// updateEdgePolicy attempts to update an edge's policy within the relevant
-// buckets using an existing database transaction. The returned boolean will be
-// true if the updated policy belongs to node1, and false if the policy belonged
-// to node2.
-func updateEdgePolicy(tx kvdb.RwTx, edge *ChannelEdgePolicy) (bool, er.R) {
- edges := tx.ReadWriteBucket(edgeBucket)
- if edges == nil {
- return false, ErrEdgeNotFound.Default()
-
- }
- edgeIndex := edges.NestedReadWriteBucket(edgeIndexBucket)
- if edgeIndex == nil {
- return false, ErrEdgeNotFound.Default()
- }
- nodes, err := tx.CreateTopLevelBucket(nodeBucket)
- if err != nil {
- return false, err
- }
-
- // Create the channelID key be converting the channel ID
- // integer into a byte slice.
- var chanID [8]byte
- byteOrder.PutUint64(chanID[:], edge.ChannelID)
-
- // With the channel ID, we then fetch the value storing the two
- // nodes which connect this channel edge.
- nodeInfo := edgeIndex.Get(chanID[:])
- if nodeInfo == nil {
- return false, ErrEdgeNotFound.Default()
- }
-
- // Depending on the flags value passed above, either the first
- // or second edge policy is being updated.
- var fromNode, toNode []byte
- var isUpdate1 bool
- if edge.ChannelFlags&lnwire.ChanUpdateDirection == 0 {
- fromNode = nodeInfo[:33]
- toNode = nodeInfo[33:66]
- isUpdate1 = true
- } else {
- fromNode = nodeInfo[33:66]
- toNode = nodeInfo[:33]
- isUpdate1 = false
- }
-
- // Finally, with the direction of the edge being updated
- // identified, we update the on-disk edge representation.
- errr := putChanEdgePolicy(edges, nodes, edge, fromNode, toNode)
- if errr != nil {
- return false, errr
- }
-
- return isUpdate1, nil
-}
-
-// LightningNode represents an individual vertex/node within the channel graph.
-// A node is connected to other nodes by one or more channel edges emanating
-// from it. As the graph is directed, a node will also have an incoming edge
-// attached to it for each outgoing edge.
-type LightningNode struct {
- // PubKeyBytes is the raw bytes of the public key of the target node.
- PubKeyBytes [33]byte
- pubKey *btcec.PublicKey
-
- // HaveNodeAnnouncement indicates whether we received a node
- // announcement for this particular node. If true, the remaining fields
- // will be set, if false only the PubKey is known for this node.
- HaveNodeAnnouncement bool
-
- // LastUpdate is the last time the vertex information for this node has
- // been updated.
- LastUpdate time.Time
-
- // Address is the TCP address this node is reachable over.
- Addresses []net.Addr
-
- // Color is the selected color for the node.
- Color color.RGBA
-
- // Alias is a nick-name for the node. The alias can be used to confirm
- // a node's identity or to serve as a short ID for an address book.
- Alias string
-
- // AuthSigBytes is the raw signature under the advertised public key
- // which serves to authenticate the attributes announced by this node.
- AuthSigBytes []byte
-
- // Features is the list of protocol features supported by this node.
- Features *lnwire.FeatureVector
-
- // ExtraOpaqueData is the set of data that was appended to this
- // message, some of which we may not actually know how to iterate or
- // parse. By holding onto this data, we ensure that we're able to
- // properly validate the set of signatures that cover these new fields,
- // and ensure we're able to make upgrades to the network in a forwards
- // compatible manner.
- ExtraOpaqueData []byte
-
- db *DB
-
- // TODO(roasbeef): discovery will need storage to keep it's last IP
- // address and re-announce if interface changes?
-
- // TODO(roasbeef): add update method and fetch?
-}
-
-// PubKey is the node's long-term identity public key. This key will be used to
-// authenticated any advertisements/updates sent by the node.
-//
-// NOTE: By having this method to access an attribute, we ensure we only need
-// to fully deserialize the pubkey if absolutely necessary.
-func (l *LightningNode) PubKey() (*btcec.PublicKey, er.R) {
- if l.pubKey != nil {
- return l.pubKey, nil
- }
-
- key, err := btcec.ParsePubKey(l.PubKeyBytes[:], btcec.S256())
- if err != nil {
- return nil, err
- }
- l.pubKey = key
-
- return key, nil
-}
-
-// AuthSig is a signature under the advertised public key which serves to
-// authenticate the attributes announced by this node.
-//
-// NOTE: By having this method to access an attribute, we ensure we only need
-// to fully deserialize the signature if absolutely necessary.
-func (l *LightningNode) AuthSig() (*btcec.Signature, er.R) {
- return btcec.ParseSignature(l.AuthSigBytes, btcec.S256())
-}
-
-// AddPubKey is a setter-link method that can be used to swap out the public
-// key for a node.
-func (l *LightningNode) AddPubKey(key *btcec.PublicKey) {
- l.pubKey = key
- copy(l.PubKeyBytes[:], key.SerializeCompressed())
-}
-
-// NodeAnnouncement retrieves the latest node announcement of the node.
-func (l *LightningNode) NodeAnnouncement(signed bool) (*lnwire.NodeAnnouncement,
- er.R) {
-
- if !l.HaveNodeAnnouncement {
- return nil, er.Errorf("node does not have node announcement")
- }
-
- alias, err := lnwire.NewNodeAlias(l.Alias)
- if err != nil {
- return nil, err
- }
-
- nodeAnn := &lnwire.NodeAnnouncement{
- Features: l.Features.RawFeatureVector,
- NodeID: l.PubKeyBytes,
- RGBColor: l.Color,
- Alias: alias,
- Addresses: l.Addresses,
- Timestamp: uint32(l.LastUpdate.Unix()),
- ExtraOpaqueData: l.ExtraOpaqueData,
- }
-
- if !signed {
- return nodeAnn, nil
- }
-
- sig, err := lnwire.NewSigFromRawSignature(l.AuthSigBytes)
- if err != nil {
- return nil, err
- }
-
- nodeAnn.Signature = sig
-
- return nodeAnn, nil
-}
-
-// isPublic determines whether the node is seen as public within the graph from
-// the source node's point of view. An existing database transaction can also be
-// specified.
-func (l *LightningNode) isPublic(tx kvdb.RTx, sourcePubKey []byte) (bool, er.R) {
- // In order to determine whether this node is publicly advertised within
- // the graph, we'll need to look at all of its edges and check whether
- // they extend to any other node than the source node. errDone will be
- // used to terminate the check early.
- nodeIsPublic := false
- err := l.ForEachChannel(tx, func(_ kvdb.RTx, info *ChannelEdgeInfo,
- _, _ *ChannelEdgePolicy) er.R {
-
- // If this edge doesn't extend to the source node, we'll
- // terminate our search as we can now conclude that the node is
- // publicly advertised within the graph due to the local node
- // knowing of the current edge.
- if !bytes.Equal(info.NodeKey1Bytes[:], sourcePubKey) &&
- !bytes.Equal(info.NodeKey2Bytes[:], sourcePubKey) {
-
- nodeIsPublic = true
- return er.LoopBreak
- }
-
- // Since the edge _does_ extend to the source node, we'll also
- // need to ensure that this is a public edge.
- if info.AuthProof != nil {
- nodeIsPublic = true
- return er.LoopBreak
- }
-
- // Otherwise, we'll continue our search.
- return nil
- })
- if err != nil && !er.IsLoopBreak(err) {
- return false, err
- }
-
- return nodeIsPublic, nil
-}
-
-// FetchLightningNode attempts to look up a target node by its identity public
-// key. If the node isn't found in the database, then ErrGraphNodeNotFound is
-// returned.
-//
-// If the caller wishes to re-use an existing boltdb transaction, then it
-// should be passed as the first argument. Otherwise the first argument should
-// be nil and a fresh transaction will be created to execute the graph
-// traversal.
-func (c *ChannelGraph) FetchLightningNode(tx kvdb.RTx, nodePub route.Vertex) (
- *LightningNode, er.R) {
-
- var node *LightningNode
-
- fetchNode := func(tx kvdb.RTx) er.R {
- // First grab the nodes bucket which stores the mapping from
- // pubKey to node information.
- nodes := tx.ReadBucket(nodeBucket)
- if nodes == nil {
- return ErrGraphNotFound.Default()
- }
-
- // If a key for this serialized public key isn't found, then
- // the target node doesn't exist within the database.
- nodeBytes := nodes.Get(nodePub[:])
- if nodeBytes == nil {
- return ErrGraphNodeNotFound.Default()
- }
-
- // If the node is found, then we can de deserialize the node
- // information to return to the user.
- nodeReader := bytes.NewReader(nodeBytes)
- n, err := deserializeLightningNode(nodeReader)
- if err != nil {
- return err
- }
- n.db = c.db
-
- node = &n
-
- return nil
- }
-
- var err er.R
- if tx == nil {
- err = kvdb.View(c.db, fetchNode, func() {})
- } else {
- err = fetchNode(tx)
- }
- if err != nil {
- return nil, err
- }
-
- return node, nil
-}
-
-// HasLightningNode determines if the graph has a vertex identified by the
-// target node identity public key. If the node exists in the database, a
-// timestamp of when the data for the node was lasted updated is returned along
-// with a true boolean. Otherwise, an empty time.Time is returned with a false
-// boolean.
-func (c *ChannelGraph) HasLightningNode(nodePub [33]byte) (time.Time, bool, er.R) {
- var (
- updateTime time.Time
- exists bool
- )
-
- err := kvdb.View(c.db, func(tx kvdb.RTx) er.R {
- // First grab the nodes bucket which stores the mapping from
- // pubKey to node information.
- nodes := tx.ReadBucket(nodeBucket)
- if nodes == nil {
- return ErrGraphNotFound.Default()
- }
-
- // If a key for this serialized public key isn't found, we can
- // exit early.
- nodeBytes := nodes.Get(nodePub[:])
- if nodeBytes == nil {
- exists = false
- return nil
- }
-
- // Otherwise we continue on to obtain the time stamp
- // representing the last time the data for this node was
- // updated.
- nodeReader := bytes.NewReader(nodeBytes)
- node, err := deserializeLightningNode(nodeReader)
- if err != nil {
- return err
- }
-
- exists = true
- updateTime = node.LastUpdate
- return nil
- }, func() {
- updateTime = time.Time{}
- exists = false
- })
- if err != nil {
- return time.Time{}, exists, err
- }
-
- return updateTime, exists, nil
-}
-
-// nodeTraversal is used to traverse all channels of a node given by its
-// public key and passes channel information into the specified callback.
-func nodeTraversal(tx kvdb.RTx, nodePub []byte, db *DB,
- cb func(kvdb.RTx, *ChannelEdgeInfo, *ChannelEdgePolicy, *ChannelEdgePolicy) er.R) er.R {
-
- traversal := func(tx kvdb.RTx) er.R {
- nodes := tx.ReadBucket(nodeBucket)
- if nodes == nil {
- return ErrGraphNotFound.Default()
- }
- edges := tx.ReadBucket(edgeBucket)
- if edges == nil {
- return ErrGraphNotFound.Default()
- }
- edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
- if edgeIndex == nil {
- return ErrGraphNoEdgesFound.Default()
- }
-
- // In order to reach all the edges for this node, we take
- // advantage of the construction of the key-space within the
- // edge bucket. The keys are stored in the form: pubKey ||
- // chanID. Therefore, starting from a chanID of zero, we can
- // scan forward in the bucket, grabbing all the edges for the
- // node. Once the prefix no longer matches, then we know we're
- // done.
- var nodeStart [33 + 8]byte
- copy(nodeStart[:], nodePub)
- copy(nodeStart[33:], chanStart[:])
-
- // Starting from the key pubKey || 0, we seek forward in the
- // bucket until the retrieved key no longer has the public key
- // as its prefix. This indicates that we've stepped over into
- // another node's edges, so we can terminate our scan.
- edgeCursor := edges.ReadCursor()
- for nodeEdge, _ := edgeCursor.Seek(nodeStart[:]); bytes.HasPrefix(nodeEdge, nodePub); nodeEdge, _ = edgeCursor.Next() {
- // If the prefix still matches, the channel id is
- // returned in nodeEdge. Channel id is used to lookup
- // the node at the other end of the channel and both
- // edge policies.
- chanID := nodeEdge[33:]
- edgeInfo, err := fetchChanEdgeInfo(edgeIndex, chanID)
- if err != nil {
- return err
- }
- edgeInfo.db = db
-
- outgoingPolicy, err := fetchChanEdgePolicy(
- edges, chanID, nodePub, nodes,
- )
- if err != nil {
- return err
- }
-
- otherNode, err := edgeInfo.OtherNodeKeyBytes(nodePub)
- if err != nil {
- return err
- }
-
- incomingPolicy, err := fetchChanEdgePolicy(
- edges, chanID, otherNode[:], nodes,
- )
- if err != nil {
- return err
- }
-
- // Finally, we execute the callback.
- err = cb(tx, &edgeInfo, outgoingPolicy, incomingPolicy)
- if err != nil {
- return err
- }
- }
-
- return nil
- }
-
- // If no transaction was provided, then we'll create a new transaction
- // to execute the transaction within.
- if tx == nil {
- return kvdb.View(db, traversal, func() {})
- }
-
- // Otherwise, we re-use the existing transaction to execute the graph
- // traversal.
- return traversal(tx)
-}
-
-// ForEachChannel iterates through all channels of this node, executing the
-// passed callback with an edge info structure and the policies of each end
-// of the channel. The first edge policy is the outgoing edge *to* the
-// the connecting node, while the second is the incoming edge *from* the
-// connecting node. If the callback returns an error, then the iteration is
-// halted with the error propagated back up to the caller.
-//
-// Unknown policies are passed into the callback as nil values.
-//
-// If the caller wishes to re-use an existing boltdb transaction, then it
-// should be passed as the first argument. Otherwise the first argument should
-// be nil and a fresh transaction will be created to execute the graph
-// traversal.
-func (l *LightningNode) ForEachChannel(tx kvdb.RTx,
- cb func(kvdb.RTx, *ChannelEdgeInfo, *ChannelEdgePolicy, *ChannelEdgePolicy) er.R) er.R {
-
- nodePub := l.PubKeyBytes[:]
- db := l.db
-
- return nodeTraversal(tx, nodePub, db, cb)
-}
-
-// ChannelEdgeInfo represents a fully authenticated channel along with all its
-// unique attributes. Once an authenticated channel announcement has been
-// processed on the network, then an instance of ChannelEdgeInfo encapsulating
-// the channels attributes is stored. The other portions relevant to routing
-// policy of a channel are stored within a ChannelEdgePolicy for each direction
-// of the channel.
-type ChannelEdgeInfo struct {
- // ChannelID is the unique channel ID for the channel. The first 3
- // bytes are the block height, the next 3 the index within the block,
- // and the last 2 bytes are the output index for the channel.
- ChannelID uint64
-
- // ChainHash is the hash that uniquely identifies the chain that this
- // channel was opened within.
- //
- // TODO(roasbeef): need to modify db keying for multi-chain
- // * must add chain hash to prefix as well
- ChainHash chainhash.Hash
-
- // NodeKey1Bytes is the raw public key of the first node.
- NodeKey1Bytes [33]byte
- nodeKey1 *btcec.PublicKey
-
- // NodeKey2Bytes is the raw public key of the first node.
- NodeKey2Bytes [33]byte
- nodeKey2 *btcec.PublicKey
-
- // BitcoinKey1Bytes is the raw public key of the first node.
- BitcoinKey1Bytes [33]byte
- bitcoinKey1 *btcec.PublicKey
-
- // BitcoinKey2Bytes is the raw public key of the first node.
- BitcoinKey2Bytes [33]byte
- bitcoinKey2 *btcec.PublicKey
-
- // Features is an opaque byte slice that encodes the set of channel
- // specific features that this channel edge supports.
- Features []byte
-
- // AuthProof is the authentication proof for this channel. This proof
- // contains a set of signatures binding four identities, which attests
- // to the legitimacy of the advertised channel.
- AuthProof *ChannelAuthProof
-
- // ChannelPoint is the funding outpoint of the channel. This can be
- // used to uniquely identify the channel within the channel graph.
- ChannelPoint wire.OutPoint
-
- // Capacity is the total capacity of the channel, this is determined by
- // the value output in the outpoint that created this channel.
- Capacity btcutil.Amount
-
- // ExtraOpaqueData is the set of data that was appended to this
- // message, some of which we may not actually know how to iterate or
- // parse. By holding onto this data, we ensure that we're able to
- // properly validate the set of signatures that cover these new fields,
- // and ensure we're able to make upgrades to the network in a forwards
- // compatible manner.
- ExtraOpaqueData []byte
-
- db *DB
-}
-
-// AddNodeKeys is a setter-like method that can be used to replace the set of
-// keys for the target ChannelEdgeInfo.
-func (c *ChannelEdgeInfo) AddNodeKeys(nodeKey1, nodeKey2, bitcoinKey1,
- bitcoinKey2 *btcec.PublicKey) {
-
- c.nodeKey1 = nodeKey1
- copy(c.NodeKey1Bytes[:], c.nodeKey1.SerializeCompressed())
-
- c.nodeKey2 = nodeKey2
- copy(c.NodeKey2Bytes[:], nodeKey2.SerializeCompressed())
-
- c.bitcoinKey1 = bitcoinKey1
- copy(c.BitcoinKey1Bytes[:], c.bitcoinKey1.SerializeCompressed())
-
- c.bitcoinKey2 = bitcoinKey2
- copy(c.BitcoinKey2Bytes[:], bitcoinKey2.SerializeCompressed())
-}
-
-// NodeKey1 is the identity public key of the "first" node that was involved in
-// the creation of this channel. A node is considered "first" if the
-// lexicographical ordering the its serialized public key is "smaller" than
-// that of the other node involved in channel creation.
-//
-// NOTE: By having this method to access an attribute, we ensure we only need
-// to fully deserialize the pubkey if absolutely necessary.
-func (c *ChannelEdgeInfo) NodeKey1() (*btcec.PublicKey, er.R) {
- if c.nodeKey1 != nil {
- return c.nodeKey1, nil
- }
-
- key, err := btcec.ParsePubKey(c.NodeKey1Bytes[:], btcec.S256())
- if err != nil {
- return nil, err
- }
- c.nodeKey1 = key
-
- return key, nil
-}
-
-// NodeKey2 is the identity public key of the "second" node that was
-// involved in the creation of this channel. A node is considered
-// "second" if the lexicographical ordering the its serialized public
-// key is "larger" than that of the other node involved in channel
-// creation.
-//
-// NOTE: By having this method to access an attribute, we ensure we only need
-// to fully deserialize the pubkey if absolutely necessary.
-func (c *ChannelEdgeInfo) NodeKey2() (*btcec.PublicKey, er.R) {
- if c.nodeKey2 != nil {
- return c.nodeKey2, nil
- }
-
- key, err := btcec.ParsePubKey(c.NodeKey2Bytes[:], btcec.S256())
- if err != nil {
- return nil, err
- }
- c.nodeKey2 = key
-
- return key, nil
-}
-
-// BitcoinKey1 is the Bitcoin multi-sig key belonging to the first
-// node, that was involved in the funding transaction that originally
-// created the channel that this struct represents.
-//
-// NOTE: By having this method to access an attribute, we ensure we only need
-// to fully deserialize the pubkey if absolutely necessary.
-func (c *ChannelEdgeInfo) BitcoinKey1() (*btcec.PublicKey, er.R) {
- if c.bitcoinKey1 != nil {
- return c.bitcoinKey1, nil
- }
-
- key, err := btcec.ParsePubKey(c.BitcoinKey1Bytes[:], btcec.S256())
- if err != nil {
- return nil, err
- }
- c.bitcoinKey1 = key
-
- return key, nil
-}
-
-// BitcoinKey2 is the Bitcoin multi-sig key belonging to the second
-// node, that was involved in the funding transaction that originally
-// created the channel that this struct represents.
-//
-// NOTE: By having this method to access an attribute, we ensure we only need
-// to fully deserialize the pubkey if absolutely necessary.
-func (c *ChannelEdgeInfo) BitcoinKey2() (*btcec.PublicKey, er.R) {
- if c.bitcoinKey2 != nil {
- return c.bitcoinKey2, nil
- }
-
- key, err := btcec.ParsePubKey(c.BitcoinKey2Bytes[:], btcec.S256())
- if err != nil {
- return nil, err
- }
- c.bitcoinKey2 = key
-
- return key, nil
-}
-
-// OtherNodeKeyBytes returns the node key bytes of the other end of
-// the channel.
-func (c *ChannelEdgeInfo) OtherNodeKeyBytes(thisNodeKey []byte) (
- [33]byte, er.R) {
-
- switch {
- case bytes.Equal(c.NodeKey1Bytes[:], thisNodeKey):
- return c.NodeKey2Bytes, nil
- case bytes.Equal(c.NodeKey2Bytes[:], thisNodeKey):
- return c.NodeKey1Bytes, nil
- default:
- return [33]byte{}, er.Errorf("node not participating in this channel")
- }
-}
-
-// FetchOtherNode attempts to fetch the full LightningNode that's opposite of
-// the target node in the channel. This is useful when one knows the pubkey of
-// one of the nodes, and wishes to obtain the full LightningNode for the other
-// end of the channel.
-func (c *ChannelEdgeInfo) FetchOtherNode(tx kvdb.RTx, thisNodeKey []byte) (*LightningNode, er.R) {
-
- // Ensure that the node passed in is actually a member of the channel.
- var targetNodeBytes [33]byte
- switch {
- case bytes.Equal(c.NodeKey1Bytes[:], thisNodeKey):
- targetNodeBytes = c.NodeKey2Bytes
- case bytes.Equal(c.NodeKey2Bytes[:], thisNodeKey):
- targetNodeBytes = c.NodeKey1Bytes
- default:
- return nil, er.Errorf("node not participating in this channel")
- }
-
- var targetNode *LightningNode
- fetchNodeFunc := func(tx kvdb.RTx) er.R {
- // First grab the nodes bucket which stores the mapping from
- // pubKey to node information.
- nodes := tx.ReadBucket(nodeBucket)
- if nodes == nil {
- return ErrGraphNotFound.Default()
- }
-
- node, err := fetchLightningNode(nodes, targetNodeBytes[:])
- if err != nil {
- return err
- }
- node.db = c.db
-
- targetNode = &node
-
- return nil
- }
-
- // If the transaction is nil, then we'll need to create a new one,
- // otherwise we can use the existing db transaction.
- var err er.R
- if tx == nil {
- err = kvdb.View(c.db, fetchNodeFunc, func() { targetNode = nil })
- } else {
- err = fetchNodeFunc(tx)
- }
-
- return targetNode, err
-}
-
-// ChannelAuthProof is the authentication proof (the signature portion) for a
-// channel. Using the four signatures contained in the struct, and some
-// auxiliary knowledge (the funding script, node identities, and outpoint) nodes
-// on the network are able to validate the authenticity and existence of a
-// channel. Each of these signatures signs the following digest: chanID ||
-// nodeID1 || nodeID2 || bitcoinKey1|| bitcoinKey2 || 2-byte-feature-len ||
-// features.
-type ChannelAuthProof struct {
- // nodeSig1 is a cached instance of the first node signature.
- nodeSig1 *btcec.Signature
-
- // NodeSig1Bytes are the raw bytes of the first node signature encoded
- // in DER format.
- NodeSig1Bytes []byte
-
- // nodeSig2 is a cached instance of the second node signature.
- nodeSig2 *btcec.Signature
-
- // NodeSig2Bytes are the raw bytes of the second node signature
- // encoded in DER format.
- NodeSig2Bytes []byte
-
- // bitcoinSig1 is a cached instance of the first bitcoin signature.
- bitcoinSig1 *btcec.Signature
-
- // BitcoinSig1Bytes are the raw bytes of the first bitcoin signature
- // encoded in DER format.
- BitcoinSig1Bytes []byte
-
- // bitcoinSig2 is a cached instance of the second bitcoin signature.
- bitcoinSig2 *btcec.Signature
-
- // BitcoinSig2Bytes are the raw bytes of the second bitcoin signature
- // encoded in DER format.
- BitcoinSig2Bytes []byte
-}
-
-// Node1Sig is the signature using the identity key of the node that is first
-// in a lexicographical ordering of the serialized public keys of the two nodes
-// that created the channel.
-//
-// NOTE: By having this method to access an attribute, we ensure we only need
-// to fully deserialize the signature if absolutely necessary.
-func (c *ChannelAuthProof) Node1Sig() (*btcec.Signature, er.R) {
- if c.nodeSig1 != nil {
- return c.nodeSig1, nil
- }
-
- sig, err := btcec.ParseSignature(c.NodeSig1Bytes, btcec.S256())
- if err != nil {
- return nil, err
- }
-
- c.nodeSig1 = sig
-
- return sig, nil
-}
-
-// Node2Sig is the signature using the identity key of the node that is second
-// in a lexicographical ordering of the serialized public keys of the two nodes
-// that created the channel.
-//
-// NOTE: By having this method to access an attribute, we ensure we only need
-// to fully deserialize the signature if absolutely necessary.
-func (c *ChannelAuthProof) Node2Sig() (*btcec.Signature, er.R) {
- if c.nodeSig2 != nil {
- return c.nodeSig2, nil
- }
-
- sig, err := btcec.ParseSignature(c.NodeSig2Bytes, btcec.S256())
- if err != nil {
- return nil, err
- }
-
- c.nodeSig2 = sig
-
- return sig, nil
-}
-
-// BitcoinSig1 is the signature using the public key of the first node that was
-// used in the channel's multi-sig output.
-//
-// NOTE: By having this method to access an attribute, we ensure we only need
-// to fully deserialize the signature if absolutely necessary.
-func (c *ChannelAuthProof) BitcoinSig1() (*btcec.Signature, er.R) {
- if c.bitcoinSig1 != nil {
- return c.bitcoinSig1, nil
- }
-
- sig, err := btcec.ParseSignature(c.BitcoinSig1Bytes, btcec.S256())
- if err != nil {
- return nil, err
- }
-
- c.bitcoinSig1 = sig
-
- return sig, nil
-}
-
-// BitcoinSig2 is the signature using the public key of the second node that
-// was used in the channel's multi-sig output.
-//
-// NOTE: By having this method to access an attribute, we ensure we only need
-// to fully deserialize the signature if absolutely necessary.
-func (c *ChannelAuthProof) BitcoinSig2() (*btcec.Signature, er.R) {
- if c.bitcoinSig2 != nil {
- return c.bitcoinSig2, nil
- }
-
- sig, err := btcec.ParseSignature(c.BitcoinSig2Bytes, btcec.S256())
- if err != nil {
- return nil, err
- }
-
- c.bitcoinSig2 = sig
-
- return sig, nil
-}
-
-// IsEmpty check is the authentication proof is empty Proof is empty if at
-// least one of the signatures are equal to nil.
-func (c *ChannelAuthProof) IsEmpty() bool {
- return len(c.NodeSig1Bytes) == 0 ||
- len(c.NodeSig2Bytes) == 0 ||
- len(c.BitcoinSig1Bytes) == 0 ||
- len(c.BitcoinSig2Bytes) == 0
-}
-
-// ChannelEdgePolicy represents a *directed* edge within the channel graph. For
-// each channel in the database, there are two distinct edges: one for each
-// possible direction of travel along the channel. The edges themselves hold
-// information concerning fees, and minimum time-lock information which is
-// utilized during path finding.
-type ChannelEdgePolicy struct {
- // SigBytes is the raw bytes of the signature of the channel edge
- // policy. We'll only parse these if the caller needs to access the
- // signature for validation purposes. Do not set SigBytes directly, but
- // use SetSigBytes instead to make sure that the cache is invalidated.
- SigBytes []byte
-
- // sig is a cached fully parsed signature.
- sig *btcec.Signature
-
- // ChannelID is the unique channel ID for the channel. The first 3
- // bytes are the block height, the next 3 the index within the block,
- // and the last 2 bytes are the output index for the channel.
- ChannelID uint64
-
- // LastUpdate is the last time an authenticated edge for this channel
- // was received.
- LastUpdate time.Time
-
- // MessageFlags is a bitfield which indicates the presence of optional
- // fields (like max_htlc) in the policy.
- MessageFlags lnwire.ChanUpdateMsgFlags
-
- // ChannelFlags is a bitfield which signals the capabilities of the
- // channel as well as the directed edge this update applies to.
- ChannelFlags lnwire.ChanUpdateChanFlags
-
- // TimeLockDelta is the number of blocks this node will subtract from
- // the expiry of an incoming HTLC. This value expresses the time buffer
- // the node would like to HTLC exchanges.
- TimeLockDelta uint16
-
- // MinHTLC is the smallest value HTLC this node will forward, expressed
- // in millisatoshi.
- MinHTLC lnwire.MilliSatoshi
-
- // MaxHTLC is the largest value HTLC this node will forward, expressed
- // in millisatoshi.
- MaxHTLC lnwire.MilliSatoshi
-
- // FeeBaseMSat is the base HTLC fee that will be charged for forwarding
- // ANY HTLC, expressed in mSAT's.
- FeeBaseMSat lnwire.MilliSatoshi
-
- // FeeProportionalMillionths is the rate that the node will charge for
- // HTLCs for each millionth of a satoshi forwarded.
- FeeProportionalMillionths lnwire.MilliSatoshi
-
- // Node is the LightningNode that this directed edge leads to. Using
- // this pointer the channel graph can further be traversed.
- Node *LightningNode
-
- // ExtraOpaqueData is the set of data that was appended to this
- // message, some of which we may not actually know how to iterate or
- // parse. By holding onto this data, we ensure that we're able to
- // properly validate the set of signatures that cover these new fields,
- // and ensure we're able to make upgrades to the network in a forwards
- // compatible manner.
- ExtraOpaqueData []byte
-
- db *DB
-}
-
-// Signature is a channel announcement signature, which is needed for proper
-// edge policy announcement.
-//
-// NOTE: By having this method to access an attribute, we ensure we only need
-// to fully deserialize the signature if absolutely necessary.
-func (c *ChannelEdgePolicy) Signature() (*btcec.Signature, er.R) {
- if c.sig != nil {
- return c.sig, nil
- }
-
- sig, err := btcec.ParseSignature(c.SigBytes, btcec.S256())
- if err != nil {
- return nil, err
- }
-
- c.sig = sig
-
- return sig, nil
-}
-
-// SetSigBytes updates the signature and invalidates the cached parsed
-// signature.
-func (c *ChannelEdgePolicy) SetSigBytes(sig []byte) {
- c.SigBytes = sig
- c.sig = nil
-}
-
-// IsDisabled determines whether the edge has the disabled bit set.
-func (c *ChannelEdgePolicy) IsDisabled() bool {
- return c.ChannelFlags&lnwire.ChanUpdateDisabled ==
- lnwire.ChanUpdateDisabled
-}
-
-// ComputeFee computes the fee to forward an HTLC of `amt` milli-satoshis over
-// the passed active payment channel. This value is currently computed as
-// specified in BOLT07, but will likely change in the near future.
-func (c *ChannelEdgePolicy) ComputeFee(
- amt lnwire.MilliSatoshi) lnwire.MilliSatoshi {
-
- return c.FeeBaseMSat + (amt*c.FeeProportionalMillionths)/feeRateParts
-}
-
-// divideCeil divides dividend by factor and rounds the result up.
-func divideCeil(dividend, factor lnwire.MilliSatoshi) lnwire.MilliSatoshi {
- return (dividend + factor - 1) / factor
-}
-
-// ComputeFeeFromIncoming computes the fee to forward an HTLC given the incoming
-// amount.
-func (c *ChannelEdgePolicy) ComputeFeeFromIncoming(
- incomingAmt lnwire.MilliSatoshi) lnwire.MilliSatoshi {
-
- return incomingAmt - divideCeil(
- feeRateParts*(incomingAmt-c.FeeBaseMSat),
- feeRateParts+c.FeeProportionalMillionths,
- )
-}
-
-// FetchChannelEdgesByOutpoint attempts to lookup the two directed edges for
-// the channel identified by the funding outpoint. If the channel can't be
-// found, then ErrEdgeNotFound is returned. A struct which houses the general
-// information for the channel itself is returned as well as two structs that
-// contain the routing policies for the channel in either direction.
-func (c *ChannelGraph) FetchChannelEdgesByOutpoint(op *wire.OutPoint,
-) (*ChannelEdgeInfo, *ChannelEdgePolicy, *ChannelEdgePolicy, er.R) {
-
- var (
- edgeInfo *ChannelEdgeInfo
- policy1 *ChannelEdgePolicy
- policy2 *ChannelEdgePolicy
- )
-
- err := kvdb.View(c.db, func(tx kvdb.RTx) er.R {
- // First, grab the node bucket. This will be used to populate
- // the Node pointers in each edge read from disk.
- nodes := tx.ReadBucket(nodeBucket)
- if nodes == nil {
- return ErrGraphNotFound.Default()
- }
-
- // Next, grab the edge bucket which stores the edges, and also
- // the index itself so we can group the directed edges together
- // logically.
- edges := tx.ReadBucket(edgeBucket)
- if edges == nil {
- return ErrGraphNoEdgesFound.Default()
- }
- edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
- if edgeIndex == nil {
- return ErrGraphNoEdgesFound.Default()
- }
-
- // If the channel's outpoint doesn't exist within the outpoint
- // index, then the edge does not exist.
- chanIndex := edges.NestedReadBucket(channelPointBucket)
- if chanIndex == nil {
- return ErrGraphNoEdgesFound.Default()
- }
- var b bytes.Buffer
- if err := writeOutpoint(&b, op); err != nil {
- return err
- }
- chanID := chanIndex.Get(b.Bytes())
- if chanID == nil {
- return ErrEdgeNotFound.Default()
- }
-
- // If the channel is found to exists, then we'll first retrieve
- // the general information for the channel.
- edge, err := fetchChanEdgeInfo(edgeIndex, chanID)
- if err != nil {
- return err
- }
- edgeInfo = &edge
- edgeInfo.db = c.db
-
- // Once we have the information about the channels' parameters,
- // we'll fetch the routing policies for each for the directed
- // edges.
- e1, e2, err := fetchChanEdgePolicies(
- edgeIndex, edges, nodes, chanID, c.db,
- )
- if err != nil {
- return err
- }
-
- policy1 = e1
- policy2 = e2
- return nil
- }, func() {
- edgeInfo = nil
- policy1 = nil
- policy2 = nil
- })
- if err != nil {
- return nil, nil, nil, err
- }
-
- return edgeInfo, policy1, policy2, nil
-}
-
-// FetchChannelEdgesByID attempts to lookup the two directed edges for the
-// channel identified by the channel ID. If the channel can't be found, then
-// ErrEdgeNotFound is returned. A struct which houses the general information
-// for the channel itself is returned as well as two structs that contain the
-// routing policies for the channel in either direction.
-//
-// ErrZombieEdge an be returned if the edge is currently marked as a zombie
-// within the database. In this case, the ChannelEdgePolicy's will be nil, and
-// the ChannelEdgeInfo will only include the public keys of each node.
-func (c *ChannelGraph) FetchChannelEdgesByID(chanID uint64,
-) (*ChannelEdgeInfo, *ChannelEdgePolicy, *ChannelEdgePolicy, er.R) {
-
- var (
- edgeInfo *ChannelEdgeInfo
- policy1 *ChannelEdgePolicy
- policy2 *ChannelEdgePolicy
- channelID [8]byte
- )
-
- err := kvdb.View(c.db, func(tx kvdb.RTx) er.R {
- // First, grab the node bucket. This will be used to populate
- // the Node pointers in each edge read from disk.
- nodes := tx.ReadBucket(nodeBucket)
- if nodes == nil {
- return ErrGraphNotFound.Default()
- }
-
- // Next, grab the edge bucket which stores the edges, and also
- // the index itself so we can group the directed edges together
- // logically.
- edges := tx.ReadBucket(edgeBucket)
- if edges == nil {
- return ErrGraphNoEdgesFound.Default()
- }
- edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
- if edgeIndex == nil {
- return ErrGraphNoEdgesFound.Default()
- }
-
- byteOrder.PutUint64(channelID[:], chanID)
-
- // Now, attempt to fetch edge.
- edge, err := fetchChanEdgeInfo(edgeIndex, channelID[:])
-
- // If it doesn't exist, we'll quickly check our zombie index to
- // see if we've previously marked it as so.
- if ErrEdgeNotFound.Is(err) {
- // If the zombie index doesn't exist, or the edge is not
- // marked as a zombie within it, then we'll return the
- // original ErrEdgeNotFound error.
- zombieIndex := edges.NestedReadBucket(zombieBucket)
- if zombieIndex == nil {
- return ErrEdgeNotFound.Default()
- }
-
- isZombie, pubKey1, pubKey2 := isZombieEdge(
- zombieIndex, chanID,
- )
- if !isZombie {
- return ErrEdgeNotFound.Default()
- }
-
- // Otherwise, the edge is marked as a zombie, so we'll
- // populate the edge info with the public keys of each
- // party as this is the only information we have about
- // it and return an error signaling so.
- edgeInfo = &ChannelEdgeInfo{
- NodeKey1Bytes: pubKey1,
- NodeKey2Bytes: pubKey2,
- }
- return ErrZombieEdge.Default()
- }
-
- // Otherwise, we'll just return the error if any.
- if err != nil {
- return err
- }
-
- edgeInfo = &edge
- edgeInfo.db = c.db
-
- // Then we'll attempt to fetch the accompanying policies of this
- // edge.
- e1, e2, err := fetchChanEdgePolicies(
- edgeIndex, edges, nodes, channelID[:], c.db,
- )
- if err != nil {
- return err
- }
-
- policy1 = e1
- policy2 = e2
- return nil
- }, func() {
- edgeInfo = nil
- policy1 = nil
- policy2 = nil
- })
- if ErrZombieEdge.Is(err) {
- return edgeInfo, nil, nil, err
- }
- if err != nil {
- return nil, nil, nil, err
- }
-
- return edgeInfo, policy1, policy2, nil
-}
-
-// IsPublicNode is a helper method that determines whether the node with the
-// given public key is seen as a public node in the graph from the graph's
-// source node's point of view.
-func (c *ChannelGraph) IsPublicNode(pubKey [33]byte) (bool, er.R) {
- var nodeIsPublic bool
- err := kvdb.View(c.db, func(tx kvdb.RTx) er.R {
- nodes := tx.ReadBucket(nodeBucket)
- if nodes == nil {
- return ErrGraphNodesNotFound.Default()
- }
- ourPubKey := nodes.Get(sourceKey)
- if ourPubKey == nil {
- return ErrSourceNodeNotSet.Default()
- }
- node, err := fetchLightningNode(nodes, pubKey[:])
- if err != nil {
- return err
- }
-
- nodeIsPublic, err = node.isPublic(tx, ourPubKey)
- return err
- }, func() {
- nodeIsPublic = false
- })
- if err != nil {
- return false, err
- }
-
- return nodeIsPublic, nil
-}
-
-// genMultiSigP2WSH generates the p2wsh'd multisig script for 2 of 2 pubkeys.
-func genMultiSigP2WSH(aPub, bPub []byte) ([]byte, er.R) {
- if len(aPub) != 33 || len(bPub) != 33 {
- return nil, er.Errorf("pubkey size error. Compressed " +
- "pubkeys only")
- }
-
- // Swap to sort pubkeys if needed. Keys are sorted in lexicographical
- // order. The signatures within the scriptSig must also adhere to the
- // order, ensuring that the signatures for each public key appears in
- // the proper order on the stack.
- if bytes.Compare(aPub, bPub) == 1 {
- aPub, bPub = bPub, aPub
- }
-
- // First, we'll generate the witness script for the multi-sig.
- bldr := scriptbuilder.NewScriptBuilder()
- bldr.AddOp(opcode.OP_2)
- bldr.AddData(aPub) // Add both pubkeys (sorted).
- bldr.AddData(bPub)
- bldr.AddOp(opcode.OP_2)
- bldr.AddOp(opcode.OP_CHECKMULTISIG)
- witnessScript, err := bldr.Script()
- if err != nil {
- return nil, err
- }
-
- // With the witness script generated, we'll now turn it into a p2sh
- // script:
- // * OP_0
- bldr = scriptbuilder.NewScriptBuilder()
- bldr.AddOp(opcode.OP_0)
- scriptHash := sha256.Sum256(witnessScript)
- bldr.AddData(scriptHash[:])
-
- return bldr.Script()
-}
-
-// EdgePoint couples the outpoint of a channel with the funding script that it
-// creates. The FilteredChainView will use this to watch for spends of this
-// edge point on chain. We require both of these values as depending on the
-// concrete implementation, either the pkScript, or the out point will be used.
-type EdgePoint struct {
- // FundingPkScript is the p2wsh multi-sig script of the target channel.
- FundingPkScript []byte
-
- // OutPoint is the outpoint of the target channel.
- OutPoint wire.OutPoint
-}
-
-// String returns a human readable version of the target EdgePoint. We return
-// the outpoint directly as it is enough to uniquely identify the edge point.
-func (e *EdgePoint) String() string {
- return e.OutPoint.String()
-}
-
-// ChannelView returns the verifiable edge information for each active channel
-// within the known channel graph. The set of UTXO's (along with their scripts)
-// returned are the ones that need to be watched on chain to detect channel
-// closes on the resident blockchain.
-func (c *ChannelGraph) ChannelView() ([]EdgePoint, er.R) {
- var edgePoints []EdgePoint
- if err := kvdb.View(c.db, func(tx kvdb.RTx) er.R {
- // We're going to iterate over the entire channel index, so
- // we'll need to fetch the edgeBucket to get to the index as
- // it's a sub-bucket.
- edges := tx.ReadBucket(edgeBucket)
- if edges == nil {
- return ErrGraphNoEdgesFound.Default()
- }
- chanIndex := edges.NestedReadBucket(channelPointBucket)
- if chanIndex == nil {
- return ErrGraphNoEdgesFound.Default()
- }
- edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
- if edgeIndex == nil {
- return ErrGraphNoEdgesFound.Default()
- }
-
- // Once we have the proper bucket, we'll range over each key
- // (which is the channel point for the channel) and decode it,
- // accumulating each entry.
- return chanIndex.ForEach(func(chanPointBytes, chanID []byte) er.R {
- chanPointReader := bytes.NewReader(chanPointBytes)
-
- var chanPoint wire.OutPoint
- err := readOutpoint(chanPointReader, &chanPoint)
- if err != nil {
- return err
- }
-
- edgeInfo, err := fetchChanEdgeInfo(
- edgeIndex, chanID,
- )
- if err != nil {
- return err
- }
-
- pkScript, err := genMultiSigP2WSH(
- edgeInfo.BitcoinKey1Bytes[:],
- edgeInfo.BitcoinKey2Bytes[:],
- )
- if err != nil {
- return err
- }
-
- edgePoints = append(edgePoints, EdgePoint{
- FundingPkScript: pkScript,
- OutPoint: chanPoint,
- })
-
- return nil
- })
- }, func() {
- edgePoints = nil
- }); err != nil {
- return nil, err
- }
-
- return edgePoints, nil
-}
-
-// NewChannelEdgePolicy returns a new blank ChannelEdgePolicy.
-func (c *ChannelGraph) NewChannelEdgePolicy() *ChannelEdgePolicy {
- return &ChannelEdgePolicy{db: c.db}
-}
-
-// markEdgeZombie marks an edge as a zombie within our zombie index. The public
-// keys should represent the node public keys of the two parties involved in the
-// edge.
-func markEdgeZombie(zombieIndex kvdb.RwBucket, chanID uint64, pubKey1,
- pubKey2 [33]byte) er.R {
-
- var k [8]byte
- byteOrder.PutUint64(k[:], chanID)
-
- var v [66]byte
- copy(v[:33], pubKey1[:])
- copy(v[33:], pubKey2[:])
-
- return zombieIndex.Put(k[:], v[:])
-}
-
-// MarkEdgeLive clears an edge from our zombie index, deeming it as live.
-func (c *ChannelGraph) MarkEdgeLive(chanID uint64) er.R {
- c.cacheMu.Lock()
- defer c.cacheMu.Unlock()
-
- err := kvdb.Update(c.db, func(tx kvdb.RwTx) er.R {
- edges := tx.ReadWriteBucket(edgeBucket)
- if edges == nil {
- return ErrGraphNoEdgesFound.Default()
- }
- zombieIndex := edges.NestedReadWriteBucket(zombieBucket)
- if zombieIndex == nil {
- return nil
- }
-
- var k [8]byte
- byteOrder.PutUint64(k[:], chanID)
- return zombieIndex.Delete(k[:])
- }, func() {})
- if err != nil {
- return err
- }
-
- c.rejectCache.remove(chanID)
- c.chanCache.remove(chanID)
-
- return nil
-}
-
-// IsZombieEdge returns whether the edge is considered zombie. If it is a
-// zombie, then the two node public keys corresponding to this edge are also
-// returned.
-func (c *ChannelGraph) IsZombieEdge(chanID uint64) (bool, [33]byte, [33]byte) {
- var (
- isZombie bool
- pubKey1, pubKey2 [33]byte
- )
-
- err := kvdb.View(c.db, func(tx kvdb.RTx) er.R {
- edges := tx.ReadBucket(edgeBucket)
- if edges == nil {
- return ErrGraphNoEdgesFound.Default()
- }
- zombieIndex := edges.NestedReadBucket(zombieBucket)
- if zombieIndex == nil {
- return nil
- }
-
- isZombie, pubKey1, pubKey2 = isZombieEdge(zombieIndex, chanID)
- return nil
- }, func() {
- isZombie = false
- pubKey1 = [33]byte{}
- pubKey2 = [33]byte{}
- })
- if err != nil {
- return false, [33]byte{}, [33]byte{}
- }
-
- return isZombie, pubKey1, pubKey2
-}
-
-// isZombieEdge returns whether an entry exists for the given channel in the
-// zombie index. If an entry exists, then the two node public keys corresponding
-// to this edge are also returned.
-func isZombieEdge(zombieIndex kvdb.RBucket,
- chanID uint64) (bool, [33]byte, [33]byte) {
-
- var k [8]byte
- byteOrder.PutUint64(k[:], chanID)
-
- v := zombieIndex.Get(k[:])
- if v == nil {
- return false, [33]byte{}, [33]byte{}
- }
-
- var pubKey1, pubKey2 [33]byte
- copy(pubKey1[:], v[:33])
- copy(pubKey2[:], v[33:])
-
- return true, pubKey1, pubKey2
-}
-
-// NumZombies returns the current number of zombie channels in the graph.
-func (c *ChannelGraph) NumZombies() (uint64, er.R) {
- var numZombies uint64
- err := kvdb.View(c.db, func(tx kvdb.RTx) er.R {
- edges := tx.ReadBucket(edgeBucket)
- if edges == nil {
- return nil
- }
- zombieIndex := edges.NestedReadBucket(zombieBucket)
- if zombieIndex == nil {
- return nil
- }
-
- return zombieIndex.ForEach(func(_, _ []byte) er.R {
- numZombies++
- return nil
- })
- }, func() {
- numZombies = 0
- })
- if err != nil {
- return 0, err
- }
-
- return numZombies, nil
-}
-
-func putLightningNode(nodeBucket kvdb.RwBucket, aliasBucket kvdb.RwBucket, // nolint:dupl
- updateIndex kvdb.RwBucket, node *LightningNode) er.R {
-
- var (
- scratch [16]byte
- b bytes.Buffer
- )
-
- pub, err := node.PubKey()
- if err != nil {
- return err
- }
- nodePub := pub.SerializeCompressed()
-
- // If the node has the update time set, write it, else write 0.
- updateUnix := uint64(0)
- if node.LastUpdate.Unix() > 0 {
- updateUnix = uint64(node.LastUpdate.Unix())
- }
-
- byteOrder.PutUint64(scratch[:8], updateUnix)
- if _, err := b.Write(scratch[:8]); err != nil {
- return er.E(err)
- }
-
- if _, err := b.Write(nodePub); err != nil {
- return er.E(err)
- }
-
- // If we got a node announcement for this node, we will have the rest
- // of the data available. If not we don't have more data to write.
- if !node.HaveNodeAnnouncement {
- // Write HaveNodeAnnouncement=0.
- byteOrder.PutUint16(scratch[:2], 0)
- if _, err := b.Write(scratch[:2]); err != nil {
- return er.E(err)
- }
-
- return nodeBucket.Put(nodePub, b.Bytes())
- }
-
- // Write HaveNodeAnnouncement=1.
- byteOrder.PutUint16(scratch[:2], 1)
- if _, err := b.Write(scratch[:2]); err != nil {
- return er.E(err)
- }
-
- if err := util.WriteBin(&b, byteOrder, node.Color.R); err != nil {
- return err
- }
- if err := util.WriteBin(&b, byteOrder, node.Color.G); err != nil {
- return err
- }
- if err := util.WriteBin(&b, byteOrder, node.Color.B); err != nil {
- return err
- }
-
- if err := wire.WriteVarString(&b, 0, node.Alias); err != nil {
- return err
- }
-
- if err := node.Features.Encode(&b); err != nil {
- return err
- }
-
- numAddresses := uint16(len(node.Addresses))
- byteOrder.PutUint16(scratch[:2], numAddresses)
- if _, err := b.Write(scratch[:2]); err != nil {
- return er.E(err)
- }
-
- for _, address := range node.Addresses {
- if err := serializeAddr(&b, address); err != nil {
- return err
- }
- }
-
- sigLen := len(node.AuthSigBytes)
- if sigLen > 80 {
- return er.Errorf("max sig len allowed is 80, had %v",
- sigLen)
- }
-
- err = wire.WriteVarBytes(&b, 0, node.AuthSigBytes)
- if err != nil {
- return err
- }
-
- if len(node.ExtraOpaqueData) > MaxAllowedExtraOpaqueBytes {
- return ErrTooManyExtraOpaqueBytes.New(
- fmt.Sprintf("%d", len(node.ExtraOpaqueData)), nil)
- }
- err = wire.WriteVarBytes(&b, 0, node.ExtraOpaqueData)
- if err != nil {
- return err
- }
-
- if err := aliasBucket.Put(nodePub, []byte(node.Alias)); err != nil {
- return err
- }
-
- // With the alias bucket updated, we'll now update the index that
- // tracks the time series of node updates.
- var indexKey [8 + 33]byte
- byteOrder.PutUint64(indexKey[:8], updateUnix)
- copy(indexKey[8:], nodePub)
-
- // If there was already an old index entry for this node, then we'll
- // delete the old one before we write the new entry.
- if nodeBytes := nodeBucket.Get(nodePub); nodeBytes != nil {
- // Extract out the old update time to we can reconstruct the
- // prior index key to delete it from the index.
- oldUpdateTime := nodeBytes[:8]
-
- var oldIndexKey [8 + 33]byte
- copy(oldIndexKey[:8], oldUpdateTime)
- copy(oldIndexKey[8:], nodePub)
-
- if err := updateIndex.Delete(oldIndexKey[:]); err != nil {
- return err
- }
- }
-
- if err := updateIndex.Put(indexKey[:], nil); err != nil {
- return err
- }
-
- return nodeBucket.Put(nodePub, b.Bytes())
-}
-
-func fetchLightningNode(nodeBucket kvdb.RBucket,
- nodePub []byte) (LightningNode, er.R) {
-
- nodeBytes := nodeBucket.Get(nodePub)
- if nodeBytes == nil {
- return LightningNode{}, ErrGraphNodeNotFound.Default()
- }
-
- nodeReader := bytes.NewReader(nodeBytes)
- return deserializeLightningNode(nodeReader)
-}
-
-func deserializeLightningNode(r io.Reader) (LightningNode, er.R) {
- var (
- node LightningNode
- scratch [8]byte
- err er.R
- )
-
- // Always populate a feature vector, even if we don't have a node
- // announcement and short circuit below.
- node.Features = lnwire.EmptyFeatureVector()
-
- if _, err := r.Read(scratch[:]); err != nil {
- return LightningNode{}, er.E(err)
- }
-
- unix := int64(byteOrder.Uint64(scratch[:]))
- node.LastUpdate = time.Unix(unix, 0)
-
- if _, err := util.ReadFull(r, node.PubKeyBytes[:]); err != nil {
- return LightningNode{}, err
- }
-
- if _, err := r.Read(scratch[:2]); err != nil {
- return LightningNode{}, er.E(err)
- }
-
- hasNodeAnn := byteOrder.Uint16(scratch[:2])
- if hasNodeAnn == 1 {
- node.HaveNodeAnnouncement = true
- } else {
- node.HaveNodeAnnouncement = false
- }
-
- // The rest of the data is optional, and will only be there if we got a node
- // announcement for this node.
- if !node.HaveNodeAnnouncement {
- return node, nil
- }
-
- // We did get a node announcement for this node, so we'll have the rest
- // of the data available.
- if err := util.ReadBin(r, byteOrder, &node.Color.R); err != nil {
- return LightningNode{}, err
- }
- if err := util.ReadBin(r, byteOrder, &node.Color.G); err != nil {
- return LightningNode{}, err
- }
- if err := util.ReadBin(r, byteOrder, &node.Color.B); err != nil {
- return LightningNode{}, err
- }
-
- node.Alias, err = wire.ReadVarString(r, 0)
- if err != nil {
- return LightningNode{}, err
- }
-
- err = node.Features.Decode(r)
- if err != nil {
- return LightningNode{}, err
- }
-
- if _, err := r.Read(scratch[:2]); err != nil {
- return LightningNode{}, er.E(err)
- }
- numAddresses := int(byteOrder.Uint16(scratch[:2]))
-
- var addresses []net.Addr
- for i := 0; i < numAddresses; i++ {
- address, err := deserializeAddr(r)
- if err != nil {
- return LightningNode{}, err
- }
- addresses = append(addresses, address)
- }
- node.Addresses = addresses
-
- node.AuthSigBytes, err = wire.ReadVarBytes(r, 0, 80, "sig")
- if err != nil {
- return LightningNode{}, err
- }
-
- // We'll try and see if there are any opaque bytes left, if not, then
- // we'll ignore the EOF error and return the node as is.
- node.ExtraOpaqueData, err = wire.ReadVarBytes(
- r, 0, MaxAllowedExtraOpaqueBytes, "blob",
- )
- switch {
- case er.Wrapped(err) == io.ErrUnexpectedEOF:
- case er.Wrapped(err) == io.EOF:
- case err != nil:
- return LightningNode{}, err
- }
-
- return node, nil
-}
-
-func putChanEdgeInfo(edgeIndex kvdb.RwBucket, edgeInfo *ChannelEdgeInfo, chanID [8]byte) er.R {
- var b bytes.Buffer
-
- if _, err := b.Write(edgeInfo.NodeKey1Bytes[:]); err != nil {
- return er.E(err)
- }
- if _, err := b.Write(edgeInfo.NodeKey2Bytes[:]); err != nil {
- return er.E(err)
- }
- if _, err := b.Write(edgeInfo.BitcoinKey1Bytes[:]); err != nil {
- return er.E(err)
- }
- if _, err := b.Write(edgeInfo.BitcoinKey2Bytes[:]); err != nil {
- return er.E(err)
- }
-
- if err := wire.WriteVarBytes(&b, 0, edgeInfo.Features); err != nil {
- return err
- }
-
- authProof := edgeInfo.AuthProof
- var nodeSig1, nodeSig2, bitcoinSig1, bitcoinSig2 []byte
- if authProof != nil {
- nodeSig1 = authProof.NodeSig1Bytes
- nodeSig2 = authProof.NodeSig2Bytes
- bitcoinSig1 = authProof.BitcoinSig1Bytes
- bitcoinSig2 = authProof.BitcoinSig2Bytes
- }
-
- if err := wire.WriteVarBytes(&b, 0, nodeSig1); err != nil {
- return err
- }
- if err := wire.WriteVarBytes(&b, 0, nodeSig2); err != nil {
- return err
- }
- if err := wire.WriteVarBytes(&b, 0, bitcoinSig1); err != nil {
- return err
- }
- if err := wire.WriteVarBytes(&b, 0, bitcoinSig2); err != nil {
- return err
- }
-
- if err := writeOutpoint(&b, &edgeInfo.ChannelPoint); err != nil {
- return err
- }
- if err := util.WriteBin(&b, byteOrder, uint64(edgeInfo.Capacity)); err != nil {
- return err
- }
- if _, err := b.Write(chanID[:]); err != nil {
- return er.E(err)
- }
- if _, err := b.Write(edgeInfo.ChainHash[:]); err != nil {
- return er.E(err)
- }
-
- if len(edgeInfo.ExtraOpaqueData) > MaxAllowedExtraOpaqueBytes {
- return ErrTooManyExtraOpaqueBytes.New(
- fmt.Sprintf("%d", len(edgeInfo.ExtraOpaqueData)), nil)
- }
- err := wire.WriteVarBytes(&b, 0, edgeInfo.ExtraOpaqueData)
- if err != nil {
- return err
- }
-
- return edgeIndex.Put(chanID[:], b.Bytes())
-}
-
-func fetchChanEdgeInfo(edgeIndex kvdb.RBucket,
- chanID []byte) (ChannelEdgeInfo, er.R) {
-
- edgeInfoBytes := edgeIndex.Get(chanID)
- if edgeInfoBytes == nil {
- return ChannelEdgeInfo{}, ErrEdgeNotFound.Default()
- }
-
- edgeInfoReader := bytes.NewReader(edgeInfoBytes)
- return deserializeChanEdgeInfo(edgeInfoReader)
-}
-
-func deserializeChanEdgeInfo(r io.Reader) (ChannelEdgeInfo, er.R) {
- var (
- err er.R
- edgeInfo ChannelEdgeInfo
- )
-
- if _, err := util.ReadFull(r, edgeInfo.NodeKey1Bytes[:]); err != nil {
- return ChannelEdgeInfo{}, err
- }
- if _, err := util.ReadFull(r, edgeInfo.NodeKey2Bytes[:]); err != nil {
- return ChannelEdgeInfo{}, err
- }
- if _, err := util.ReadFull(r, edgeInfo.BitcoinKey1Bytes[:]); err != nil {
- return ChannelEdgeInfo{}, err
- }
- if _, err := util.ReadFull(r, edgeInfo.BitcoinKey2Bytes[:]); err != nil {
- return ChannelEdgeInfo{}, err
- }
-
- edgeInfo.Features, err = wire.ReadVarBytes(r, 0, 900, "features")
- if err != nil {
- return ChannelEdgeInfo{}, err
- }
-
- proof := &ChannelAuthProof{}
-
- proof.NodeSig1Bytes, err = wire.ReadVarBytes(r, 0, 80, "sigs")
- if err != nil {
- return ChannelEdgeInfo{}, err
- }
- proof.NodeSig2Bytes, err = wire.ReadVarBytes(r, 0, 80, "sigs")
- if err != nil {
- return ChannelEdgeInfo{}, err
- }
- proof.BitcoinSig1Bytes, err = wire.ReadVarBytes(r, 0, 80, "sigs")
- if err != nil {
- return ChannelEdgeInfo{}, err
- }
- proof.BitcoinSig2Bytes, err = wire.ReadVarBytes(r, 0, 80, "sigs")
- if err != nil {
- return ChannelEdgeInfo{}, err
- }
-
- if !proof.IsEmpty() {
- edgeInfo.AuthProof = proof
- }
-
- edgeInfo.ChannelPoint = wire.OutPoint{}
- if err := readOutpoint(r, &edgeInfo.ChannelPoint); err != nil {
- return ChannelEdgeInfo{}, err
- }
- if err := util.ReadBin(r, byteOrder, &edgeInfo.Capacity); err != nil {
- return ChannelEdgeInfo{}, err
- }
- if err := util.ReadBin(r, byteOrder, &edgeInfo.ChannelID); err != nil {
- return ChannelEdgeInfo{}, err
- }
-
- if _, err := util.ReadFull(r, edgeInfo.ChainHash[:]); err != nil {
- return ChannelEdgeInfo{}, err
- }
-
- // We'll try and see if there are any opaque bytes left, if not, then
- // we'll ignore the EOF error and return the edge as is.
- edgeInfo.ExtraOpaqueData, err = wire.ReadVarBytes(
- r, 0, MaxAllowedExtraOpaqueBytes, "blob",
- )
- switch {
- case er.Wrapped(err) == io.ErrUnexpectedEOF:
- case er.Wrapped(err) == io.EOF:
- case err != nil:
- return ChannelEdgeInfo{}, err
- }
-
- return edgeInfo, nil
-}
-
-func putChanEdgePolicy(edges, nodes kvdb.RwBucket, edge *ChannelEdgePolicy,
- from, to []byte) er.R {
-
- var edgeKey [33 + 8]byte
- copy(edgeKey[:], from)
- byteOrder.PutUint64(edgeKey[33:], edge.ChannelID)
-
- var b bytes.Buffer
- if err := serializeChanEdgePolicy(&b, edge, to); err != nil {
- return err
- }
-
- // Before we write out the new edge, we'll create a new entry in the
- // update index in order to keep it fresh.
- updateUnix := uint64(edge.LastUpdate.Unix())
- var indexKey [8 + 8]byte
- byteOrder.PutUint64(indexKey[:8], updateUnix)
- byteOrder.PutUint64(indexKey[8:], edge.ChannelID)
-
- updateIndex, err := edges.CreateBucketIfNotExists(edgeUpdateIndexBucket)
- if err != nil {
- return err
- }
-
- // If there was already an entry for this edge, then we'll need to
- // delete the old one to ensure we don't leave around any after-images.
- // An unknown policy value does not have a update time recorded, so
- // it also does not need to be removed.
- if edgeBytes := edges.Get(edgeKey[:]); edgeBytes != nil &&
- !bytes.Equal(edgeBytes[:], unknownPolicy) {
-
- // In order to delete the old entry, we'll need to obtain the
- // *prior* update time in order to delete it. To do this, we'll
- // need to deserialize the existing policy within the database
- // (now outdated by the new one), and delete its corresponding
- // entry within the update index. We'll ignore any
- // ErrEdgePolicyOptionalFieldNotFound error, as we only need
- // the channel ID and update time to delete the entry.
- // TODO(halseth): get rid of these invalid policies in a
- // migration.
- oldEdgePolicy, err := deserializeChanEdgePolicy(
- bytes.NewReader(edgeBytes), nodes,
- )
- if err != nil && !ErrEdgePolicyOptionalFieldNotFound.Is(err) {
- return err
- }
-
- oldUpdateTime := uint64(oldEdgePolicy.LastUpdate.Unix())
-
- var oldIndexKey [8 + 8]byte
- byteOrder.PutUint64(oldIndexKey[:8], oldUpdateTime)
- byteOrder.PutUint64(oldIndexKey[8:], edge.ChannelID)
-
- if err := updateIndex.Delete(oldIndexKey[:]); err != nil {
- return err
- }
- }
-
- if err := updateIndex.Put(indexKey[:], nil); err != nil {
- return err
- }
-
- updateEdgePolicyDisabledIndex(
- edges, edge.ChannelID,
- edge.ChannelFlags&lnwire.ChanUpdateDirection > 0,
- edge.IsDisabled(),
- )
-
- return edges.Put(edgeKey[:], b.Bytes()[:])
-}
-
-// updateEdgePolicyDisabledIndex is used to update the disabledEdgePolicyIndex
-// bucket by either add a new disabled ChannelEdgePolicy or remove an existing
-// one.
-// The direction represents the direction of the edge and disabled is used for
-// deciding whether to remove or add an entry to the bucket.
-// In general a channel is disabled if two entries for the same chanID exist
-// in this bucket.
-// Maintaining the bucket this way allows a fast retrieval of disabled
-// channels, for example when prune is needed.
-func updateEdgePolicyDisabledIndex(edges kvdb.RwBucket, chanID uint64,
- direction bool, disabled bool) er.R {
-
- var disabledEdgeKey [8 + 1]byte
- byteOrder.PutUint64(disabledEdgeKey[0:], chanID)
- if direction {
- disabledEdgeKey[8] = 1
- }
-
- disabledEdgePolicyIndex, err := edges.CreateBucketIfNotExists(
- disabledEdgePolicyBucket,
- )
- if err != nil {
- return err
- }
-
- if disabled {
- return disabledEdgePolicyIndex.Put(disabledEdgeKey[:], []byte{})
- }
-
- return disabledEdgePolicyIndex.Delete(disabledEdgeKey[:])
-}
-
-// putChanEdgePolicyUnknown marks the edge policy as unknown
-// in the edges bucket.
-func putChanEdgePolicyUnknown(edges kvdb.RwBucket, channelID uint64,
- from []byte) er.R {
-
- var edgeKey [33 + 8]byte
- copy(edgeKey[:], from)
- byteOrder.PutUint64(edgeKey[33:], channelID)
-
- if edges.Get(edgeKey[:]) != nil {
- return er.Errorf("cannot write unknown policy for channel %v "+
- " when there is already a policy present", channelID)
- }
-
- return edges.Put(edgeKey[:], unknownPolicy)
-}
-
-func fetchChanEdgePolicy(edges kvdb.RBucket, chanID []byte,
- nodePub []byte, nodes kvdb.RBucket) (*ChannelEdgePolicy, er.R) {
-
- var edgeKey [33 + 8]byte
- copy(edgeKey[:], nodePub)
- copy(edgeKey[33:], chanID[:])
-
- edgeBytes := edges.Get(edgeKey[:])
- if edgeBytes == nil {
- return nil, ErrEdgeNotFound.Default()
- }
-
- // No need to deserialize unknown policy.
- if bytes.Equal(edgeBytes[:], unknownPolicy) {
- return nil, nil
- }
-
- edgeReader := bytes.NewReader(edgeBytes)
-
- ep, err := deserializeChanEdgePolicy(edgeReader, nodes)
- switch {
- // If the db policy was missing an expected optional field, we return
- // nil as if the policy was unknown.
- case ErrEdgePolicyOptionalFieldNotFound.Is(err):
- return nil, nil
-
- case err != nil:
- return nil, err
- }
-
- return ep, nil
-}
-
-func fetchChanEdgePolicies(edgeIndex kvdb.RBucket, edges kvdb.RBucket,
- nodes kvdb.RBucket, chanID []byte,
- db *DB) (*ChannelEdgePolicy, *ChannelEdgePolicy, er.R) {
-
- edgeInfo := edgeIndex.Get(chanID)
- if edgeInfo == nil {
- return nil, nil, ErrEdgeNotFound.Default()
- }
-
- // The first node is contained within the first half of the edge
- // information. We only propagate the error here and below if it's
- // something other than edge non-existence.
- node1Pub := edgeInfo[:33]
- edge1, err := fetchChanEdgePolicy(edges, chanID, node1Pub, nodes)
- if err != nil {
- return nil, nil, err
- }
-
- // As we may have a single direction of the edge but not the other,
- // only fill in the database pointers if the edge is found.
- if edge1 != nil {
- edge1.db = db
- edge1.Node.db = db
- }
-
- // Similarly, the second node is contained within the latter
- // half of the edge information.
- node2Pub := edgeInfo[33:66]
- edge2, err := fetchChanEdgePolicy(edges, chanID, node2Pub, nodes)
- if err != nil {
- return nil, nil, err
- }
-
- if edge2 != nil {
- edge2.db = db
- edge2.Node.db = db
- }
-
- return edge1, edge2, nil
-}
-
-func serializeChanEdgePolicy(w io.Writer, edge *ChannelEdgePolicy,
- to []byte) er.R {
-
- err := wire.WriteVarBytes(w, 0, edge.SigBytes)
- if err != nil {
- return err
- }
-
- if err := util.WriteBin(w, byteOrder, edge.ChannelID); err != nil {
- return err
- }
-
- var scratch [8]byte
- updateUnix := uint64(edge.LastUpdate.Unix())
- byteOrder.PutUint64(scratch[:], updateUnix)
- if _, err := util.Write(w, scratch[:]); err != nil {
- return err
- }
-
- if err := util.WriteBin(w, byteOrder, edge.MessageFlags); err != nil {
- return err
- }
- if err := util.WriteBin(w, byteOrder, edge.ChannelFlags); err != nil {
- return err
- }
- if err := util.WriteBin(w, byteOrder, edge.TimeLockDelta); err != nil {
- return err
- }
- if err := util.WriteBin(w, byteOrder, uint64(edge.MinHTLC)); err != nil {
- return err
- }
- if err := util.WriteBin(w, byteOrder, uint64(edge.FeeBaseMSat)); err != nil {
- return err
- }
- if err := util.WriteBin(w, byteOrder, uint64(edge.FeeProportionalMillionths)); err != nil {
- return err
- }
-
- if _, err := util.Write(w, to); err != nil {
- return err
- }
-
- // If the max_htlc field is present, we write it. To be compatible with
- // older versions that wasn't aware of this field, we write it as part
- // of the opaque data.
- // TODO(halseth): clean up when moving to TLV.
- var opaqueBuf bytes.Buffer
- if edge.MessageFlags.HasMaxHtlc() {
- err := util.WriteBin(&opaqueBuf, byteOrder, uint64(edge.MaxHTLC))
- if err != nil {
- return err
- }
- }
-
- if len(edge.ExtraOpaqueData) > MaxAllowedExtraOpaqueBytes {
- return ErrTooManyExtraOpaqueBytes.New(
- fmt.Sprintf("%d", len(edge.ExtraOpaqueData)), nil)
- }
- if _, err := opaqueBuf.Write(edge.ExtraOpaqueData); err != nil {
- return er.E(err)
- }
-
- if err := wire.WriteVarBytes(w, 0, opaqueBuf.Bytes()); err != nil {
- return err
- }
- return nil
-}
-
-func deserializeChanEdgePolicy(r io.Reader,
- nodes kvdb.RBucket) (*ChannelEdgePolicy, er.R) {
-
- edge := &ChannelEdgePolicy{}
-
- var err er.R
- edge.SigBytes, err = wire.ReadVarBytes(r, 0, 80, "sig")
- if err != nil {
- return nil, err
- }
-
- if err := util.ReadBin(r, byteOrder, &edge.ChannelID); err != nil {
- return nil, err
- }
-
- var scratch [8]byte
- if _, err := r.Read(scratch[:]); err != nil {
- return nil, er.E(err)
- }
- unix := int64(byteOrder.Uint64(scratch[:]))
- edge.LastUpdate = time.Unix(unix, 0)
-
- if err := util.ReadBin(r, byteOrder, &edge.MessageFlags); err != nil {
- return nil, err
- }
- if err := util.ReadBin(r, byteOrder, &edge.ChannelFlags); err != nil {
- return nil, err
- }
- if err := util.ReadBin(r, byteOrder, &edge.TimeLockDelta); err != nil {
- return nil, err
- }
-
- var n uint64
- if err := util.ReadBin(r, byteOrder, &n); err != nil {
- return nil, err
- }
- edge.MinHTLC = lnwire.MilliSatoshi(n)
-
- if err := util.ReadBin(r, byteOrder, &n); err != nil {
- return nil, err
- }
- edge.FeeBaseMSat = lnwire.MilliSatoshi(n)
-
- if err := util.ReadBin(r, byteOrder, &n); err != nil {
- return nil, err
- }
- edge.FeeProportionalMillionths = lnwire.MilliSatoshi(n)
-
- var pub [33]byte
- if _, err := r.Read(pub[:]); err != nil {
- return nil, er.E(err)
- }
-
- node, err := fetchLightningNode(nodes, pub[:])
- if err != nil {
- return nil, er.Errorf("unable to fetch node: %x, %v",
- pub[:], err)
- }
- edge.Node = &node
-
- // We'll try and see if there are any opaque bytes left, if not, then
- // we'll ignore the EOF error and return the edge as is.
- edge.ExtraOpaqueData, err = wire.ReadVarBytes(
- r, 0, MaxAllowedExtraOpaqueBytes, "blob",
- )
- switch {
- case er.Wrapped(err) == io.ErrUnexpectedEOF:
- case er.Wrapped(err) == io.EOF:
- case err != nil:
- return nil, err
- }
-
- // See if optional fields are present.
- if edge.MessageFlags.HasMaxHtlc() {
- // The max_htlc field should be at the beginning of the opaque
- // bytes.
- opq := edge.ExtraOpaqueData
-
- // If the max_htlc field is not present, it might be old data
- // stored before this field was validated. We'll return the
- // edge along with an error.
- if len(opq) < 8 {
- return edge, ErrEdgePolicyOptionalFieldNotFound.Default()
- }
-
- maxHtlc := byteOrder.Uint64(opq[:8])
- edge.MaxHTLC = lnwire.MilliSatoshi(maxHtlc)
-
- // Exclude the parsed field from the rest of the opaque data.
- edge.ExtraOpaqueData = opq[8:]
- }
-
- return edge, nil
-}
diff --git a/lnd/channeldb/graph_test.go b/lnd/channeldb/graph_test.go
deleted file mode 100644
index 322b161d..00000000
--- a/lnd/channeldb/graph_test.go
+++ /dev/null
@@ -1,3197 +0,0 @@
-package channeldb
-
-import (
- "bytes"
- "crypto/sha256"
- "image/color"
- "math"
- "math/big"
- prand "math/rand"
- "net"
- "reflect"
- "runtime"
- "testing"
- "time"
-
- "github.com/davecgh/go-spew/spew"
- "github.com/pkt-cash/pktd/btcec"
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/chaincfg/chainhash"
- "github.com/pkt-cash/pktd/lnd/channeldb/kvdb"
- "github.com/pkt-cash/pktd/lnd/lnwire"
- "github.com/pkt-cash/pktd/lnd/routing/route"
- "github.com/pkt-cash/pktd/wire"
-)
-
-var (
- testAddr = &net.TCPAddr{IP: (net.IP)([]byte{0xA, 0x0, 0x0, 0x1}),
- Port: 9000}
- anotherAddr, _ = net.ResolveTCPAddr("tcp",
- "[2001:db8:85a3:0:0:8a2e:370:7334]:80")
- testAddrs = []net.Addr{testAddr, anotherAddr}
-
- testSig = &btcec.Signature{
- R: new(big.Int),
- S: new(big.Int),
- }
- _, _ = testSig.R.SetString("63724406601629180062774974542967536251589935445068131219452686511677818569431", 10)
- _, _ = testSig.S.SetString("18801056069249825825291287104931333862866033135609736119018462340006816851118", 10)
-
- testFeatures = lnwire.NewFeatureVector(nil, lnwire.Features)
-
- testPub = route.Vertex{2, 202, 4}
-)
-
-func createLightningNode(db *DB, priv *btcec.PrivateKey) (*LightningNode, er.R) {
- updateTime := prand.Int63()
-
- pub := priv.PubKey().SerializeCompressed()
- n := &LightningNode{
- HaveNodeAnnouncement: true,
- AuthSigBytes: testSig.Serialize(),
- LastUpdate: time.Unix(updateTime, 0),
- Color: color.RGBA{1, 2, 3, 0},
- Alias: "kek" + string(pub[:]),
- Features: testFeatures,
- Addresses: testAddrs,
- db: db,
- }
- copy(n.PubKeyBytes[:], priv.PubKey().SerializeCompressed())
-
- return n, nil
-}
-
-func createTestVertex(db *DB) (*LightningNode, er.R) {
- priv, err := btcec.NewPrivateKey(btcec.S256())
- if err != nil {
- return nil, err
- }
-
- return createLightningNode(db, priv)
-}
-
-func TestNodeInsertionAndDeletion(t *testing.T) {
- t.Parallel()
-
- db, cleanUp, err := MakeTestDB()
- defer cleanUp()
- if err != nil {
- t.Fatalf("unable to make test database: %v", err)
- }
-
- graph := db.ChannelGraph()
-
- // We'd like to test basic insertion/deletion for vertexes from the
- // graph, so we'll create a test vertex to start with.
- node := &LightningNode{
- HaveNodeAnnouncement: true,
- AuthSigBytes: testSig.Serialize(),
- LastUpdate: time.Unix(1232342, 0),
- Color: color.RGBA{1, 2, 3, 0},
- Alias: "kek",
- Features: testFeatures,
- Addresses: testAddrs,
- ExtraOpaqueData: []byte("extra new data"),
- PubKeyBytes: testPub,
- db: db,
- }
-
- // First, insert the node into the graph DB. This should succeed
- // without any errors.
- if err := graph.AddLightningNode(node); err != nil {
- t.Fatalf("unable to add node: %v", err)
- }
-
- // Next, fetch the node from the database to ensure everything was
- // serialized properly.
- dbNode, err := graph.FetchLightningNode(nil, testPub)
- if err != nil {
- t.Fatalf("unable to locate node: %v", err)
- }
-
- if _, exists, err := graph.HasLightningNode(dbNode.PubKeyBytes); err != nil {
- t.Fatalf("unable to query for node: %v", err)
- } else if !exists {
- t.Fatalf("node should be found but wasn't")
- }
-
- // The two nodes should match exactly!
- if err := compareNodes(node, dbNode); err != nil {
- t.Fatalf("nodes don't match: %v", err)
- }
-
- // Next, delete the node from the graph, this should purge all data
- // related to the node.
- if err := graph.DeleteLightningNode(testPub); err != nil {
- t.Fatalf("unable to delete node; %v", err)
- }
-
- // Finally, attempt to fetch the node again. This should fail as the
- // node should have been deleted from the database.
- _, err = graph.FetchLightningNode(nil, testPub)
- if !ErrGraphNodeNotFound.Is(err) {
- t.Fatalf("fetch after delete should fail!")
- }
-}
-
-// TestPartialNode checks that we can add and retrieve a LightningNode where
-// where only the pubkey is known to the database.
-func TestPartialNode(t *testing.T) {
- t.Parallel()
-
- db, cleanUp, err := MakeTestDB()
- defer cleanUp()
- if err != nil {
- t.Fatalf("unable to make test database: %v", err)
- }
-
- graph := db.ChannelGraph()
-
- // We want to be able to insert nodes into the graph that only has the
- // PubKey set.
- node := &LightningNode{
- HaveNodeAnnouncement: false,
- PubKeyBytes: testPub,
- }
-
- if err := graph.AddLightningNode(node); err != nil {
- t.Fatalf("unable to add node: %v", err)
- }
-
- // Next, fetch the node from the database to ensure everything was
- // serialized properly.
- dbNode, err := graph.FetchLightningNode(nil, testPub)
- if err != nil {
- t.Fatalf("unable to locate node: %v", err)
- }
-
- if _, exists, err := graph.HasLightningNode(dbNode.PubKeyBytes); err != nil {
- t.Fatalf("unable to query for node: %v", err)
- } else if !exists {
- t.Fatalf("node should be found but wasn't")
- }
-
- // The two nodes should match exactly! (with default values for
- // LastUpdate and db set to satisfy compareNodes())
- node = &LightningNode{
- HaveNodeAnnouncement: false,
- LastUpdate: time.Unix(0, 0),
- PubKeyBytes: testPub,
- db: db,
- }
-
- if err := compareNodes(node, dbNode); err != nil {
- t.Fatalf("nodes don't match: %v", err)
- }
-
- // Next, delete the node from the graph, this should purge all data
- // related to the node.
- if err := graph.DeleteLightningNode(testPub); err != nil {
- t.Fatalf("unable to delete node: %v", err)
- }
-
- // Finally, attempt to fetch the node again. This should fail as the
- // node should have been deleted from the database.
- _, err = graph.FetchLightningNode(nil, testPub)
- if !ErrGraphNodeNotFound.Is(err) {
- t.Fatalf("fetch after delete should fail!")
- }
-}
-
-func TestAliasLookup(t *testing.T) {
- t.Parallel()
-
- db, cleanUp, err := MakeTestDB()
- defer cleanUp()
- if err != nil {
- t.Fatalf("unable to make test database: %v", err)
- }
-
- graph := db.ChannelGraph()
-
- // We'd like to test the alias index within the database, so first
- // create a new test node.
- testNode, err := createTestVertex(db)
- if err != nil {
- t.Fatalf("unable to create test node: %v", err)
- }
-
- // Add the node to the graph's database, this should also insert an
- // entry into the alias index for this node.
- if err := graph.AddLightningNode(testNode); err != nil {
- t.Fatalf("unable to add node: %v", err)
- }
-
- // Next, attempt to lookup the alias. The alias should exactly match
- // the one which the test node was assigned.
- nodePub, err := testNode.PubKey()
- if err != nil {
- t.Fatalf("unable to generate pubkey: %v", err)
- }
- dbAlias, err := graph.LookupAlias(nodePub)
- if err != nil {
- t.Fatalf("unable to find alias: %v", err)
- }
- if dbAlias != testNode.Alias {
- t.Fatalf("aliases don't match, expected %v got %v",
- testNode.Alias, dbAlias)
- }
-
- // Ensure that looking up a non-existent alias results in an error.
- node, err := createTestVertex(db)
- if err != nil {
- t.Fatalf("unable to create test node: %v", err)
- }
- nodePub, err = node.PubKey()
- if err != nil {
- t.Fatalf("unable to generate pubkey: %v", err)
- }
- _, err = graph.LookupAlias(nodePub)
- if !ErrNodeAliasNotFound.Is(err) {
- t.Fatalf("alias lookup should fail for non-existent pubkey")
- }
-}
-
-func TestSourceNode(t *testing.T) {
- t.Parallel()
-
- db, cleanUp, err := MakeTestDB()
- defer cleanUp()
- if err != nil {
- t.Fatalf("unable to make test database: %v", err)
- }
-
- graph := db.ChannelGraph()
-
- // We'd like to test the setting/getting of the source node, so we
- // first create a fake node to use within the test.
- testNode, err := createTestVertex(db)
- if err != nil {
- t.Fatalf("unable to create test node: %v", err)
- }
-
- // Attempt to fetch the source node, this should return an error as the
- // source node hasn't yet been set.
- if _, err := graph.SourceNode(); !ErrSourceNodeNotSet.Is(err) {
- t.Fatalf("source node shouldn't be set in new graph")
- }
-
- // Set the source the source node, this should insert the node into the
- // database in a special way indicating it's the source node.
- if err := graph.SetSourceNode(testNode); err != nil {
- t.Fatalf("unable to set source node: %v", err)
- }
-
- // Retrieve the source node from the database, it should exactly match
- // the one we set above.
- sourceNode, err := graph.SourceNode()
- if err != nil {
- t.Fatalf("unable to fetch source node: %v", err)
- }
- if err := compareNodes(testNode, sourceNode); err != nil {
- t.Fatalf("nodes don't match: %v", err)
- }
-}
-
-func TestEdgeInsertionDeletion(t *testing.T) {
- t.Parallel()
-
- db, cleanUp, err := MakeTestDB()
- defer cleanUp()
- if err != nil {
- t.Fatalf("unable to make test database: %v", err)
- }
-
- graph := db.ChannelGraph()
-
- // We'd like to test the insertion/deletion of edges, so we create two
- // vertexes to connect.
- node1, err := createTestVertex(db)
- if err != nil {
- t.Fatalf("unable to create test node: %v", err)
- }
- node2, err := createTestVertex(db)
- if err != nil {
- t.Fatalf("unable to create test node: %v", err)
- }
-
- // In addition to the fake vertexes we create some fake channel
- // identifiers.
- chanID := uint64(prand.Int63())
- outpoint := wire.OutPoint{
- Hash: rev,
- Index: 9,
- }
-
- // Add the new edge to the database, this should proceed without any
- // errors.
- node1Pub, err := node1.PubKey()
- if err != nil {
- t.Fatalf("unable to generate node key: %v", err)
- }
- node2Pub, err := node2.PubKey()
- if err != nil {
- t.Fatalf("unable to generate node key: %v", err)
- }
- edgeInfo := ChannelEdgeInfo{
- ChannelID: chanID,
- ChainHash: key,
- AuthProof: &ChannelAuthProof{
- NodeSig1Bytes: testSig.Serialize(),
- NodeSig2Bytes: testSig.Serialize(),
- BitcoinSig1Bytes: testSig.Serialize(),
- BitcoinSig2Bytes: testSig.Serialize(),
- },
- ChannelPoint: outpoint,
- Capacity: 9000,
- }
- copy(edgeInfo.NodeKey1Bytes[:], node1Pub.SerializeCompressed())
- copy(edgeInfo.NodeKey2Bytes[:], node2Pub.SerializeCompressed())
- copy(edgeInfo.BitcoinKey1Bytes[:], node1Pub.SerializeCompressed())
- copy(edgeInfo.BitcoinKey2Bytes[:], node2Pub.SerializeCompressed())
-
- if err := graph.AddChannelEdge(&edgeInfo); err != nil {
- t.Fatalf("unable to create channel edge: %v", err)
- }
-
- // Ensure that both policies are returned as unknown (nil).
- _, e1, e2, err := graph.FetchChannelEdgesByID(chanID)
- if err != nil {
- t.Fatalf("unable to fetch channel edge")
- }
- if e1 != nil || e2 != nil {
- t.Fatalf("channel edges not unknown")
- }
-
- // Next, attempt to delete the edge from the database, again this
- // should proceed without any issues.
- if err := graph.DeleteChannelEdges(chanID); err != nil {
- t.Fatalf("unable to delete edge: %v", err)
- }
-
- // Ensure that any query attempts to lookup the delete channel edge are
- // properly deleted.
- if _, _, _, err := graph.FetchChannelEdgesByOutpoint(&outpoint); err == nil {
- t.Fatalf("channel edge not deleted")
- }
- if _, _, _, err := graph.FetchChannelEdgesByID(chanID); err == nil {
- t.Fatalf("channel edge not deleted")
- }
- isZombie, _, _ := graph.IsZombieEdge(chanID)
- if !isZombie {
- t.Fatal("channel edge not marked as zombie")
- }
-
- // Finally, attempt to delete a (now) non-existent edge within the
- // database, this should result in an error.
- err = graph.DeleteChannelEdges(chanID)
- if !ErrEdgeNotFound.Is(err) {
- t.Fatalf("deleting a non-existent edge should fail!")
- }
-}
-
-func createEdge(height, txIndex uint32, txPosition uint16, outPointIndex uint32,
- node1, node2 *LightningNode) (ChannelEdgeInfo, lnwire.ShortChannelID) {
-
- shortChanID := lnwire.ShortChannelID{
- BlockHeight: height,
- TxIndex: txIndex,
- TxPosition: txPosition,
- }
- outpoint := wire.OutPoint{
- Hash: rev,
- Index: outPointIndex,
- }
-
- node1Pub, _ := node1.PubKey()
- node2Pub, _ := node2.PubKey()
- edgeInfo := ChannelEdgeInfo{
- ChannelID: shortChanID.ToUint64(),
- ChainHash: key,
- AuthProof: &ChannelAuthProof{
- NodeSig1Bytes: testSig.Serialize(),
- NodeSig2Bytes: testSig.Serialize(),
- BitcoinSig1Bytes: testSig.Serialize(),
- BitcoinSig2Bytes: testSig.Serialize(),
- },
- ChannelPoint: outpoint,
- Capacity: 9000,
- }
-
- copy(edgeInfo.NodeKey1Bytes[:], node1Pub.SerializeCompressed())
- copy(edgeInfo.NodeKey2Bytes[:], node2Pub.SerializeCompressed())
- copy(edgeInfo.BitcoinKey1Bytes[:], node1Pub.SerializeCompressed())
- copy(edgeInfo.BitcoinKey2Bytes[:], node2Pub.SerializeCompressed())
-
- return edgeInfo, shortChanID
-}
-
-// TestDisconnectBlockAtHeight checks that the pruned state of the channel
-// database is what we expect after calling DisconnectBlockAtHeight.
-func TestDisconnectBlockAtHeight(t *testing.T) {
- t.Parallel()
-
- db, cleanUp, err := MakeTestDB()
- defer cleanUp()
- if err != nil {
- t.Fatalf("unable to make test database: %v", err)
- }
-
- graph := db.ChannelGraph()
- sourceNode, err := createTestVertex(db)
- if err != nil {
- t.Fatalf("unable to create source node: %v", err)
- }
- if err := graph.SetSourceNode(sourceNode); err != nil {
- t.Fatalf("unable to set source node: %v", err)
- }
-
- // We'd like to test the insertion/deletion of edges, so we create two
- // vertexes to connect.
- node1, err := createTestVertex(db)
- if err != nil {
- t.Fatalf("unable to create test node: %v", err)
- }
- node2, err := createTestVertex(db)
- if err != nil {
- t.Fatalf("unable to create test node: %v", err)
- }
-
- // In addition to the fake vertexes we create some fake channel
- // identifiers.
- var spendOutputs []*wire.OutPoint
- var blockHash chainhash.Hash
- copy(blockHash[:], bytes.Repeat([]byte{1}, 32))
-
- // Prune the graph a few times to make sure we have entries in the
- // prune log.
- _, err = graph.PruneGraph(spendOutputs, &blockHash, 155)
- if err != nil {
- t.Fatalf("unable to prune graph: %v", err)
- }
- var blockHash2 chainhash.Hash
- copy(blockHash2[:], bytes.Repeat([]byte{2}, 32))
-
- _, err = graph.PruneGraph(spendOutputs, &blockHash2, 156)
- if err != nil {
- t.Fatalf("unable to prune graph: %v", err)
- }
-
- // We'll create 3 almost identical edges, so first create a helper
- // method containing all logic for doing so.
-
- // Create an edge which has its block height at 156.
- height := uint32(156)
- edgeInfo, _ := createEdge(height, 0, 0, 0, node1, node2)
-
- // Create an edge with block height 157. We give it
- // maximum values for tx index and position, to make
- // sure our database range scan get edges from the
- // entire range.
- edgeInfo2, _ := createEdge(
- height+1, math.MaxUint32&0x00ffffff, math.MaxUint16, 1,
- node1, node2,
- )
-
- // Create a third edge, this with a block height of 155.
- edgeInfo3, _ := createEdge(height-1, 0, 0, 2, node1, node2)
-
- // Now add all these new edges to the database.
- if err := graph.AddChannelEdge(&edgeInfo); err != nil {
- t.Fatalf("unable to create channel edge: %v", err)
- }
-
- if err := graph.AddChannelEdge(&edgeInfo2); err != nil {
- t.Fatalf("unable to create channel edge: %v", err)
- }
-
- if err := graph.AddChannelEdge(&edgeInfo3); err != nil {
- t.Fatalf("unable to create channel edge: %v", err)
- }
-
- // Call DisconnectBlockAtHeight, which should prune every channel
- // that has a funding height of 'height' or greater.
- removed, err := graph.DisconnectBlockAtHeight(uint32(height))
- if err != nil {
- t.Fatalf("unable to prune %v", err)
- }
-
- // The two edges should have been removed.
- if len(removed) != 2 {
- t.Fatalf("expected two edges to be removed from graph, "+
- "only %d were", len(removed))
- }
- if removed[0].ChannelID != edgeInfo.ChannelID {
- t.Fatalf("expected edge to be removed from graph")
- }
- if removed[1].ChannelID != edgeInfo2.ChannelID {
- t.Fatalf("expected edge to be removed from graph")
- }
-
- // The two first edges should be removed from the db.
- _, _, has, isZombie, err := graph.HasChannelEdge(edgeInfo.ChannelID)
- if err != nil {
- t.Fatalf("unable to query for edge: %v", err)
- }
- if has {
- t.Fatalf("edge1 was not pruned from the graph")
- }
- if isZombie {
- t.Fatal("reorged edge1 should not be marked as zombie")
- }
- _, _, has, isZombie, err = graph.HasChannelEdge(edgeInfo2.ChannelID)
- if err != nil {
- t.Fatalf("unable to query for edge: %v", err)
- }
- if has {
- t.Fatalf("edge2 was not pruned from the graph")
- }
- if isZombie {
- t.Fatal("reorged edge2 should not be marked as zombie")
- }
-
- // Edge 3 should not be removed.
- _, _, has, isZombie, err = graph.HasChannelEdge(edgeInfo3.ChannelID)
- if err != nil {
- t.Fatalf("unable to query for edge: %v", err)
- }
- if !has {
- t.Fatalf("edge3 was pruned from the graph")
- }
- if isZombie {
- t.Fatal("edge3 was marked as zombie")
- }
-
- // PruneTip should be set to the blockHash we specified for the block
- // at height 155.
- hash, h, err := graph.PruneTip()
- if err != nil {
- t.Fatalf("unable to get prune tip: %v", err)
- }
- if !blockHash.IsEqual(hash) {
- t.Fatalf("expected best block to be %x, was %x", blockHash, hash)
- }
- if h != height-1 {
- t.Fatalf("expected best block height to be %d, was %d", height-1, h)
- }
-}
-
-func assertEdgeInfoEqual(t *testing.T, e1 *ChannelEdgeInfo,
- e2 *ChannelEdgeInfo) {
-
- if e1.ChannelID != e2.ChannelID {
- t.Fatalf("chan id's don't match: %v vs %v", e1.ChannelID,
- e2.ChannelID)
- }
-
- if e1.ChainHash != e2.ChainHash {
- t.Fatalf("chain hashes don't match: %v vs %v", e1.ChainHash,
- e2.ChainHash)
- }
-
- if !bytes.Equal(e1.NodeKey1Bytes[:], e2.NodeKey1Bytes[:]) {
- t.Fatalf("nodekey1 doesn't match")
- }
- if !bytes.Equal(e1.NodeKey2Bytes[:], e2.NodeKey2Bytes[:]) {
- t.Fatalf("nodekey2 doesn't match")
- }
- if !bytes.Equal(e1.BitcoinKey1Bytes[:], e2.BitcoinKey1Bytes[:]) {
- t.Fatalf("bitcoinkey1 doesn't match")
- }
- if !bytes.Equal(e1.BitcoinKey2Bytes[:], e2.BitcoinKey2Bytes[:]) {
- t.Fatalf("bitcoinkey2 doesn't match")
- }
-
- if !bytes.Equal(e1.Features, e2.Features) {
- t.Fatalf("features doesn't match: %x vs %x", e1.Features,
- e2.Features)
- }
-
- if !bytes.Equal(e1.AuthProof.NodeSig1Bytes, e2.AuthProof.NodeSig1Bytes) {
- t.Fatalf("nodesig1 doesn't match: %v vs %v",
- spew.Sdump(e1.AuthProof.NodeSig1Bytes),
- spew.Sdump(e2.AuthProof.NodeSig1Bytes))
- }
- if !bytes.Equal(e1.AuthProof.NodeSig2Bytes, e2.AuthProof.NodeSig2Bytes) {
- t.Fatalf("nodesig2 doesn't match")
- }
- if !bytes.Equal(e1.AuthProof.BitcoinSig1Bytes, e2.AuthProof.BitcoinSig1Bytes) {
- t.Fatalf("bitcoinsig1 doesn't match")
- }
- if !bytes.Equal(e1.AuthProof.BitcoinSig2Bytes, e2.AuthProof.BitcoinSig2Bytes) {
- t.Fatalf("bitcoinsig2 doesn't match")
- }
-
- if e1.ChannelPoint != e2.ChannelPoint {
- t.Fatalf("channel point match: %v vs %v", e1.ChannelPoint,
- e2.ChannelPoint)
- }
-
- if e1.Capacity != e2.Capacity {
- t.Fatalf("capacity doesn't match: %v vs %v", e1.Capacity,
- e2.Capacity)
- }
-
- if !bytes.Equal(e1.ExtraOpaqueData, e2.ExtraOpaqueData) {
- t.Fatalf("extra data doesn't match: %v vs %v",
- e2.ExtraOpaqueData, e2.ExtraOpaqueData)
- }
-}
-
-func createChannelEdge(db *DB, node1, node2 *LightningNode) (*ChannelEdgeInfo,
- *ChannelEdgePolicy, *ChannelEdgePolicy) {
-
- var (
- firstNode *LightningNode
- secondNode *LightningNode
- )
- if bytes.Compare(node1.PubKeyBytes[:], node2.PubKeyBytes[:]) == -1 {
- firstNode = node1
- secondNode = node2
- } else {
- firstNode = node2
- secondNode = node1
- }
-
- // In addition to the fake vertexes we create some fake channel
- // identifiers.
- chanID := uint64(prand.Int63())
- outpoint := wire.OutPoint{
- Hash: rev,
- Index: 9,
- }
-
- // Add the new edge to the database, this should proceed without any
- // errors.
- edgeInfo := &ChannelEdgeInfo{
- ChannelID: chanID,
- ChainHash: key,
- AuthProof: &ChannelAuthProof{
- NodeSig1Bytes: testSig.Serialize(),
- NodeSig2Bytes: testSig.Serialize(),
- BitcoinSig1Bytes: testSig.Serialize(),
- BitcoinSig2Bytes: testSig.Serialize(),
- },
- ChannelPoint: outpoint,
- Capacity: 1000,
- ExtraOpaqueData: []byte("new unknown feature"),
- }
- copy(edgeInfo.NodeKey1Bytes[:], firstNode.PubKeyBytes[:])
- copy(edgeInfo.NodeKey2Bytes[:], secondNode.PubKeyBytes[:])
- copy(edgeInfo.BitcoinKey1Bytes[:], firstNode.PubKeyBytes[:])
- copy(edgeInfo.BitcoinKey2Bytes[:], secondNode.PubKeyBytes[:])
-
- edge1 := &ChannelEdgePolicy{
- SigBytes: testSig.Serialize(),
- ChannelID: chanID,
- LastUpdate: time.Unix(433453, 0),
- MessageFlags: 1,
- ChannelFlags: 0,
- TimeLockDelta: 99,
- MinHTLC: 2342135,
- MaxHTLC: 13928598,
- FeeBaseMSat: 4352345,
- FeeProportionalMillionths: 3452352,
- Node: secondNode,
- ExtraOpaqueData: []byte("new unknown feature2"),
- db: db,
- }
- edge2 := &ChannelEdgePolicy{
- SigBytes: testSig.Serialize(),
- ChannelID: chanID,
- LastUpdate: time.Unix(124234, 0),
- MessageFlags: 1,
- ChannelFlags: 1,
- TimeLockDelta: 99,
- MinHTLC: 2342135,
- MaxHTLC: 13928598,
- FeeBaseMSat: 4352345,
- FeeProportionalMillionths: 90392423,
- Node: firstNode,
- ExtraOpaqueData: []byte("new unknown feature1"),
- db: db,
- }
-
- return edgeInfo, edge1, edge2
-}
-
-func TestEdgeInfoUpdates(t *testing.T) {
- t.Parallel()
-
- db, cleanUp, err := MakeTestDB()
- defer cleanUp()
- if err != nil {
- t.Fatalf("unable to make test database: %v", err)
- }
-
- graph := db.ChannelGraph()
-
- // We'd like to test the update of edges inserted into the database, so
- // we create two vertexes to connect.
- node1, err := createTestVertex(db)
- if err != nil {
- t.Fatalf("unable to create test node: %v", err)
- }
- if err := graph.AddLightningNode(node1); err != nil {
- t.Fatalf("unable to add node: %v", err)
- }
- node2, err := createTestVertex(db)
- if err != nil {
- t.Fatalf("unable to create test node: %v", err)
- }
- if err := graph.AddLightningNode(node2); err != nil {
- t.Fatalf("unable to add node: %v", err)
- }
-
- // Create an edge and add it to the db.
- edgeInfo, edge1, edge2 := createChannelEdge(db, node1, node2)
-
- // Make sure inserting the policy at this point, before the edge info
- // is added, will fail.
- if err := graph.UpdateEdgePolicy(edge1); !ErrEdgeNotFound.Is(err) {
- t.Fatalf("expected ErrEdgeNotFound, got: %v", err)
- }
-
- // Add the edge info.
- if err := graph.AddChannelEdge(edgeInfo); err != nil {
- t.Fatalf("unable to create channel edge: %v", err)
- }
-
- chanID := edgeInfo.ChannelID
- outpoint := edgeInfo.ChannelPoint
-
- // Next, insert both edge policies into the database, they should both
- // be inserted without any issues.
- if err := graph.UpdateEdgePolicy(edge1); err != nil {
- t.Fatalf("unable to update edge: %v", err)
- }
- if err := graph.UpdateEdgePolicy(edge2); err != nil {
- t.Fatalf("unable to update edge: %v", err)
- }
-
- // Check for existence of the edge within the database, it should be
- // found.
- _, _, found, isZombie, err := graph.HasChannelEdge(chanID)
- if err != nil {
- t.Fatalf("unable to query for edge: %v", err)
- }
- if !found {
- t.Fatalf("graph should have of inserted edge")
- }
- if isZombie {
- t.Fatal("live edge should not be marked as zombie")
- }
-
- // We should also be able to retrieve the channelID only knowing the
- // channel point of the channel.
- dbChanID, err := graph.ChannelID(&outpoint)
- if err != nil {
- t.Fatalf("unable to retrieve channel ID: %v", err)
- }
- if dbChanID != chanID {
- t.Fatalf("chan ID's mismatch, expected %v got %v", dbChanID,
- chanID)
- }
-
- // With the edges inserted, perform some queries to ensure that they've
- // been inserted properly.
- dbEdgeInfo, dbEdge1, dbEdge2, err := graph.FetchChannelEdgesByID(chanID)
- if err != nil {
- t.Fatalf("unable to fetch channel by ID: %v", err)
- }
- if err := compareEdgePolicies(dbEdge1, edge1); err != nil {
- t.Fatalf("edge doesn't match: %v", err)
- }
- if err := compareEdgePolicies(dbEdge2, edge2); err != nil {
- t.Fatalf("edge doesn't match: %v", err)
- }
- assertEdgeInfoEqual(t, dbEdgeInfo, edgeInfo)
-
- // Next, attempt to query the channel edges according to the outpoint
- // of the channel.
- dbEdgeInfo, dbEdge1, dbEdge2, err = graph.FetchChannelEdgesByOutpoint(&outpoint)
- if err != nil {
- t.Fatalf("unable to fetch channel by ID: %v", err)
- }
- if err := compareEdgePolicies(dbEdge1, edge1); err != nil {
- t.Fatalf("edge doesn't match: %v", err)
- }
- if err := compareEdgePolicies(dbEdge2, edge2); err != nil {
- t.Fatalf("edge doesn't match: %v", err)
- }
- assertEdgeInfoEqual(t, dbEdgeInfo, edgeInfo)
-}
-
-func randEdgePolicy(chanID uint64, op wire.OutPoint, db *DB) *ChannelEdgePolicy {
- update := prand.Int63()
-
- return newEdgePolicy(chanID, op, db, update)
-}
-
-func newEdgePolicy(chanID uint64, op wire.OutPoint, db *DB,
- updateTime int64) *ChannelEdgePolicy {
-
- return &ChannelEdgePolicy{
- ChannelID: chanID,
- LastUpdate: time.Unix(updateTime, 0),
- MessageFlags: 1,
- ChannelFlags: 0,
- TimeLockDelta: uint16(prand.Int63()),
- MinHTLC: lnwire.MilliSatoshi(prand.Int63()),
- MaxHTLC: lnwire.MilliSatoshi(prand.Int63()),
- FeeBaseMSat: lnwire.MilliSatoshi(prand.Int63()),
- FeeProportionalMillionths: lnwire.MilliSatoshi(prand.Int63()),
- db: db,
- }
-}
-
-func TestGraphTraversal(t *testing.T) {
- t.Parallel()
-
- db, cleanUp, err := MakeTestDB()
- defer cleanUp()
- if err != nil {
- t.Fatalf("unable to make test database: %v", err)
- }
-
- graph := db.ChannelGraph()
-
- // We'd like to test some of the graph traversal capabilities within
- // the DB, so we'll create a series of fake nodes to insert into the
- // graph.
- const numNodes = 20
- nodes := make([]*LightningNode, numNodes)
- nodeIndex := map[string]struct{}{}
- for i := 0; i < numNodes; i++ {
- node, err := createTestVertex(db)
- if err != nil {
- t.Fatalf("unable to create node: %v", err)
- }
-
- nodes[i] = node
- nodeIndex[node.Alias] = struct{}{}
- }
-
- // Add each of the nodes into the graph, they should be inserted
- // without error.
- for _, node := range nodes {
- if err := graph.AddLightningNode(node); err != nil {
- t.Fatalf("unable to add node: %v", err)
- }
- }
-
- // Iterate over each node as returned by the graph, if all nodes are
- // reached, then the map created above should be empty.
- err = graph.ForEachNode(func(_ kvdb.RTx, node *LightningNode) er.R {
- delete(nodeIndex, node.Alias)
- return nil
- })
- if err != nil {
- t.Fatalf("for each failure: %v", err)
- }
- if len(nodeIndex) != 0 {
- t.Fatalf("all nodes not reached within ForEach")
- }
-
- // Determine which node is "smaller", we'll need this in order to
- // properly create the edges for the graph.
- var firstNode, secondNode *LightningNode
- if bytes.Compare(nodes[0].PubKeyBytes[:], nodes[1].PubKeyBytes[:]) == -1 {
- firstNode = nodes[0]
- secondNode = nodes[1]
- } else {
- firstNode = nodes[0]
- secondNode = nodes[1]
- }
-
- // Create 5 channels between the first two nodes we generated above.
- const numChannels = 5
- chanIndex := map[uint64]struct{}{}
- for i := 0; i < numChannels; i++ {
- txHash := sha256.Sum256([]byte{byte(i)})
- chanID := uint64(i + 1)
- op := wire.OutPoint{
- Hash: txHash,
- Index: 0,
- }
-
- edgeInfo := ChannelEdgeInfo{
- ChannelID: chanID,
- ChainHash: key,
- AuthProof: &ChannelAuthProof{
- NodeSig1Bytes: testSig.Serialize(),
- NodeSig2Bytes: testSig.Serialize(),
- BitcoinSig1Bytes: testSig.Serialize(),
- BitcoinSig2Bytes: testSig.Serialize(),
- },
- ChannelPoint: op,
- Capacity: 1000,
- }
- copy(edgeInfo.NodeKey1Bytes[:], nodes[0].PubKeyBytes[:])
- copy(edgeInfo.NodeKey2Bytes[:], nodes[1].PubKeyBytes[:])
- copy(edgeInfo.BitcoinKey1Bytes[:], nodes[0].PubKeyBytes[:])
- copy(edgeInfo.BitcoinKey2Bytes[:], nodes[1].PubKeyBytes[:])
- err := graph.AddChannelEdge(&edgeInfo)
- if err != nil {
- t.Fatalf("unable to add node: %v", err)
- }
-
- // Create and add an edge with random data that points from
- // node1 -> node2.
- edge := randEdgePolicy(chanID, op, db)
- edge.ChannelFlags = 0
- edge.Node = secondNode
- edge.SigBytes = testSig.Serialize()
- if err := graph.UpdateEdgePolicy(edge); err != nil {
- t.Fatalf("unable to update edge: %v", err)
- }
-
- // Create another random edge that points from node2 -> node1
- // this time.
- edge = randEdgePolicy(chanID, op, db)
- edge.ChannelFlags = 1
- edge.Node = firstNode
- edge.SigBytes = testSig.Serialize()
- if err := graph.UpdateEdgePolicy(edge); err != nil {
- t.Fatalf("unable to update edge: %v", err)
- }
-
- chanIndex[chanID] = struct{}{}
- }
-
- // Iterate through all the known channels within the graph DB, once
- // again if the map is empty that indicates that all edges have
- // properly been reached.
- err = graph.ForEachChannel(func(ei *ChannelEdgeInfo, _ *ChannelEdgePolicy,
- _ *ChannelEdgePolicy) er.R {
-
- delete(chanIndex, ei.ChannelID)
- return nil
- })
- if err != nil {
- t.Fatalf("for each failure: %v", err)
- }
- if len(chanIndex) != 0 {
- t.Fatalf("all edges not reached within ForEach")
- }
-
- // Finally, we want to test the ability to iterate over all the
- // outgoing channels for a particular node.
- numNodeChans := 0
- err = firstNode.ForEachChannel(nil, func(_ kvdb.RTx, _ *ChannelEdgeInfo,
- outEdge, inEdge *ChannelEdgePolicy) er.R {
-
- // All channels between first and second node should have fully
- // (both sides) specified policies.
- if inEdge == nil || outEdge == nil {
- return er.Errorf("channel policy not present")
- }
-
- // Each should indicate that it's outgoing (pointed
- // towards the second node).
- if !bytes.Equal(outEdge.Node.PubKeyBytes[:], secondNode.PubKeyBytes[:]) {
- return er.Errorf("wrong outgoing edge")
- }
-
- // The incoming edge should also indicate that it's pointing to
- // the origin node.
- if !bytes.Equal(inEdge.Node.PubKeyBytes[:], firstNode.PubKeyBytes[:]) {
- return er.Errorf("wrong outgoing edge")
- }
-
- numNodeChans++
- return nil
- })
- if err != nil {
- t.Fatalf("for each failure: %v", err)
- }
- if numNodeChans != numChannels {
- t.Fatalf("all edges for node not reached within ForEach: "+
- "expected %v, got %v", numChannels, numNodeChans)
- }
-}
-
-func assertPruneTip(t *testing.T, graph *ChannelGraph, blockHash *chainhash.Hash,
- blockHeight uint32) {
-
- pruneHash, pruneHeight, err := graph.PruneTip()
- if err != nil {
- _, _, line, _ := runtime.Caller(1)
- t.Fatalf("line %v: unable to fetch prune tip: %v", line, err)
- }
- if !bytes.Equal(blockHash[:], pruneHash[:]) {
- _, _, line, _ := runtime.Caller(1)
- t.Fatalf("line: %v, prune tips don't match, expected %x got %x",
- line, blockHash, pruneHash)
- }
- if pruneHeight != blockHeight {
- _, _, line, _ := runtime.Caller(1)
- t.Fatalf("line %v: prune heights don't match, expected %v "+
- "got %v", line, blockHeight, pruneHeight)
- }
-}
-
-func assertNumChans(t *testing.T, graph *ChannelGraph, n int) {
- numChans := 0
- if err := graph.ForEachChannel(func(*ChannelEdgeInfo, *ChannelEdgePolicy,
- *ChannelEdgePolicy) er.R {
-
- numChans++
- return nil
- }); err != nil {
- _, _, line, _ := runtime.Caller(1)
- t.Fatalf("line %v: unable to scan channels: %v", line, err)
- }
- if numChans != n {
- _, _, line, _ := runtime.Caller(1)
- t.Fatalf("line %v: expected %v chans instead have %v", line,
- n, numChans)
- }
-}
-
-func assertNumNodes(t *testing.T, graph *ChannelGraph, n int) {
- numNodes := 0
- err := graph.ForEachNode(func(_ kvdb.RTx, _ *LightningNode) er.R {
- numNodes++
- return nil
- })
- if err != nil {
- _, _, line, _ := runtime.Caller(1)
- t.Fatalf("line %v: unable to scan nodes: %v", line, err)
- }
-
- if numNodes != n {
- _, _, line, _ := runtime.Caller(1)
- t.Fatalf("line %v: expected %v nodes, got %v", line, n, numNodes)
- }
-}
-
-func assertChanViewEqual(t *testing.T, a []EdgePoint, b []EdgePoint) {
- if len(a) != len(b) {
- _, _, line, _ := runtime.Caller(1)
- t.Fatalf("line %v: chan views don't match", line)
- }
-
- chanViewSet := make(map[wire.OutPoint]struct{})
- for _, op := range a {
- chanViewSet[op.OutPoint] = struct{}{}
- }
-
- for _, op := range b {
- if _, ok := chanViewSet[op.OutPoint]; !ok {
- _, _, line, _ := runtime.Caller(1)
- t.Fatalf("line %v: chanPoint(%v) not found in first "+
- "view", line, op)
- }
- }
-}
-
-func assertChanViewEqualChanPoints(t *testing.T, a []EdgePoint, b []*wire.OutPoint) {
- if len(a) != len(b) {
- _, _, line, _ := runtime.Caller(1)
- t.Fatalf("line %v: chan views don't match", line)
- }
-
- chanViewSet := make(map[wire.OutPoint]struct{})
- for _, op := range a {
- chanViewSet[op.OutPoint] = struct{}{}
- }
-
- for _, op := range b {
- if _, ok := chanViewSet[*op]; !ok {
- _, _, line, _ := runtime.Caller(1)
- t.Fatalf("line %v: chanPoint(%v) not found in first "+
- "view", line, op)
- }
- }
-}
-
-func TestGraphPruning(t *testing.T) {
- t.Parallel()
-
- db, cleanUp, err := MakeTestDB()
- defer cleanUp()
- if err != nil {
- t.Fatalf("unable to make test database: %v", err)
- }
-
- graph := db.ChannelGraph()
- sourceNode, err := createTestVertex(db)
- if err != nil {
- t.Fatalf("unable to create source node: %v", err)
- }
- if err := graph.SetSourceNode(sourceNode); err != nil {
- t.Fatalf("unable to set source node: %v", err)
- }
-
- // As initial set up for the test, we'll create a graph with 5 vertexes
- // and enough edges to create a fully connected graph. The graph will
- // be rather simple, representing a straight line.
- const numNodes = 5
- graphNodes := make([]*LightningNode, numNodes)
- for i := 0; i < numNodes; i++ {
- node, err := createTestVertex(db)
- if err != nil {
- t.Fatalf("unable to create node: %v", err)
- }
-
- if err := graph.AddLightningNode(node); err != nil {
- t.Fatalf("unable to add node: %v", err)
- }
-
- graphNodes[i] = node
- }
-
- // With the vertexes created, we'll next create a series of channels
- // between them.
- channelPoints := make([]*wire.OutPoint, 0, numNodes-1)
- edgePoints := make([]EdgePoint, 0, numNodes-1)
- for i := 0; i < numNodes-1; i++ {
- txHash := sha256.Sum256([]byte{byte(i)})
- chanID := uint64(i + 1)
- op := wire.OutPoint{
- Hash: txHash,
- Index: 0,
- }
-
- channelPoints = append(channelPoints, &op)
-
- edgeInfo := ChannelEdgeInfo{
- ChannelID: chanID,
- ChainHash: key,
- AuthProof: &ChannelAuthProof{
- NodeSig1Bytes: testSig.Serialize(),
- NodeSig2Bytes: testSig.Serialize(),
- BitcoinSig1Bytes: testSig.Serialize(),
- BitcoinSig2Bytes: testSig.Serialize(),
- },
- ChannelPoint: op,
- Capacity: 1000,
- }
- copy(edgeInfo.NodeKey1Bytes[:], graphNodes[i].PubKeyBytes[:])
- copy(edgeInfo.NodeKey2Bytes[:], graphNodes[i+1].PubKeyBytes[:])
- copy(edgeInfo.BitcoinKey1Bytes[:], graphNodes[i].PubKeyBytes[:])
- copy(edgeInfo.BitcoinKey2Bytes[:], graphNodes[i+1].PubKeyBytes[:])
- if err := graph.AddChannelEdge(&edgeInfo); err != nil {
- t.Fatalf("unable to add node: %v", err)
- }
-
- pkScript, err := genMultiSigP2WSH(
- edgeInfo.BitcoinKey1Bytes[:], edgeInfo.BitcoinKey2Bytes[:],
- )
- if err != nil {
- t.Fatalf("unable to gen multi-sig p2wsh: %v", err)
- }
- edgePoints = append(edgePoints, EdgePoint{
- FundingPkScript: pkScript,
- OutPoint: op,
- })
-
- // Create and add an edge with random data that points from
- // node_i -> node_i+1
- edge := randEdgePolicy(chanID, op, db)
- edge.ChannelFlags = 0
- edge.Node = graphNodes[i]
- edge.SigBytes = testSig.Serialize()
- if err := graph.UpdateEdgePolicy(edge); err != nil {
- t.Fatalf("unable to update edge: %v", err)
- }
-
- // Create another random edge that points from node_i+1 ->
- // node_i this time.
- edge = randEdgePolicy(chanID, op, db)
- edge.ChannelFlags = 1
- edge.Node = graphNodes[i]
- edge.SigBytes = testSig.Serialize()
- if err := graph.UpdateEdgePolicy(edge); err != nil {
- t.Fatalf("unable to update edge: %v", err)
- }
- }
-
- // With all the channel points added, we'll consult the graph to ensure
- // it has the same channel view as the one we just constructed.
- channelView, err := graph.ChannelView()
- if err != nil {
- t.Fatalf("unable to get graph channel view: %v", err)
- }
- assertChanViewEqual(t, channelView, edgePoints)
-
- // Now with our test graph created, we can test the pruning
- // capabilities of the channel graph.
-
- // First we create a mock block that ends up closing the first two
- // channels.
- var blockHash chainhash.Hash
- copy(blockHash[:], bytes.Repeat([]byte{1}, 32))
- blockHeight := uint32(1)
- block := channelPoints[:2]
- prunedChans, err := graph.PruneGraph(block, &blockHash, blockHeight)
- if err != nil {
- t.Fatalf("unable to prune graph: %v", err)
- }
- if len(prunedChans) != 2 {
- t.Fatalf("incorrect number of channels pruned: "+
- "expected %v, got %v", 2, prunedChans)
- }
-
- // Now ensure that the prune tip has been updated.
- assertPruneTip(t, graph, &blockHash, blockHeight)
-
- // Count up the number of channels known within the graph, only 2
- // should be remaining.
- assertNumChans(t, graph, 2)
-
- // Those channels should also be missing from the channel view.
- channelView, err = graph.ChannelView()
- if err != nil {
- t.Fatalf("unable to get graph channel view: %v", err)
- }
- assertChanViewEqualChanPoints(t, channelView, channelPoints[2:])
-
- // Next we'll create a block that doesn't close any channels within the
- // graph to test the negative error case.
- fakeHash := sha256.Sum256([]byte("test prune"))
- nonChannel := &wire.OutPoint{
- Hash: fakeHash,
- Index: 9,
- }
- blockHash = sha256.Sum256(blockHash[:])
- blockHeight = 2
- prunedChans, err = graph.PruneGraph(
- []*wire.OutPoint{nonChannel}, &blockHash, blockHeight,
- )
- if err != nil {
- t.Fatalf("unable to prune graph: %v", err)
- }
-
- // No channels should have been detected as pruned.
- if len(prunedChans) != 0 {
- t.Fatalf("channels were pruned but shouldn't have been")
- }
-
- // Once again, the prune tip should have been updated. We should still
- // see both channels and their participants, along with the source node.
- assertPruneTip(t, graph, &blockHash, blockHeight)
- assertNumChans(t, graph, 2)
- assertNumNodes(t, graph, 4)
-
- // Finally, create a block that prunes the remainder of the channels
- // from the graph.
- blockHash = sha256.Sum256(blockHash[:])
- blockHeight = 3
- prunedChans, err = graph.PruneGraph(
- channelPoints[2:], &blockHash, blockHeight,
- )
- if err != nil {
- t.Fatalf("unable to prune graph: %v", err)
- }
-
- // The remainder of the channels should have been pruned from the
- // graph.
- if len(prunedChans) != 2 {
- t.Fatalf("incorrect number of channels pruned: "+
- "expected %v, got %v", 2, len(prunedChans))
- }
-
- // The prune tip should be updated, no channels should be found, and
- // only the source node should remain within the current graph.
- assertPruneTip(t, graph, &blockHash, blockHeight)
- assertNumChans(t, graph, 0)
- assertNumNodes(t, graph, 1)
-
- // Finally, the channel view at this point in the graph should now be
- // completely empty. Those channels should also be missing from the
- // channel view.
- channelView, err = graph.ChannelView()
- if err != nil {
- t.Fatalf("unable to get graph channel view: %v", err)
- }
- if len(channelView) != 0 {
- t.Fatalf("channel view should be empty, instead have: %v",
- channelView)
- }
-}
-
-// TestHighestChanID tests that we're able to properly retrieve the highest
-// known channel ID in the database.
-func TestHighestChanID(t *testing.T) {
- t.Parallel()
-
- db, cleanUp, err := MakeTestDB()
- defer cleanUp()
- if err != nil {
- t.Fatalf("unable to make test database: %v", err)
- }
-
- graph := db.ChannelGraph()
-
- // If we don't yet have any channels in the database, then we should
- // get a channel ID of zero if we ask for the highest channel ID.
- bestID, err := graph.HighestChanID()
- if err != nil {
- t.Fatalf("unable to get highest ID: %v", err)
- }
- if bestID != 0 {
- t.Fatalf("best ID w/ no chan should be zero, is instead: %v",
- bestID)
- }
-
- // Next, we'll insert two channels into the database, with each channel
- // connecting the same two nodes.
- node1, err := createTestVertex(db)
- if err != nil {
- t.Fatalf("unable to create test node: %v", err)
- }
- node2, err := createTestVertex(db)
- if err != nil {
- t.Fatalf("unable to create test node: %v", err)
- }
-
- // The first channel with be at height 10, while the other will be at
- // height 100.
- edge1, _ := createEdge(10, 0, 0, 0, node1, node2)
- edge2, chanID2 := createEdge(100, 0, 0, 0, node1, node2)
-
- if err := graph.AddChannelEdge(&edge1); err != nil {
- t.Fatalf("unable to create channel edge: %v", err)
- }
- if err := graph.AddChannelEdge(&edge2); err != nil {
- t.Fatalf("unable to create channel edge: %v", err)
- }
-
- // Now that the edges has been inserted, we'll query for the highest
- // known channel ID in the database.
- bestID, err = graph.HighestChanID()
- if err != nil {
- t.Fatalf("unable to get highest ID: %v", err)
- }
-
- if bestID != chanID2.ToUint64() {
- t.Fatalf("expected %v got %v for best chan ID: ",
- chanID2.ToUint64(), bestID)
- }
-
- // If we add another edge, then the current best chan ID should be
- // updated as well.
- edge3, chanID3 := createEdge(1000, 0, 0, 0, node1, node2)
- if err := graph.AddChannelEdge(&edge3); err != nil {
- t.Fatalf("unable to create channel edge: %v", err)
- }
- bestID, err = graph.HighestChanID()
- if err != nil {
- t.Fatalf("unable to get highest ID: %v", err)
- }
-
- if bestID != chanID3.ToUint64() {
- t.Fatalf("expected %v got %v for best chan ID: ",
- chanID3.ToUint64(), bestID)
- }
-}
-
-// TestChanUpdatesInHorizon tests the we're able to properly retrieve all known
-// channel updates within a specific time horizon. It also tests that upon
-// insertion of a new edge, the edge update index is updated properly.
-func TestChanUpdatesInHorizon(t *testing.T) {
- t.Parallel()
-
- db, cleanUp, err := MakeTestDB()
- defer cleanUp()
- if err != nil {
- t.Fatalf("unable to make test database: %v", err)
- }
-
- graph := db.ChannelGraph()
-
- // If we issue an arbitrary query before any channel updates are
- // inserted in the database, we should get zero results.
- chanUpdates, err := graph.ChanUpdatesInHorizon(
- time.Unix(999, 0), time.Unix(9999, 0),
- )
- if err != nil {
- t.Fatalf("unable to updates for updates: %v", err)
- }
- if len(chanUpdates) != 0 {
- t.Fatalf("expected 0 chan updates, instead got %v",
- len(chanUpdates))
- }
-
- // We'll start by creating two nodes which will seed our test graph.
- node1, err := createTestVertex(db)
- if err != nil {
- t.Fatalf("unable to create test node: %v", err)
- }
- if err := graph.AddLightningNode(node1); err != nil {
- t.Fatalf("unable to add node: %v", err)
- }
- node2, err := createTestVertex(db)
- if err != nil {
- t.Fatalf("unable to create test node: %v", err)
- }
- if err := graph.AddLightningNode(node2); err != nil {
- t.Fatalf("unable to add node: %v", err)
- }
-
- // We'll now create 10 channels between the two nodes, with update
- // times 10 seconds after each other.
- const numChans = 10
- startTime := time.Unix(1234, 0)
- endTime := startTime
- edges := make([]ChannelEdge, 0, numChans)
- for i := 0; i < numChans; i++ {
- txHash := sha256.Sum256([]byte{byte(i)})
- op := wire.OutPoint{
- Hash: txHash,
- Index: 0,
- }
-
- channel, chanID := createEdge(
- uint32(i*10), 0, 0, 0, node1, node2,
- )
-
- if err := graph.AddChannelEdge(&channel); err != nil {
- t.Fatalf("unable to create channel edge: %v", err)
- }
-
- edge1UpdateTime := endTime
- edge2UpdateTime := edge1UpdateTime.Add(time.Second)
- endTime = endTime.Add(time.Second * 10)
-
- edge1 := newEdgePolicy(
- chanID.ToUint64(), op, db, edge1UpdateTime.Unix(),
- )
- edge1.ChannelFlags = 0
- edge1.Node = node2
- edge1.SigBytes = testSig.Serialize()
- if err := graph.UpdateEdgePolicy(edge1); err != nil {
- t.Fatalf("unable to update edge: %v", err)
- }
-
- edge2 := newEdgePolicy(
- chanID.ToUint64(), op, db, edge2UpdateTime.Unix(),
- )
- edge2.ChannelFlags = 1
- edge2.Node = node1
- edge2.SigBytes = testSig.Serialize()
- if err := graph.UpdateEdgePolicy(edge2); err != nil {
- t.Fatalf("unable to update edge: %v", err)
- }
-
- edges = append(edges, ChannelEdge{
- Info: &channel,
- Policy1: edge1,
- Policy2: edge2,
- })
- }
-
- // With our channels loaded, we'll now start our series of queries.
- queryCases := []struct {
- start time.Time
- end time.Time
-
- resp []ChannelEdge
- }{
- // If we query for a time range that's strictly below our set
- // of updates, then we'll get an empty result back.
- {
- start: time.Unix(100, 0),
- end: time.Unix(200, 0),
- },
-
- // If we query for a time range that's well beyond our set of
- // updates, we should get an empty set of results back.
- {
- start: time.Unix(99999, 0),
- end: time.Unix(999999, 0),
- },
-
- // If we query for the start time, and 10 seconds directly
- // after it, we should only get a single update, that first
- // one.
- {
- start: time.Unix(1234, 0),
- end: startTime.Add(time.Second * 10),
-
- resp: []ChannelEdge{edges[0]},
- },
-
- // If we add 10 seconds past the first update, and then
- // subtract 10 from the last update, then we should only get
- // the 8 edges in the middle.
- {
- start: startTime.Add(time.Second * 10),
- end: endTime.Add(-time.Second * 10),
-
- resp: edges[1:9],
- },
-
- // If we use the start and end time as is, we should get the
- // entire range.
- {
- start: startTime,
- end: endTime,
-
- resp: edges,
- },
- }
- for _, queryCase := range queryCases {
- resp, err := graph.ChanUpdatesInHorizon(
- queryCase.start, queryCase.end,
- )
- if err != nil {
- t.Fatalf("unable to query for updates: %v", err)
- }
-
- if len(resp) != len(queryCase.resp) {
- t.Fatalf("expected %v chans, got %v chans",
- len(queryCase.resp), len(resp))
-
- }
-
- for i := 0; i < len(resp); i++ {
- chanExp := queryCase.resp[i]
- chanRet := resp[i]
-
- assertEdgeInfoEqual(t, chanExp.Info, chanRet.Info)
-
- err := compareEdgePolicies(chanExp.Policy1, chanRet.Policy1)
- if err != nil {
- t.Fatal(err)
- }
- compareEdgePolicies(chanExp.Policy2, chanRet.Policy2)
- if err != nil {
- t.Fatal(err)
- }
- }
- }
-}
-
-// TestNodeUpdatesInHorizon tests that we're able to properly scan and retrieve
-// the most recent node updates within a particular time horizon.
-func TestNodeUpdatesInHorizon(t *testing.T) {
- t.Parallel()
-
- db, cleanUp, err := MakeTestDB()
- defer cleanUp()
- if err != nil {
- t.Fatalf("unable to make test database: %v", err)
- }
-
- graph := db.ChannelGraph()
-
- startTime := time.Unix(1234, 0)
- endTime := startTime
-
- // If we issue an arbitrary query before we insert any nodes into the
- // database, then we shouldn't get any results back.
- nodeUpdates, err := graph.NodeUpdatesInHorizon(
- time.Unix(999, 0), time.Unix(9999, 0),
- )
- if err != nil {
- t.Fatalf("unable to query for node updates: %v", err)
- }
- if len(nodeUpdates) != 0 {
- t.Fatalf("expected 0 node updates, instead got %v",
- len(nodeUpdates))
- }
-
- // We'll create 10 node announcements, each with an update timestamp 10
- // seconds after the other.
- const numNodes = 10
- nodeAnns := make([]LightningNode, 0, numNodes)
- for i := 0; i < numNodes; i++ {
- nodeAnn, err := createTestVertex(db)
- if err != nil {
- t.Fatalf("unable to create test vertex: %v", err)
- }
-
- // The node ann will use the current end time as its last
- // update them, then we'll add 10 seconds in order to create
- // the proper update time for the next node announcement.
- updateTime := endTime
- endTime = updateTime.Add(time.Second * 10)
-
- nodeAnn.LastUpdate = updateTime
-
- nodeAnns = append(nodeAnns, *nodeAnn)
-
- if err := graph.AddLightningNode(nodeAnn); err != nil {
- t.Fatalf("unable to add lightning node: %v", err)
- }
- }
-
- queryCases := []struct {
- start time.Time
- end time.Time
-
- resp []LightningNode
- }{
- // If we query for a time range that's strictly below our set
- // of updates, then we'll get an empty result back.
- {
- start: time.Unix(100, 0),
- end: time.Unix(200, 0),
- },
-
- // If we query for a time range that's well beyond our set of
- // updates, we should get an empty set of results back.
- {
- start: time.Unix(99999, 0),
- end: time.Unix(999999, 0),
- },
-
- // If we skip he first time epoch with out start time, then we
- // should get back every now but the first.
- {
- start: startTime.Add(time.Second * 10),
- end: endTime,
-
- resp: nodeAnns[1:],
- },
-
- // If we query for the range as is, we should get all 10
- // announcements back.
- {
- start: startTime,
- end: endTime,
-
- resp: nodeAnns,
- },
-
- // If we reduce the ending time by 10 seconds, then we should
- // get all but the last node we inserted.
- {
- start: startTime,
- end: endTime.Add(-time.Second * 10),
-
- resp: nodeAnns[:9],
- },
- }
- for _, queryCase := range queryCases {
- resp, err := graph.NodeUpdatesInHorizon(queryCase.start, queryCase.end)
- if err != nil {
- t.Fatalf("unable to query for nodes: %v", err)
- }
-
- if len(resp) != len(queryCase.resp) {
- t.Fatalf("expected %v nodes, got %v nodes",
- len(queryCase.resp), len(resp))
-
- }
-
- for i := 0; i < len(resp); i++ {
- err := compareNodes(&queryCase.resp[i], &resp[i])
- if err != nil {
- t.Fatal(err)
- }
- }
- }
-}
-
-// TestFilterKnownChanIDs tests that we're able to properly perform the set
-// differences of an incoming set of channel ID's, and those that we already
-// know of on disk.
-func TestFilterKnownChanIDs(t *testing.T) {
- t.Parallel()
-
- db, cleanUp, err := MakeTestDB()
- defer cleanUp()
- if err != nil {
- t.Fatalf("unable to make test database: %v", err)
- }
-
- graph := db.ChannelGraph()
-
- // If we try to filter out a set of channel ID's before we even know of
- // any channels, then we should get the entire set back.
- preChanIDs := []uint64{1, 2, 3, 4}
- filteredIDs, err := graph.FilterKnownChanIDs(preChanIDs)
- if err != nil {
- t.Fatalf("unable to filter chan IDs: %v", err)
- }
- if !reflect.DeepEqual(preChanIDs, filteredIDs) {
- t.Fatalf("chan IDs shouldn't have been filtered!")
- }
-
- // We'll start by creating two nodes which will seed our test graph.
- node1, err := createTestVertex(db)
- if err != nil {
- t.Fatalf("unable to create test node: %v", err)
- }
- if err := graph.AddLightningNode(node1); err != nil {
- t.Fatalf("unable to add node: %v", err)
- }
- node2, err := createTestVertex(db)
- if err != nil {
- t.Fatalf("unable to create test node: %v", err)
- }
- if err := graph.AddLightningNode(node2); err != nil {
- t.Fatalf("unable to add node: %v", err)
- }
-
- // Next, we'll add 5 channel ID's to the graph, each of them having a
- // block height 10 blocks after the previous.
- const numChans = 5
- chanIDs := make([]uint64, 0, numChans)
- for i := 0; i < numChans; i++ {
- channel, chanID := createEdge(
- uint32(i*10), 0, 0, 0, node1, node2,
- )
-
- if err := graph.AddChannelEdge(&channel); err != nil {
- t.Fatalf("unable to create channel edge: %v", err)
- }
-
- chanIDs = append(chanIDs, chanID.ToUint64())
- }
-
- const numZombies = 5
- zombieIDs := make([]uint64, 0, numZombies)
- for i := 0; i < numZombies; i++ {
- channel, chanID := createEdge(
- uint32(i*10+1), 0, 0, 0, node1, node2,
- )
- if err := graph.AddChannelEdge(&channel); err != nil {
- t.Fatalf("unable to create channel edge: %v", err)
- }
- err := graph.DeleteChannelEdges(channel.ChannelID)
- if err != nil {
- t.Fatalf("unable to mark edge zombie: %v", err)
- }
-
- zombieIDs = append(zombieIDs, chanID.ToUint64())
- }
-
- queryCases := []struct {
- queryIDs []uint64
-
- resp []uint64
- }{
- // If we attempt to filter out all chanIDs we know of, the
- // response should be the empty set.
- {
- queryIDs: chanIDs,
- },
- // If we attempt to filter out all zombies that we know of, the
- // response should be the empty set.
- {
- queryIDs: zombieIDs,
- },
-
- // If we query for a set of ID's that we didn't insert, we
- // should get the same set back.
- {
- queryIDs: []uint64{99, 100},
- resp: []uint64{99, 100},
- },
-
- // If we query for a super-set of our the chan ID's inserted,
- // we should only get those new chanIDs back.
- {
- queryIDs: append(chanIDs, []uint64{99, 101}...),
- resp: []uint64{99, 101},
- },
- }
-
- for _, queryCase := range queryCases {
- resp, err := graph.FilterKnownChanIDs(queryCase.queryIDs)
- if err != nil {
- t.Fatalf("unable to filter chan IDs: %v", err)
- }
-
- if !reflect.DeepEqual(resp, queryCase.resp) {
- t.Fatalf("expected %v, got %v", spew.Sdump(queryCase.resp),
- spew.Sdump(resp))
- }
- }
-}
-
-// TestFilterChannelRange tests that we're able to properly retrieve the full
-// set of short channel ID's for a given block range.
-func TestFilterChannelRange(t *testing.T) {
- t.Parallel()
-
- db, cleanUp, err := MakeTestDB()
- defer cleanUp()
- if err != nil {
- t.Fatalf("unable to make test database: %v", err)
- }
-
- graph := db.ChannelGraph()
-
- // We'll first populate our graph with two nodes. All channels created
- // below will be made between these two nodes.
- node1, err := createTestVertex(db)
- if err != nil {
- t.Fatalf("unable to create test node: %v", err)
- }
- if err := graph.AddLightningNode(node1); err != nil {
- t.Fatalf("unable to add node: %v", err)
- }
- node2, err := createTestVertex(db)
- if err != nil {
- t.Fatalf("unable to create test node: %v", err)
- }
- if err := graph.AddLightningNode(node2); err != nil {
- t.Fatalf("unable to add node: %v", err)
- }
-
- // If we try to filter a channel range before we have any channels
- // inserted, we should get an empty slice of results.
- resp, err := graph.FilterChannelRange(10, 100)
- if err != nil {
- t.Fatalf("unable to filter channels: %v", err)
- }
- if len(resp) != 0 {
- t.Fatalf("expected zero chans, instead got %v", len(resp))
- }
-
- // To start, we'll create a set of channels, each mined in a block 10
- // blocks after the prior one.
- startHeight := uint32(100)
- endHeight := startHeight
- const numChans = 10
- chanIDs := make([]uint64, 0, numChans)
- for i := 0; i < numChans; i++ {
- chanHeight := endHeight
- channel, chanID := createEdge(
- uint32(chanHeight), uint32(i+1), 0, 0, node1, node2,
- )
-
- if err := graph.AddChannelEdge(&channel); err != nil {
- t.Fatalf("unable to create channel edge: %v", err)
- }
-
- chanIDs = append(chanIDs, chanID.ToUint64())
-
- endHeight += 10
- }
-
- // With our channels inserted, we'll construct a series of queries that
- // we'll execute below in order to exercise the features of the
- // FilterKnownChanIDs method.
- queryCases := []struct {
- startHeight uint32
- endHeight uint32
-
- resp []uint64
- }{
- // If we query for the entire range, then we should get the same
- // set of short channel IDs back.
- {
- startHeight: startHeight,
- endHeight: endHeight,
-
- resp: chanIDs,
- },
-
- // If we query for a range of channels right before our range, we
- // shouldn't get any results back.
- {
- startHeight: 0,
- endHeight: 10,
- },
-
- // If we only query for the last height (range wise), we should
- // only get that last channel.
- {
- startHeight: endHeight - 10,
- endHeight: endHeight - 10,
-
- resp: chanIDs[9:],
- },
-
- // If we query for just the first height, we should only get a
- // single channel back (the first one).
- {
- startHeight: startHeight,
- endHeight: startHeight,
-
- resp: chanIDs[:1],
- },
- }
- for i, queryCase := range queryCases {
- resp, err := graph.FilterChannelRange(
- queryCase.startHeight, queryCase.endHeight,
- )
- if err != nil {
- t.Fatalf("unable to issue range query: %v", err)
- }
-
- if !reflect.DeepEqual(resp, queryCase.resp) {
- t.Fatalf("case #%v: expected %v, got %v", i,
- queryCase.resp, resp)
- }
- }
-}
-
-// TestFetchChanInfos tests that we're able to properly retrieve the full set
-// of ChannelEdge structs for a given set of short channel ID's.
-func TestFetchChanInfos(t *testing.T) {
- t.Parallel()
-
- db, cleanUp, err := MakeTestDB()
- defer cleanUp()
- if err != nil {
- t.Fatalf("unable to make test database: %v", err)
- }
-
- graph := db.ChannelGraph()
-
- // We'll first populate our graph with two nodes. All channels created
- // below will be made between these two nodes.
- node1, err := createTestVertex(db)
- if err != nil {
- t.Fatalf("unable to create test node: %v", err)
- }
- if err := graph.AddLightningNode(node1); err != nil {
- t.Fatalf("unable to add node: %v", err)
- }
- node2, err := createTestVertex(db)
- if err != nil {
- t.Fatalf("unable to create test node: %v", err)
- }
- if err := graph.AddLightningNode(node2); err != nil {
- t.Fatalf("unable to add node: %v", err)
- }
-
- // We'll make 5 test channels, ensuring we keep track of which channel
- // ID corresponds to a particular ChannelEdge.
- const numChans = 5
- startTime := time.Unix(1234, 0)
- endTime := startTime
- edges := make([]ChannelEdge, 0, numChans)
- edgeQuery := make([]uint64, 0, numChans)
- for i := 0; i < numChans; i++ {
- txHash := sha256.Sum256([]byte{byte(i)})
- op := wire.OutPoint{
- Hash: txHash,
- Index: 0,
- }
-
- channel, chanID := createEdge(
- uint32(i*10), 0, 0, 0, node1, node2,
- )
-
- if err := graph.AddChannelEdge(&channel); err != nil {
- t.Fatalf("unable to create channel edge: %v", err)
- }
-
- updateTime := endTime
- endTime = updateTime.Add(time.Second * 10)
-
- edge1 := newEdgePolicy(
- chanID.ToUint64(), op, db, updateTime.Unix(),
- )
- edge1.ChannelFlags = 0
- edge1.Node = node2
- edge1.SigBytes = testSig.Serialize()
- if err := graph.UpdateEdgePolicy(edge1); err != nil {
- t.Fatalf("unable to update edge: %v", err)
- }
-
- edge2 := newEdgePolicy(
- chanID.ToUint64(), op, db, updateTime.Unix(),
- )
- edge2.ChannelFlags = 1
- edge2.Node = node1
- edge2.SigBytes = testSig.Serialize()
- if err := graph.UpdateEdgePolicy(edge2); err != nil {
- t.Fatalf("unable to update edge: %v", err)
- }
-
- edges = append(edges, ChannelEdge{
- Info: &channel,
- Policy1: edge1,
- Policy2: edge2,
- })
-
- edgeQuery = append(edgeQuery, chanID.ToUint64())
- }
-
- // Add an additional edge that does not exist. The query should skip
- // this channel and return only infos for the edges that exist.
- edgeQuery = append(edgeQuery, 500)
-
- // Add an another edge to the query that has been marked as a zombie
- // edge. The query should also skip this channel.
- zombieChan, zombieChanID := createEdge(
- 666, 0, 0, 0, node1, node2,
- )
- if err := graph.AddChannelEdge(&zombieChan); err != nil {
- t.Fatalf("unable to create channel edge: %v", err)
- }
- err = graph.DeleteChannelEdges(zombieChan.ChannelID)
- if err != nil {
- t.Fatalf("unable to delete and mark edge zombie: %v", err)
- }
- edgeQuery = append(edgeQuery, zombieChanID.ToUint64())
-
- // We'll now attempt to query for the range of channel ID's we just
- // inserted into the database. We should get the exact same set of
- // edges back.
- resp, err := graph.FetchChanInfos(edgeQuery)
- if err != nil {
- t.Fatalf("unable to fetch chan edges: %v", err)
- }
- if len(resp) != len(edges) {
- t.Fatalf("expected %v edges, instead got %v", len(edges),
- len(resp))
- }
-
- for i := 0; i < len(resp); i++ {
- err := compareEdgePolicies(resp[i].Policy1, edges[i].Policy1)
- if err != nil {
- t.Fatalf("edge doesn't match: %v", err)
- }
- err = compareEdgePolicies(resp[i].Policy2, edges[i].Policy2)
- if err != nil {
- t.Fatalf("edge doesn't match: %v", err)
- }
- assertEdgeInfoEqual(t, resp[i].Info, edges[i].Info)
- }
-}
-
-// TestIncompleteChannelPolicies tests that a channel that only has a policy
-// specified on one end is properly returned in ForEachChannel calls from
-// both sides.
-func TestIncompleteChannelPolicies(t *testing.T) {
- t.Parallel()
-
- db, cleanUp, err := MakeTestDB()
- defer cleanUp()
- if err != nil {
- t.Fatalf("unable to make test database: %v", err)
- }
-
- graph := db.ChannelGraph()
-
- // Create two nodes.
- node1, err := createTestVertex(db)
- if err != nil {
- t.Fatalf("unable to create test node: %v", err)
- }
- if err := graph.AddLightningNode(node1); err != nil {
- t.Fatalf("unable to add node: %v", err)
- }
- node2, err := createTestVertex(db)
- if err != nil {
- t.Fatalf("unable to create test node: %v", err)
- }
- if err := graph.AddLightningNode(node2); err != nil {
- t.Fatalf("unable to add node: %v", err)
- }
-
- // Create channel between nodes.
- txHash := sha256.Sum256([]byte{0})
- op := wire.OutPoint{
- Hash: txHash,
- Index: 0,
- }
-
- channel, chanID := createEdge(
- uint32(0), 0, 0, 0, node1, node2,
- )
-
- if err := graph.AddChannelEdge(&channel); err != nil {
- t.Fatalf("unable to create channel edge: %v", err)
- }
-
- // Ensure that channel is reported with unknown policies.
- checkPolicies := func(node *LightningNode, expectedIn, expectedOut bool) {
- calls := 0
- err := node.ForEachChannel(nil, func(_ kvdb.RTx, _ *ChannelEdgeInfo,
- outEdge, inEdge *ChannelEdgePolicy) er.R {
-
- if !expectedOut && outEdge != nil {
- t.Fatalf("Expected no outgoing policy")
- }
-
- if expectedOut && outEdge == nil {
- t.Fatalf("Expected an outgoing policy")
- }
-
- if !expectedIn && inEdge != nil {
- t.Fatalf("Expected no incoming policy")
- }
-
- if expectedIn && inEdge == nil {
- t.Fatalf("Expected an incoming policy")
- }
-
- calls++
-
- return nil
- })
- if err != nil {
- t.Fatalf("unable to scan channels: %v", err)
- }
-
- if calls != 1 {
- t.Fatalf("Expected only one callback call")
- }
- }
-
- checkPolicies(node2, false, false)
-
- // Only create an edge policy for node1 and leave the policy for node2
- // unknown.
- updateTime := time.Unix(1234, 0)
-
- edgePolicy := newEdgePolicy(
- chanID.ToUint64(), op, db, updateTime.Unix(),
- )
- edgePolicy.ChannelFlags = 0
- edgePolicy.Node = node2
- edgePolicy.SigBytes = testSig.Serialize()
- if err := graph.UpdateEdgePolicy(edgePolicy); err != nil {
- t.Fatalf("unable to update edge: %v", err)
- }
-
- checkPolicies(node1, false, true)
- checkPolicies(node2, true, false)
-
- // Create second policy and assert that both policies are reported
- // as present.
- edgePolicy = newEdgePolicy(
- chanID.ToUint64(), op, db, updateTime.Unix(),
- )
- edgePolicy.ChannelFlags = 1
- edgePolicy.Node = node1
- edgePolicy.SigBytes = testSig.Serialize()
- if err := graph.UpdateEdgePolicy(edgePolicy); err != nil {
- t.Fatalf("unable to update edge: %v", err)
- }
-
- checkPolicies(node1, true, true)
- checkPolicies(node2, true, true)
-}
-
-// TestChannelEdgePruningUpdateIndexDeletion tests that once edges are deleted
-// from the graph, then their entries within the update index are also cleaned
-// up.
-func TestChannelEdgePruningUpdateIndexDeletion(t *testing.T) {
- t.Parallel()
-
- db, cleanUp, err := MakeTestDB()
- defer cleanUp()
- if err != nil {
- t.Fatalf("unable to make test database: %v", err)
- }
-
- graph := db.ChannelGraph()
- sourceNode, err := createTestVertex(db)
- if err != nil {
- t.Fatalf("unable to create source node: %v", err)
- }
- if err := graph.SetSourceNode(sourceNode); err != nil {
- t.Fatalf("unable to set source node: %v", err)
- }
-
- // We'll first populate our graph with two nodes. All channels created
- // below will be made between these two nodes.
- node1, err := createTestVertex(db)
- if err != nil {
- t.Fatalf("unable to create test node: %v", err)
- }
- if err := graph.AddLightningNode(node1); err != nil {
- t.Fatalf("unable to add node: %v", err)
- }
- node2, err := createTestVertex(db)
- if err != nil {
- t.Fatalf("unable to create test node: %v", err)
- }
- if err := graph.AddLightningNode(node2); err != nil {
- t.Fatalf("unable to add node: %v", err)
- }
-
- // With the two nodes created, we'll now create a random channel, as
- // well as two edges in the database with distinct update times.
- edgeInfo, chanID := createEdge(100, 0, 0, 0, node1, node2)
- if err := graph.AddChannelEdge(&edgeInfo); err != nil {
- t.Fatalf("unable to add edge: %v", err)
- }
-
- edge1 := randEdgePolicy(chanID.ToUint64(), edgeInfo.ChannelPoint, db)
- edge1.ChannelFlags = 0
- edge1.Node = node1
- edge1.SigBytes = testSig.Serialize()
- if err := graph.UpdateEdgePolicy(edge1); err != nil {
- t.Fatalf("unable to update edge: %v", err)
- }
-
- edge2 := randEdgePolicy(chanID.ToUint64(), edgeInfo.ChannelPoint, db)
- edge2.ChannelFlags = 1
- edge2.Node = node2
- edge2.SigBytes = testSig.Serialize()
- if err := graph.UpdateEdgePolicy(edge2); err != nil {
- t.Fatalf("unable to update edge: %v", err)
- }
-
- // checkIndexTimestamps is a helper function that checks the edge update
- // index only includes the given timestamps.
- checkIndexTimestamps := func(timestamps ...uint64) {
- timestampSet := make(map[uint64]struct{})
- for _, t := range timestamps {
- timestampSet[t] = struct{}{}
- }
-
- err := kvdb.View(db, func(tx kvdb.RTx) er.R {
- edges := tx.ReadBucket(edgeBucket)
- if edges == nil {
- return ErrGraphNoEdgesFound.Default()
- }
- edgeUpdateIndex := edges.NestedReadBucket(
- edgeUpdateIndexBucket,
- )
- if edgeUpdateIndex == nil {
- return ErrGraphNoEdgesFound.Default()
- }
-
- var numEntries int
- err := edgeUpdateIndex.ForEach(func(k, v []byte) er.R {
- numEntries++
- return nil
- })
- if err != nil {
- return err
- }
-
- expectedEntries := len(timestampSet)
- if numEntries != expectedEntries {
- return er.Errorf("expected %v entries in the "+
- "update index, got %v", expectedEntries,
- numEntries)
- }
-
- return edgeUpdateIndex.ForEach(func(k, _ []byte) er.R {
- t := byteOrder.Uint64(k[:8])
- if _, ok := timestampSet[t]; !ok {
- return er.Errorf("found unexpected "+
- "timestamp "+"%d", t)
- }
-
- return nil
- })
- }, func() {})
- if err != nil {
- t.Fatal(err)
- }
- }
-
- // With both edges policies added, we'll make sure to check they exist
- // within the edge update index.
- checkIndexTimestamps(
- uint64(edge1.LastUpdate.Unix()),
- uint64(edge2.LastUpdate.Unix()),
- )
-
- // Now, we'll update the edge policies to ensure the old timestamps are
- // removed from the update index.
- edge1.ChannelFlags = 2
- edge1.LastUpdate = time.Now()
- if err := graph.UpdateEdgePolicy(edge1); err != nil {
- t.Fatalf("unable to update edge: %v", err)
- }
- edge2.ChannelFlags = 3
- edge2.LastUpdate = edge1.LastUpdate.Add(time.Hour)
- if err := graph.UpdateEdgePolicy(edge2); err != nil {
- t.Fatalf("unable to update edge: %v", err)
- }
-
- // With the policies updated, we should now be able to find their
- // updated entries within the update index.
- checkIndexTimestamps(
- uint64(edge1.LastUpdate.Unix()),
- uint64(edge2.LastUpdate.Unix()),
- )
-
- // Now we'll prune the graph, removing the edges, and also the update
- // index entries from the database all together.
- var blockHash chainhash.Hash
- copy(blockHash[:], bytes.Repeat([]byte{2}, 32))
- _, err = graph.PruneGraph(
- []*wire.OutPoint{&edgeInfo.ChannelPoint}, &blockHash, 101,
- )
- if err != nil {
- t.Fatalf("unable to prune graph: %v", err)
- }
-
- // Finally, we'll check the database state one last time to conclude
- // that we should no longer be able to locate _any_ entries within the
- // edge update index.
- checkIndexTimestamps()
-}
-
-// TestPruneGraphNodes tests that unconnected vertexes are pruned via the
-// PruneSyncState method.
-func TestPruneGraphNodes(t *testing.T) {
- t.Parallel()
-
- db, cleanUp, err := MakeTestDB()
- defer cleanUp()
- if err != nil {
- t.Fatalf("unable to make test database: %v", err)
- }
-
- // We'll start off by inserting our source node, to ensure that it's
- // the only node left after we prune the graph.
- graph := db.ChannelGraph()
- sourceNode, err := createTestVertex(db)
- if err != nil {
- t.Fatalf("unable to create source node: %v", err)
- }
- if err := graph.SetSourceNode(sourceNode); err != nil {
- t.Fatalf("unable to set source node: %v", err)
- }
-
- // With the source node inserted, we'll now add three nodes to the
- // channel graph, at the end of the scenario, only two of these nodes
- // should still be in the graph.
- node1, err := createTestVertex(db)
- if err != nil {
- t.Fatalf("unable to create test node: %v", err)
- }
- if err := graph.AddLightningNode(node1); err != nil {
- t.Fatalf("unable to add node: %v", err)
- }
- node2, err := createTestVertex(db)
- if err != nil {
- t.Fatalf("unable to create test node: %v", err)
- }
- if err := graph.AddLightningNode(node2); err != nil {
- t.Fatalf("unable to add node: %v", err)
- }
- node3, err := createTestVertex(db)
- if err != nil {
- t.Fatalf("unable to create test node: %v", err)
- }
- if err := graph.AddLightningNode(node3); err != nil {
- t.Fatalf("unable to add node: %v", err)
- }
-
- // We'll now add a new edge to the graph, but only actually advertise
- // the edge of *one* of the nodes.
- edgeInfo, chanID := createEdge(100, 0, 0, 0, node1, node2)
- if err := graph.AddChannelEdge(&edgeInfo); err != nil {
- t.Fatalf("unable to add edge: %v", err)
- }
-
- // We'll now insert an advertised edge, but it'll only be the edge that
- // points from the first to the second node.
- edge1 := randEdgePolicy(chanID.ToUint64(), edgeInfo.ChannelPoint, db)
- edge1.ChannelFlags = 0
- edge1.Node = node1
- edge1.SigBytes = testSig.Serialize()
- if err := graph.UpdateEdgePolicy(edge1); err != nil {
- t.Fatalf("unable to update edge: %v", err)
- }
-
- // We'll now initiate a around of graph pruning.
- if err := graph.PruneGraphNodes(); err != nil {
- t.Fatalf("unable to prune graph nodes: %v", err)
- }
-
- // At this point, there should be 3 nodes left in the graph still: the
- // source node (which can't be pruned), and node 1+2. Nodes 1 and two
- // should still be left in the graph as there's half of an advertised
- // edge between them.
- assertNumNodes(t, graph, 3)
-
- // Finally, we'll ensure that node3, the only fully unconnected node as
- // properly deleted from the graph and not another node in its place.
- _, err = graph.FetchLightningNode(nil, node3.PubKeyBytes)
- if err == nil {
- t.Fatalf("node 3 should have been deleted!")
- }
-}
-
-// TestAddChannelEdgeShellNodes tests that when we attempt to add a ChannelEdge
-// to the graph, one or both of the nodes the edge involves aren't found in the
-// database, then shell edges are created for each node if needed.
-func TestAddChannelEdgeShellNodes(t *testing.T) {
- t.Parallel()
-
- db, cleanUp, err := MakeTestDB()
- defer cleanUp()
- if err != nil {
- t.Fatalf("unable to make test database: %v", err)
- }
-
- graph := db.ChannelGraph()
-
- // To start, we'll create two nodes, and only add one of them to the
- // channel graph.
- node1, err := createTestVertex(db)
- if err != nil {
- t.Fatalf("unable to create test node: %v", err)
- }
- if err := graph.AddLightningNode(node1); err != nil {
- t.Fatalf("unable to add node: %v", err)
- }
- node2, err := createTestVertex(db)
- if err != nil {
- t.Fatalf("unable to create test node: %v", err)
- }
-
- // We'll now create an edge between the two nodes, as a result, node2
- // should be inserted into the database as a shell node.
- edgeInfo, _ := createEdge(100, 0, 0, 0, node1, node2)
- if err := graph.AddChannelEdge(&edgeInfo); err != nil {
- t.Fatalf("unable to add edge: %v", err)
- }
-
- // Ensure that node1 was inserted as a full node, while node2 only has
- // a shell node present.
- node1, err = graph.FetchLightningNode(nil, node1.PubKeyBytes)
- if err != nil {
- t.Fatalf("unable to fetch node1: %v", err)
- }
- if !node1.HaveNodeAnnouncement {
- t.Fatalf("have shell announcement for node1, shouldn't")
- }
-
- node2, err = graph.FetchLightningNode(nil, node2.PubKeyBytes)
- if err != nil {
- t.Fatalf("unable to fetch node2: %v", err)
- }
- if node2.HaveNodeAnnouncement {
- t.Fatalf("should have shell announcement for node2, but is full")
- }
-}
-
-// TestNodePruningUpdateIndexDeletion tests that once a node has been removed
-// from the channel graph, we also remove the entry from the update index as
-// well.
-func TestNodePruningUpdateIndexDeletion(t *testing.T) {
- t.Parallel()
-
- db, cleanUp, err := MakeTestDB()
- defer cleanUp()
- if err != nil {
- t.Fatalf("unable to make test database: %v", err)
- }
-
- graph := db.ChannelGraph()
-
- // We'll first populate our graph with a single node that will be
- // removed shortly.
- node1, err := createTestVertex(db)
- if err != nil {
- t.Fatalf("unable to create test node: %v", err)
- }
- if err := graph.AddLightningNode(node1); err != nil {
- t.Fatalf("unable to add node: %v", err)
- }
-
- // We'll confirm that we can retrieve the node using
- // NodeUpdatesInHorizon, using a time that's slightly beyond the last
- // update time of our test node.
- startTime := time.Unix(9, 0)
- endTime := node1.LastUpdate.Add(time.Minute)
- nodesInHorizon, err := graph.NodeUpdatesInHorizon(startTime, endTime)
- if err != nil {
- t.Fatalf("unable to fetch nodes in horizon: %v", err)
- }
-
- // We should only have a single node, and that node should exactly
- // match the node we just inserted.
- if len(nodesInHorizon) != 1 {
- t.Fatalf("should have 1 nodes instead have: %v",
- len(nodesInHorizon))
- }
- if err := compareNodes(node1, &nodesInHorizon[0]); err != nil {
- t.Fatalf("nodes don't match: %v", err)
- }
-
- // We'll now delete the node from the graph, this should result in it
- // being removed from the update index as well.
- if err := graph.DeleteLightningNode(node1.PubKeyBytes); err != nil {
- t.Fatalf("unable to delete node: %v", err)
- }
-
- // Now that the node has been deleted, we'll again query the nodes in
- // the horizon. This time we should have no nodes at all.
- nodesInHorizon, err = graph.NodeUpdatesInHorizon(startTime, endTime)
- if err != nil {
- t.Fatalf("unable to fetch nodes in horizon: %v", err)
- }
-
- if len(nodesInHorizon) != 0 {
- t.Fatalf("should have zero nodes instead have: %v",
- len(nodesInHorizon))
- }
-}
-
-// TestNodeIsPublic ensures that we properly detect nodes that are seen as
-// public within the network graph.
-func TestNodeIsPublic(t *testing.T) {
- t.Parallel()
-
- // We'll start off the test by creating a small network of 3
- // participants with the following graph:
- //
- // Alice <-> Bob <-> Carol
- //
- // We'll need to create a separate database and channel graph for each
- // participant to replicate real-world scenarios (private edges being in
- // some graphs but not others, etc.).
- aliceDB, cleanUp, err := MakeTestDB()
- defer cleanUp()
- if err != nil {
- t.Fatalf("unable to make test database: %v", err)
- }
- aliceNode, err := createTestVertex(aliceDB)
- if err != nil {
- t.Fatalf("unable to create test node: %v", err)
- }
- aliceGraph := aliceDB.ChannelGraph()
- if err := aliceGraph.SetSourceNode(aliceNode); err != nil {
- t.Fatalf("unable to set source node: %v", err)
- }
-
- bobDB, cleanUp, err := MakeTestDB()
- defer cleanUp()
- if err != nil {
- t.Fatalf("unable to make test database: %v", err)
- }
- bobNode, err := createTestVertex(bobDB)
- if err != nil {
- t.Fatalf("unable to create test node: %v", err)
- }
- bobGraph := bobDB.ChannelGraph()
- if err := bobGraph.SetSourceNode(bobNode); err != nil {
- t.Fatalf("unable to set source node: %v", err)
- }
-
- carolDB, cleanUp, err := MakeTestDB()
- defer cleanUp()
- if err != nil {
- t.Fatalf("unable to make test database: %v", err)
- }
- carolNode, err := createTestVertex(carolDB)
- if err != nil {
- t.Fatalf("unable to create test node: %v", err)
- }
- carolGraph := carolDB.ChannelGraph()
- if err := carolGraph.SetSourceNode(carolNode); err != nil {
- t.Fatalf("unable to set source node: %v", err)
- }
-
- aliceBobEdge, _ := createEdge(10, 0, 0, 0, aliceNode, bobNode)
- bobCarolEdge, _ := createEdge(10, 1, 0, 1, bobNode, carolNode)
-
- // After creating all of our nodes and edges, we'll add them to each
- // participant's graph.
- nodes := []*LightningNode{aliceNode, bobNode, carolNode}
- edges := []*ChannelEdgeInfo{&aliceBobEdge, &bobCarolEdge}
- dbs := []*DB{aliceDB, bobDB, carolDB}
- graphs := []*ChannelGraph{aliceGraph, bobGraph, carolGraph}
- for i, graph := range graphs {
- for _, node := range nodes {
- node.db = dbs[i]
- if err := graph.AddLightningNode(node); err != nil {
- t.Fatalf("unable to add node: %v", err)
- }
- }
- for _, edge := range edges {
- edge.db = dbs[i]
- if err := graph.AddChannelEdge(edge); err != nil {
- t.Fatalf("unable to add edge: %v", err)
- }
- }
- }
-
- // checkNodes is a helper closure that will be used to assert that the
- // given nodes are seen as public/private within the given graphs.
- checkNodes := func(nodes []*LightningNode, graphs []*ChannelGraph,
- public bool) {
-
- t.Helper()
-
- for _, node := range nodes {
- for _, graph := range graphs {
- isPublic, err := graph.IsPublicNode(node.PubKeyBytes)
- if err != nil {
- t.Fatalf("unable to determine if pivot "+
- "is public: %v", err)
- }
-
- switch {
- case isPublic && !public:
- t.Fatalf("expected %x to be private",
- node.PubKeyBytes)
- case !isPublic && public:
- t.Fatalf("expected %x to be public",
- node.PubKeyBytes)
- }
- }
- }
- }
-
- // Due to the way the edges were set up above, we'll make sure each node
- // can correctly determine that every other node is public.
- checkNodes(nodes, graphs, true)
-
- // Now, we'll remove the edge between Alice and Bob from everyone's
- // graph. This will make Alice be seen as a private node as it no longer
- // has any advertised edges.
- for _, graph := range graphs {
- err := graph.DeleteChannelEdges(aliceBobEdge.ChannelID)
- if err != nil {
- t.Fatalf("unable to remove edge: %v", err)
- }
- }
- checkNodes(
- []*LightningNode{aliceNode},
- []*ChannelGraph{bobGraph, carolGraph},
- false,
- )
-
- // We'll also make the edge between Bob and Carol private. Within Bob's
- // and Carol's graph, the edge will exist, but it will not have a proof
- // that allows it to be advertised. Within Alice's graph, we'll
- // completely remove the edge as it is not possible for her to know of
- // it without it being advertised.
- for i, graph := range graphs {
- err := graph.DeleteChannelEdges(bobCarolEdge.ChannelID)
- if err != nil {
- t.Fatalf("unable to remove edge: %v", err)
- }
-
- if graph == aliceGraph {
- continue
- }
-
- bobCarolEdge.AuthProof = nil
- bobCarolEdge.db = dbs[i]
- if err := graph.AddChannelEdge(&bobCarolEdge); err != nil {
- t.Fatalf("unable to add edge: %v", err)
- }
- }
-
- // With the modifications above, Bob should now be seen as a private
- // node from both Alice's and Carol's perspective.
- checkNodes(
- []*LightningNode{bobNode},
- []*ChannelGraph{aliceGraph, carolGraph},
- false,
- )
-}
-
-// TestDisabledChannelIDs ensures that the disabled channels within the
-// disabledEdgePolicyBucket are managed properly and the list returned from
-// DisabledChannelIDs is correct.
-func TestDisabledChannelIDs(t *testing.T) {
- t.Parallel()
-
- db, cleanUp, err := MakeTestDB()
- if err != nil {
- t.Fatalf("unable to make test database: %v", err)
- }
- defer cleanUp()
-
- graph := db.ChannelGraph()
-
- // Create first node and add it to the graph.
- node1, err := createTestVertex(db)
- if err != nil {
- t.Fatalf("unable to create test node: %v", err)
- }
- if err := graph.AddLightningNode(node1); err != nil {
- t.Fatalf("unable to add node: %v", err)
- }
-
- // Create second node and add it to the graph.
- node2, err := createTestVertex(db)
- if err != nil {
- t.Fatalf("unable to create test node: %v", err)
- }
- if err := graph.AddLightningNode(node2); err != nil {
- t.Fatalf("unable to add node: %v", err)
- }
-
- // Adding a new channel edge to the graph.
- edgeInfo, edge1, edge2 := createChannelEdge(db, node1, node2)
- if err := graph.AddLightningNode(node2); err != nil {
- t.Fatalf("unable to add node: %v", err)
- }
-
- if err := graph.AddChannelEdge(edgeInfo); err != nil {
- t.Fatalf("unable to create channel edge: %v", err)
- }
-
- // Ensure no disabled channels exist in the bucket on start.
- disabledChanIds, err := graph.DisabledChannelIDs()
- if err != nil {
- t.Fatalf("unable to get disabled channel ids: %v", err)
- }
- if len(disabledChanIds) > 0 {
- t.Fatalf("expected empty disabled channels, got %v disabled channels",
- len(disabledChanIds))
- }
-
- // Add one disabled policy and ensure the channel is still not in the
- // disabled list.
- edge1.ChannelFlags |= lnwire.ChanUpdateDisabled
- if err := graph.UpdateEdgePolicy(edge1); err != nil {
- t.Fatalf("unable to update edge: %v", err)
- }
- disabledChanIds, err = graph.DisabledChannelIDs()
- if err != nil {
- t.Fatalf("unable to get disabled channel ids: %v", err)
- }
- if len(disabledChanIds) > 0 {
- t.Fatalf("expected empty disabled channels, got %v disabled channels",
- len(disabledChanIds))
- }
-
- // Add second disabled policy and ensure the channel is now in the
- // disabled list.
- edge2.ChannelFlags |= lnwire.ChanUpdateDisabled
- if err := graph.UpdateEdgePolicy(edge2); err != nil {
- t.Fatalf("unable to update edge: %v", err)
- }
- disabledChanIds, err = graph.DisabledChannelIDs()
- if err != nil {
- t.Fatalf("unable to get disabled channel ids: %v", err)
- }
- if len(disabledChanIds) != 1 || disabledChanIds[0] != edgeInfo.ChannelID {
- t.Fatalf("expected disabled channel with id %v, "+
- "got %v", edgeInfo.ChannelID, disabledChanIds)
- }
-
- // Delete the channel edge and ensure it is removed from the disabled list.
- if err = graph.DeleteChannelEdges(edgeInfo.ChannelID); err != nil {
- t.Fatalf("unable to delete channel edge: %v", err)
- }
- disabledChanIds, err = graph.DisabledChannelIDs()
- if err != nil {
- t.Fatalf("unable to get disabled channel ids: %v", err)
- }
- if len(disabledChanIds) > 0 {
- t.Fatalf("expected empty disabled channels, got %v disabled channels",
- len(disabledChanIds))
- }
-}
-
-// TestEdgePolicyMissingMaxHtcl tests that if we find a ChannelEdgePolicy in
-// the DB that indicates that it should support the htlc_maximum_value_msat
-// field, but it is not part of the opaque data, then we'll handle it as it is
-// unknown. It also checks that we are correctly able to overwrite it when we
-// receive the proper update.
-func TestEdgePolicyMissingMaxHtcl(t *testing.T) {
- t.Parallel()
-
- db, cleanUp, err := MakeTestDB()
- defer cleanUp()
- if err != nil {
- t.Fatalf("unable to make test database: %v", err)
- }
-
- graph := db.ChannelGraph()
-
- // We'd like to test the update of edges inserted into the database, so
- // we create two vertexes to connect.
- node1, err := createTestVertex(db)
- if err != nil {
- t.Fatalf("unable to create test node: %v", err)
- }
- if err := graph.AddLightningNode(node1); err != nil {
- t.Fatalf("unable to add node: %v", err)
- }
- node2, err := createTestVertex(db)
- if err != nil {
- t.Fatalf("unable to create test node: %v", err)
- }
-
- edgeInfo, edge1, edge2 := createChannelEdge(db, node1, node2)
- if err := graph.AddLightningNode(node2); err != nil {
- t.Fatalf("unable to add node: %v", err)
- }
- if err := graph.AddChannelEdge(edgeInfo); err != nil {
- t.Fatalf("unable to create channel edge: %v", err)
- }
-
- chanID := edgeInfo.ChannelID
- from := edge2.Node.PubKeyBytes[:]
- to := edge1.Node.PubKeyBytes[:]
-
- // We'll remove the no max_htlc field from the first edge policy, and
- // all other opaque data, and serialize it.
- edge1.MessageFlags = 0
- edge1.ExtraOpaqueData = nil
-
- var b bytes.Buffer
- err = serializeChanEdgePolicy(&b, edge1, to)
- if err != nil {
- t.Fatalf("unable to serialize policy")
- }
-
- // Set the max_htlc field. The extra bytes added to the serialization
- // will be the opaque data containing the serialized field.
- edge1.MessageFlags = lnwire.ChanUpdateOptionMaxHtlc
- edge1.MaxHTLC = 13928598
- var b2 bytes.Buffer
- err = serializeChanEdgePolicy(&b2, edge1, to)
- if err != nil {
- t.Fatalf("unable to serialize policy")
- }
-
- withMaxHtlc := b2.Bytes()
-
- // Remove the opaque data from the serialization.
- stripped := withMaxHtlc[:len(b.Bytes())]
-
- // Attempting to deserialize these bytes should return an error.
- r := bytes.NewReader(stripped)
- err = kvdb.View(db, func(tx kvdb.RTx) er.R {
- nodes := tx.ReadBucket(nodeBucket)
- if nodes == nil {
- return ErrGraphNotFound.Default()
- }
-
- _, err = deserializeChanEdgePolicy(r, nodes)
- if !ErrEdgePolicyOptionalFieldNotFound.Is(err) {
- t.Fatalf("expected "+
- "ErrEdgePolicyOptionalFieldNotFound, got %v",
- err)
- }
-
- return nil
- }, func() {})
- if err != nil {
- t.Fatalf("error reading db: %v", err)
- }
-
- // Put the stripped bytes in the DB.
- err = kvdb.Update(db, func(tx kvdb.RwTx) er.R {
- edges := tx.ReadWriteBucket(edgeBucket)
- if edges == nil {
- return ErrEdgeNotFound.Default()
- }
-
- edgeIndex := edges.NestedReadWriteBucket(edgeIndexBucket)
- if edgeIndex == nil {
- return ErrEdgeNotFound.Default()
- }
-
- var edgeKey [33 + 8]byte
- copy(edgeKey[:], from)
- byteOrder.PutUint64(edgeKey[33:], edge1.ChannelID)
-
- var scratch [8]byte
- var indexKey [8 + 8]byte
- copy(indexKey[:], scratch[:])
- byteOrder.PutUint64(indexKey[8:], edge1.ChannelID)
-
- updateIndex, err := edges.CreateBucketIfNotExists(edgeUpdateIndexBucket)
- if err != nil {
- return err
- }
-
- if err := updateIndex.Put(indexKey[:], nil); err != nil {
- return err
- }
-
- return edges.Put(edgeKey[:], stripped)
- }, func() {})
- if err != nil {
- t.Fatalf("error writing db: %v", err)
- }
-
- // And add the second, unmodified edge.
- if err := graph.UpdateEdgePolicy(edge2); err != nil {
- t.Fatalf("unable to update edge: %v", err)
- }
-
- // Attempt to fetch the edge and policies from the DB. Since the policy
- // we added is invalid according to the new format, it should be as we
- // are not aware of the policy (indicated by the policy returned being
- // nil)
- dbEdgeInfo, dbEdge1, dbEdge2, err := graph.FetchChannelEdgesByID(chanID)
- if err != nil {
- t.Fatalf("unable to fetch channel by ID: %v", err)
- }
-
- // The first edge should have a nil-policy returned
- if dbEdge1 != nil {
- t.Fatalf("expected db edge to be nil")
- }
- if err := compareEdgePolicies(dbEdge2, edge2); err != nil {
- t.Fatalf("edge doesn't match: %v", err)
- }
- assertEdgeInfoEqual(t, dbEdgeInfo, edgeInfo)
-
- // Now add the original, unmodified edge policy, and make sure the edge
- // policies then become fully populated.
- if err := graph.UpdateEdgePolicy(edge1); err != nil {
- t.Fatalf("unable to update edge: %v", err)
- }
-
- dbEdgeInfo, dbEdge1, dbEdge2, err = graph.FetchChannelEdgesByID(chanID)
- if err != nil {
- t.Fatalf("unable to fetch channel by ID: %v", err)
- }
- if err := compareEdgePolicies(dbEdge1, edge1); err != nil {
- t.Fatalf("edge doesn't match: %v", err)
- }
- if err := compareEdgePolicies(dbEdge2, edge2); err != nil {
- t.Fatalf("edge doesn't match: %v", err)
- }
- assertEdgeInfoEqual(t, dbEdgeInfo, edgeInfo)
-}
-
-// assertNumZombies queries the provided ChannelGraph for NumZombies, and
-// asserts that the returned number is equal to expZombies.
-func assertNumZombies(t *testing.T, graph *ChannelGraph, expZombies uint64) {
- t.Helper()
-
- numZombies, err := graph.NumZombies()
- if err != nil {
- t.Fatalf("unable to query number of zombies: %v", err)
- }
-
- if numZombies != expZombies {
- t.Fatalf("expected %d zombies, found %d",
- expZombies, numZombies)
- }
-}
-
-// TestGraphZombieIndex ensures that we can mark edges correctly as zombie/live.
-func TestGraphZombieIndex(t *testing.T) {
- t.Parallel()
-
- // We'll start by creating our test graph along with a test edge.
- db, cleanUp, err := MakeTestDB()
- defer cleanUp()
- if err != nil {
- t.Fatalf("unable to create test database: %v", err)
- }
- graph := db.ChannelGraph()
-
- node1, err := createTestVertex(db)
- if err != nil {
- t.Fatalf("unable to create test vertex: %v", err)
- }
- node2, err := createTestVertex(db)
- if err != nil {
- t.Fatalf("unable to create test vertex: %v", err)
- }
-
- // Swap the nodes if the second's pubkey is smaller than the first.
- // Without this, the comparisons at the end will fail probabilistically.
- if bytes.Compare(node2.PubKeyBytes[:], node1.PubKeyBytes[:]) < 0 {
- node1, node2 = node2, node1
- }
-
- edge, _, _ := createChannelEdge(db, node1, node2)
- if err := graph.AddChannelEdge(edge); err != nil {
- t.Fatalf("unable to create channel edge: %v", err)
- }
-
- // Since the edge is known the graph and it isn't a zombie, IsZombieEdge
- // should not report the channel as a zombie.
- isZombie, _, _ := graph.IsZombieEdge(edge.ChannelID)
- if isZombie {
- t.Fatal("expected edge to not be marked as zombie")
- }
- assertNumZombies(t, graph, 0)
-
- // If we delete the edge and mark it as a zombie, then we should expect
- // to see it within the index.
- err = graph.DeleteChannelEdges(edge.ChannelID)
- if err != nil {
- t.Fatalf("unable to mark edge as zombie: %v", err)
- }
- isZombie, pubKey1, pubKey2 := graph.IsZombieEdge(edge.ChannelID)
- if !isZombie {
- t.Fatal("expected edge to be marked as zombie")
- }
- if pubKey1 != node1.PubKeyBytes {
- t.Fatalf("expected pubKey1 %x, got %x", node1.PubKeyBytes,
- pubKey1)
- }
- if pubKey2 != node2.PubKeyBytes {
- t.Fatalf("expected pubKey2 %x, got %x", node2.PubKeyBytes,
- pubKey2)
- }
- assertNumZombies(t, graph, 1)
-
- // Similarly, if we mark the same edge as live, we should no longer see
- // it within the index.
- if err := graph.MarkEdgeLive(edge.ChannelID); err != nil {
- t.Fatalf("unable to mark edge as live: %v", err)
- }
- isZombie, _, _ = graph.IsZombieEdge(edge.ChannelID)
- if isZombie {
- t.Fatal("expected edge to not be marked as zombie")
- }
- assertNumZombies(t, graph, 0)
-}
-
-// compareNodes is used to compare two LightningNodes while excluding the
-// Features struct, which cannot be compared as the semantics for reserializing
-// the featuresMap have not been defined.
-func compareNodes(a, b *LightningNode) er.R {
- if a.LastUpdate != b.LastUpdate {
- return er.Errorf("node LastUpdate doesn't match: expected %v, \n"+
- "got %v", a.LastUpdate, b.LastUpdate)
- }
- if !reflect.DeepEqual(a.Addresses, b.Addresses) {
- return er.Errorf("Addresses doesn't match: expected %#v, \n "+
- "got %#v", a.Addresses, b.Addresses)
- }
- if !reflect.DeepEqual(a.PubKeyBytes, b.PubKeyBytes) {
- return er.Errorf("PubKey doesn't match: expected %#v, \n "+
- "got %#v", a.PubKeyBytes, b.PubKeyBytes)
- }
- if !reflect.DeepEqual(a.Color, b.Color) {
- return er.Errorf("Color doesn't match: expected %#v, \n "+
- "got %#v", a.Color, b.Color)
- }
- if !reflect.DeepEqual(a.Alias, b.Alias) {
- return er.Errorf("Alias doesn't match: expected %#v, \n "+
- "got %#v", a.Alias, b.Alias)
- }
- if !reflect.DeepEqual(a.db, b.db) {
- return er.Errorf("db doesn't match: expected %#v, \n "+
- "got %#v", a.db, b.db)
- }
- if !reflect.DeepEqual(a.HaveNodeAnnouncement, b.HaveNodeAnnouncement) {
- return er.Errorf("HaveNodeAnnouncement doesn't match: expected %#v, \n "+
- "got %#v", a.HaveNodeAnnouncement, b.HaveNodeAnnouncement)
- }
- if !bytes.Equal(a.ExtraOpaqueData, b.ExtraOpaqueData) {
- return er.Errorf("extra data doesn't match: %v vs %v",
- a.ExtraOpaqueData, b.ExtraOpaqueData)
- }
-
- return nil
-}
-
-// compareEdgePolicies is used to compare two ChannelEdgePolices using
-// compareNodes, so as to exclude comparisons of the Nodes' Features struct.
-func compareEdgePolicies(a, b *ChannelEdgePolicy) er.R {
- if a.ChannelID != b.ChannelID {
- return er.Errorf("ChannelID doesn't match: expected %v, "+
- "got %v", a.ChannelID, b.ChannelID)
- }
- if !reflect.DeepEqual(a.LastUpdate, b.LastUpdate) {
- return er.Errorf("edge LastUpdate doesn't match: expected %#v, \n "+
- "got %#v", a.LastUpdate, b.LastUpdate)
- }
- if a.MessageFlags != b.MessageFlags {
- return er.Errorf("MessageFlags doesn't match: expected %v, "+
- "got %v", a.MessageFlags, b.MessageFlags)
- }
- if a.ChannelFlags != b.ChannelFlags {
- return er.Errorf("ChannelFlags doesn't match: expected %v, "+
- "got %v", a.ChannelFlags, b.ChannelFlags)
- }
- if a.TimeLockDelta != b.TimeLockDelta {
- return er.Errorf("TimeLockDelta doesn't match: expected %v, "+
- "got %v", a.TimeLockDelta, b.TimeLockDelta)
- }
- if a.MinHTLC != b.MinHTLC {
- return er.Errorf("MinHTLC doesn't match: expected %v, "+
- "got %v", a.MinHTLC, b.MinHTLC)
- }
- if a.MaxHTLC != b.MaxHTLC {
- return er.Errorf("MaxHTLC doesn't match: expected %v, "+
- "got %v", a.MaxHTLC, b.MaxHTLC)
- }
- if a.FeeBaseMSat != b.FeeBaseMSat {
- return er.Errorf("FeeBaseMSat doesn't match: expected %v, "+
- "got %v", a.FeeBaseMSat, b.FeeBaseMSat)
- }
- if a.FeeProportionalMillionths != b.FeeProportionalMillionths {
- return er.Errorf("FeeProportionalMillionths doesn't match: "+
- "expected %v, got %v", a.FeeProportionalMillionths,
- b.FeeProportionalMillionths)
- }
- if !bytes.Equal(a.ExtraOpaqueData, b.ExtraOpaqueData) {
- return er.Errorf("extra data doesn't match: %v vs %v",
- a.ExtraOpaqueData, b.ExtraOpaqueData)
- }
- if err := compareNodes(a.Node, b.Node); err != nil {
- return err
- }
- if !reflect.DeepEqual(a.db, b.db) {
- return er.Errorf("db doesn't match: expected %#v, \n "+
- "got %#v", a.db, b.db)
- }
- return nil
-}
-
-// TestLightningNodeSigVerifcation checks that we can use the LightningNode's
-// pubkey to verify signatures.
-func TestLightningNodeSigVerification(t *testing.T) {
- t.Parallel()
-
- // Create some dummy data to sign.
- var data [32]byte
- if _, err := prand.Read(data[:]); err != nil {
- t.Fatalf("unable to read prand: %v", err)
- }
-
- // Create private key and sign the data with it.
- priv, err := btcec.NewPrivateKey(btcec.S256())
- if err != nil {
- t.Fatalf("unable to crete priv key: %v", err)
- }
-
- sign, err := priv.Sign(data[:])
- if err != nil {
- t.Fatalf("unable to sign: %v", err)
- }
-
- // Sanity check that the signature checks out.
- if !sign.Verify(data[:], priv.PubKey()) {
- t.Fatalf("signature doesn't check out")
- }
-
- // Create a LightningNode from the same private key.
- db, cleanUp, errr := MakeTestDB()
- if errr != nil {
- t.Fatalf("unable to make test database: %v", errr)
- }
- defer cleanUp()
-
- node, errr := createLightningNode(db, priv)
- if errr != nil {
- t.Fatalf("unable to create node: %v", errr)
- }
-
- // And finally check that we can verify the same signature from the
- // pubkey returned from the lightning node.
- nodePub, errr := node.PubKey()
- if errr != nil {
- t.Fatalf("unable to get pubkey: %v", errr)
- }
-
- if !sign.Verify(data[:], nodePub) {
- t.Fatalf("unable to verify sig")
- }
-}
-
-// TestComputeFee tests fee calculation based on both in- and outgoing amt.
-func TestComputeFee(t *testing.T) {
- var (
- policy = ChannelEdgePolicy{
- FeeBaseMSat: 10000,
- FeeProportionalMillionths: 30000,
- }
- outgoingAmt = lnwire.MilliSatoshi(1000000)
- expectedFee = lnwire.MilliSatoshi(40000)
- )
-
- fee := policy.ComputeFee(outgoingAmt)
- if fee != expectedFee {
- t.Fatalf("expected fee %v, got %v", expectedFee, fee)
- }
-
- fwdFee := policy.ComputeFeeFromIncoming(outgoingAmt + fee)
- if fwdFee != expectedFee {
- t.Fatalf("expected fee %v, but got %v", fee, fwdFee)
- }
-}
diff --git a/lnd/channeldb/invoice_test.go b/lnd/channeldb/invoice_test.go
deleted file mode 100644
index 517d117a..00000000
--- a/lnd/channeldb/invoice_test.go
+++ /dev/null
@@ -1,1248 +0,0 @@
-package channeldb
-
-import (
- "crypto/rand"
- "fmt"
- "math"
- "testing"
- "time"
-
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/btcutil/util"
- "github.com/pkt-cash/pktd/lnd/lntypes"
- "github.com/pkt-cash/pktd/lnd/lnwire"
- "github.com/pkt-cash/pktd/lnd/record"
- "github.com/stretchr/testify/require"
-)
-
-var (
- emptyFeatures = lnwire.NewFeatureVector(nil, lnwire.Features)
- testNow = time.Unix(1, 0)
-)
-
-func randInvoice(value lnwire.MilliSatoshi) (*Invoice, er.R) {
- var (
- pre lntypes.Preimage
- payAddr [32]byte
- )
- if _, err := rand.Read(pre[:]); err != nil {
- return nil, er.E(err)
- }
- if _, err := rand.Read(payAddr[:]); err != nil {
- return nil, er.E(err)
- }
-
- i := &Invoice{
- CreationDate: testNow,
- Terms: ContractTerm{
- Expiry: 4000,
- PaymentPreimage: &pre,
- PaymentAddr: payAddr,
- Value: value,
- Features: emptyFeatures,
- },
- Htlcs: map[CircuitKey]*InvoiceHTLC{},
- }
- i.Memo = []byte("memo")
-
- // Create a random byte slice of MaxPaymentRequestSize bytes to be used
- // as a dummy paymentrequest, and determine if it should be set based
- // on one of the random bytes.
- var r [MaxPaymentRequestSize]byte
- if _, err := rand.Read(r[:]); err != nil {
- return nil, er.E(err)
- }
- if r[0]&1 == 0 {
- i.PaymentRequest = r[:]
- } else {
- i.PaymentRequest = []byte("")
- }
-
- return i, nil
-}
-
-// settleTestInvoice settles a test invoice.
-func settleTestInvoice(invoice *Invoice, settleIndex uint64) {
- invoice.SettleDate = testNow
- invoice.AmtPaid = invoice.Terms.Value
- invoice.State = ContractSettled
- invoice.Htlcs[CircuitKey{}] = &InvoiceHTLC{
- Amt: invoice.Terms.Value,
- AcceptTime: testNow,
- ResolveTime: testNow,
- State: HtlcStateSettled,
- CustomRecords: make(record.CustomSet),
- }
- invoice.SettleIndex = settleIndex
-}
-
-// Tests that pending invoices are those which are either in ContractOpen or
-// in ContractAccepted state.
-func TestInvoiceIsPending(t *testing.T) {
- contractStates := []ContractState{
- ContractOpen, ContractSettled, ContractCanceled, ContractAccepted,
- }
-
- for _, state := range contractStates {
- invoice := Invoice{
- State: state,
- }
-
- // We expect that an invoice is pending if it's either in ContractOpen
- // or ContractAccepted state.
- pending := (state == ContractOpen || state == ContractAccepted)
-
- if invoice.IsPending() != pending {
- t.Fatalf("expected pending: %v, got: %v, invoice: %v",
- pending, invoice.IsPending(), invoice)
- }
- }
-}
-
-type invWorkflowTest struct {
- name string
- queryPayHash bool
- queryPayAddr bool
-}
-
-var invWorkflowTests = []invWorkflowTest{
- {
- name: "unknown",
- queryPayHash: false,
- queryPayAddr: false,
- },
- {
- name: "only payhash known",
- queryPayHash: true,
- queryPayAddr: false,
- },
- {
- name: "payaddr and payhash known",
- queryPayHash: true,
- queryPayAddr: true,
- },
-}
-
-// TestInvoiceWorkflow asserts the basic process of inserting, fetching, and
-// updating an invoice. We assert that the flow is successful using when
-// querying with various combinations of payment hash and payment address.
-func TestInvoiceWorkflow(t *testing.T) {
- t.Parallel()
-
- for _, test := range invWorkflowTests {
- test := test
- t.Run(test.name, func(t *testing.T) {
- testInvoiceWorkflow(t, test)
- })
- }
-}
-
-func testInvoiceWorkflow(t *testing.T, test invWorkflowTest) {
- db, cleanUp, err := MakeTestDB()
- defer cleanUp()
- if err != nil {
- t.Fatalf("unable to make test db: %v", err)
- }
-
- // Create a fake invoice which we'll use several times in the tests
- // below.
- fakeInvoice, err := randInvoice(10000)
- if err != nil {
- t.Fatalf("unable to create invoice: %v", err)
- }
- invPayHash := fakeInvoice.Terms.PaymentPreimage.Hash()
-
- // Select the payment hash and payment address we will use to lookup or
- // update the invoice for the remainder of the test.
- var (
- payHash lntypes.Hash
- payAddr *[32]byte
- ref InvoiceRef
- )
- switch {
- case test.queryPayHash && test.queryPayAddr:
- payHash = invPayHash
- payAddr = &fakeInvoice.Terms.PaymentAddr
- ref = InvoiceRefByHashAndAddr(payHash, *payAddr)
- case test.queryPayHash:
- payHash = invPayHash
- ref = InvoiceRefByHash(payHash)
- }
-
- // Add the invoice to the database, this should succeed as there aren't
- // any existing invoices within the database with the same payment
- // hash.
- if _, err := db.AddInvoice(fakeInvoice, invPayHash); err != nil {
- t.Fatalf("unable to find invoice: %v", err)
- }
-
- // Attempt to retrieve the invoice which was just added to the
- // database. It should be found, and the invoice returned should be
- // identical to the one created above.
- dbInvoice, err := db.LookupInvoice(ref)
- if !test.queryPayAddr && !test.queryPayHash {
- if !ErrInvoiceNotFound.Is(err) {
- t.Fatalf("invoice should not exist: %v", err)
- }
- return
- }
-
- require.Equal(t,
- *fakeInvoice, dbInvoice,
- "invoice fetched from db doesn't match original",
- )
-
- // The add index of the invoice retrieved from the database should now
- // be fully populated. As this is the first index written to the DB,
- // the addIndex should be 1.
- if dbInvoice.AddIndex != 1 {
- t.Fatalf("wrong add index: expected %v, got %v", 1,
- dbInvoice.AddIndex)
- }
-
- // Settle the invoice, the version retrieved from the database should
- // now have the settled bit toggle to true and a non-default
- // SettledDate
- payAmt := fakeInvoice.Terms.Value * 2
- _, err = db.UpdateInvoice(ref, getUpdateInvoice(payAmt))
- if err != nil {
- t.Fatalf("unable to settle invoice: %v", err)
- }
- dbInvoice2, err := db.LookupInvoice(ref)
- if err != nil {
- t.Fatalf("unable to fetch invoice: %v", err)
- }
- if dbInvoice2.State != ContractSettled {
- t.Fatalf("invoice should now be settled but isn't")
- }
- if dbInvoice2.SettleDate.IsZero() {
- t.Fatalf("invoice should have non-zero SettledDate but isn't")
- }
-
- // Our 2x payment should be reflected, and also the settle index of 1
- // should also have been committed for this index.
- if dbInvoice2.AmtPaid != payAmt {
- t.Fatalf("wrong amt paid: expected %v, got %v", payAmt,
- dbInvoice2.AmtPaid)
- }
- if dbInvoice2.SettleIndex != 1 {
- t.Fatalf("wrong settle index: expected %v, got %v", 1,
- dbInvoice2.SettleIndex)
- }
-
- // Attempt to insert generated above again, this should fail as
- // duplicates are rejected by the processing logic.
- if _, err := db.AddInvoice(fakeInvoice, payHash); !ErrDuplicateInvoice.Is(err) {
- t.Fatalf("invoice insertion should fail due to duplication, "+
- "instead %v", err)
- }
-
- // Attempt to look up a non-existent invoice, this should also fail but
- // with a "not found" error.
- var fakeHash [32]byte
- fakeRef := InvoiceRefByHash(fakeHash)
- _, err = db.LookupInvoice(fakeRef)
- if !ErrInvoiceNotFound.Is(err) {
- t.Fatalf("lookup should have failed, instead %v", err)
- }
-
- // Add 10 random invoices.
- const numInvoices = 10
- amt := lnwire.NewMSatFromSatoshis(1000)
- invoices := make([]*Invoice, numInvoices+1)
- invoices[0] = &dbInvoice2
- for i := 1; i < len(invoices); i++ {
- invoice, err := randInvoice(amt)
- if err != nil {
- t.Fatalf("unable to create invoice: %v", err)
- }
-
- hash := invoice.Terms.PaymentPreimage.Hash()
- if _, err := db.AddInvoice(invoice, hash); err != nil {
- t.Fatalf("unable to add invoice %v", err)
- }
-
- invoices[i] = invoice
- }
-
- // Perform a scan to collect all the active invoices.
- query := InvoiceQuery{
- IndexOffset: 0,
- NumMaxInvoices: math.MaxUint64,
- PendingOnly: false,
- }
-
- response, err := db.QueryInvoices(query)
- if err != nil {
- t.Fatalf("invoice query failed: %v", err)
- }
-
- // The retrieve list of invoices should be identical as since we're
- // using big endian, the invoices should be retrieved in ascending
- // order (and the primary key should be incremented with each
- // insertion).
- for i := 0; i < len(invoices); i++ {
- require.Equal(t,
- *invoices[i], response.Invoices[i],
- "retrieved invoice doesn't match",
- )
- }
-}
-
-// TestAddDuplicatePayAddr asserts that the payment addresses of inserted
-// invoices are unique.
-func TestAddDuplicatePayAddr(t *testing.T) {
- db, cleanUp, err := MakeTestDB()
- defer cleanUp()
- util.RequireNoErr(t, err)
-
- // Create two invoices with the same payment addr.
- invoice1, err := randInvoice(1000)
- util.RequireNoErr(t, err)
-
- invoice2, err := randInvoice(20000)
- util.RequireNoErr(t, err)
- invoice2.Terms.PaymentAddr = invoice1.Terms.PaymentAddr
-
- // First insert should succeed.
- inv1Hash := invoice1.Terms.PaymentPreimage.Hash()
- _, err = db.AddInvoice(invoice1, inv1Hash)
- util.RequireNoErr(t, err)
-
- // Second insert should fail with duplicate payment addr.
- inv2Hash := invoice2.Terms.PaymentPreimage.Hash()
- _, err = db.AddInvoice(invoice2, inv2Hash)
- util.RequireErr(t, err, ErrDuplicatePayAddr)
-}
-
-// TestAddDuplicateKeysendPayAddr asserts that we permit duplicate payment
-// addresses to be inserted if they are blank to support JIT legacy keysend
-// invoices.
-func TestAddDuplicateKeysendPayAddr(t *testing.T) {
- db, cleanUp, err := MakeTestDB()
- defer cleanUp()
- util.RequireNoErr(t, err)
-
- // Create two invoices with the same _blank_ payment addr.
- invoice1, err := randInvoice(1000)
- util.RequireNoErr(t, err)
- invoice1.Terms.PaymentAddr = BlankPayAddr
-
- invoice2, err := randInvoice(20000)
- util.RequireNoErr(t, err)
- invoice2.Terms.PaymentAddr = BlankPayAddr
-
- // Inserting both should succeed without a duplicate payment address
- // failure.
- inv1Hash := invoice1.Terms.PaymentPreimage.Hash()
- _, err = db.AddInvoice(invoice1, inv1Hash)
- util.RequireNoErr(t, err)
-
- inv2Hash := invoice2.Terms.PaymentPreimage.Hash()
- _, err = db.AddInvoice(invoice2, inv2Hash)
- util.RequireNoErr(t, err)
-
- // Querying for each should succeed. Here we use hash+addr refs since
- // the lookup will fail if the hash and addr point to different
- // invoices, so if both succeed we can be assured they aren't included
- // in the payment address index.
- ref1 := InvoiceRefByHashAndAddr(inv1Hash, BlankPayAddr)
- dbInv1, err := db.LookupInvoice(ref1)
- util.RequireNoErr(t, err)
- require.Equal(t, invoice1, &dbInv1)
-
- ref2 := InvoiceRefByHashAndAddr(inv2Hash, BlankPayAddr)
- dbInv2, err := db.LookupInvoice(ref2)
- util.RequireNoErr(t, err)
- require.Equal(t, invoice2, &dbInv2)
-}
-
-// TestInvRefEquivocation asserts that retrieving or updating an invoice using
-// an equivocating InvoiceRef results in ErrInvRefEquivocation.
-func TestInvRefEquivocation(t *testing.T) {
- db, cleanUp, err := MakeTestDB()
- defer cleanUp()
- util.RequireNoErr(t, err)
-
- // Add two random invoices.
- invoice1, err := randInvoice(1000)
- util.RequireNoErr(t, err)
-
- inv1Hash := invoice1.Terms.PaymentPreimage.Hash()
- _, err = db.AddInvoice(invoice1, inv1Hash)
- util.RequireNoErr(t, err)
-
- invoice2, err := randInvoice(2000)
- util.RequireNoErr(t, err)
-
- inv2Hash := invoice2.Terms.PaymentPreimage.Hash()
- _, err = db.AddInvoice(invoice2, inv2Hash)
- util.RequireNoErr(t, err)
-
- // Now, query using invoice 1's payment address, but invoice 2's payment
- // hash. We expect an error since the invref points to multiple
- // invoices.
- ref := InvoiceRefByHashAndAddr(inv2Hash, invoice1.Terms.PaymentAddr)
- _, err = db.LookupInvoice(ref)
- util.RequireErr(t, err, ErrInvRefEquivocation)
-
- // The same error should be returned when updating an equivocating
- // reference.
- nop := func(_ *Invoice) (*InvoiceUpdateDesc, er.R) {
- return nil, nil
- }
- _, err = db.UpdateInvoice(ref, nop)
- util.RequireErr(t, err, ErrInvRefEquivocation)
-}
-
-// TestInvoiceCancelSingleHtlc tests that a single htlc can be canceled on the
-// invoice.
-func TestInvoiceCancelSingleHtlc(t *testing.T) {
- t.Parallel()
-
- db, cleanUp, err := MakeTestDB()
- defer cleanUp()
- if err != nil {
- t.Fatalf("unable to make test db: %v", err)
- }
-
- preimage := lntypes.Preimage{1}
- paymentHash := preimage.Hash()
-
- testInvoice := &Invoice{
- Htlcs: map[CircuitKey]*InvoiceHTLC{},
- Terms: ContractTerm{
- Value: lnwire.NewMSatFromSatoshis(10000),
- Features: emptyFeatures,
- PaymentPreimage: &preimage,
- },
- }
-
- if _, err := db.AddInvoice(testInvoice, paymentHash); err != nil {
- t.Fatalf("unable to find invoice: %v", err)
- }
-
- // Accept an htlc on this invoice.
- key := CircuitKey{ChanID: lnwire.NewShortChanIDFromInt(1), HtlcID: 4}
- htlc := HtlcAcceptDesc{
- Amt: 500,
- CustomRecords: make(record.CustomSet),
- }
-
- ref := InvoiceRefByHash(paymentHash)
- invoice, err := db.UpdateInvoice(ref,
- func(invoice *Invoice) (*InvoiceUpdateDesc, er.R) {
- return &InvoiceUpdateDesc{
- AddHtlcs: map[CircuitKey]*HtlcAcceptDesc{
- key: &htlc,
- },
- }, nil
- })
- if err != nil {
- t.Fatalf("unable to add invoice htlc: %v", err)
- }
- if len(invoice.Htlcs) != 1 {
- t.Fatalf("expected the htlc to be added")
- }
- if invoice.Htlcs[key].State != HtlcStateAccepted {
- t.Fatalf("expected htlc in state accepted")
- }
-
- // Cancel the htlc again.
- invoice, err = db.UpdateInvoice(ref,
- func(invoice *Invoice) (*InvoiceUpdateDesc, er.R) {
- return &InvoiceUpdateDesc{
- CancelHtlcs: map[CircuitKey]struct{}{
- key: {},
- },
- }, nil
- })
- if err != nil {
- t.Fatalf("unable to cancel htlc: %v", err)
- }
- if len(invoice.Htlcs) != 1 {
- t.Fatalf("expected the htlc to be present")
- }
- if invoice.Htlcs[key].State != HtlcStateCanceled {
- t.Fatalf("expected htlc in state canceled")
- }
-}
-
-// TestInvoiceTimeSeries tests that newly added invoices invoices, as well as
-// settled invoices are added to the database are properly placed in the add
-// add or settle index which serves as an event time series.
-func TestInvoiceAddTimeSeries(t *testing.T) {
- t.Parallel()
-
- db, cleanUp, err := MakeTestDB(OptionClock(testClock))
- defer cleanUp()
- if err != nil {
- t.Fatalf("unable to make test db: %v", err)
- }
-
- _, err = db.InvoicesAddedSince(0)
- util.RequireNoErr(t, err)
-
- // We'll start off by creating 20 random invoices, and inserting them
- // into the database.
- const numInvoices = 20
- amt := lnwire.NewMSatFromSatoshis(1000)
- invoices := make([]Invoice, numInvoices)
- for i := 0; i < len(invoices); i++ {
- invoice, err := randInvoice(amt)
- if err != nil {
- t.Fatalf("unable to create invoice: %v", err)
- }
-
- paymentHash := invoice.Terms.PaymentPreimage.Hash()
-
- if _, err := db.AddInvoice(invoice, paymentHash); err != nil {
- t.Fatalf("unable to add invoice %v", err)
- }
-
- invoices[i] = *invoice
- }
-
- // With the invoices constructed, we'll now create a series of queries
- // that we'll use to assert expected return values of
- // InvoicesAddedSince.
- addQueries := []struct {
- sinceAddIndex uint64
-
- resp []Invoice
- }{
- // If we specify a value of zero, we shouldn't get any invoices
- // back.
- {
- sinceAddIndex: 0,
- },
-
- // If we specify a value well beyond the number of inserted
- // invoices, we shouldn't get any invoices back.
- {
- sinceAddIndex: 99999999,
- },
-
- // Using an index of 1 should result in all values, but the
- // first one being returned.
- {
- sinceAddIndex: 1,
- resp: invoices[1:],
- },
-
- // If we use an index of 10, then we should retrieve the
- // reaming 10 invoices.
- {
- sinceAddIndex: 10,
- resp: invoices[10:],
- },
- }
-
- for i, query := range addQueries {
- resp, err := db.InvoicesAddedSince(query.sinceAddIndex)
- if err != nil {
- t.Fatalf("unable to query: %v", err)
- }
-
- require.Equal(t, len(query.resp), len(resp))
-
- for j := 0; j < len(query.resp); j++ {
- require.Equal(t,
- query.resp[j], resp[j],
- fmt.Sprintf("test: #%v, item: #%v", i, j),
- )
- }
- }
-
- _, err = db.InvoicesSettledSince(0)
- util.RequireNoErr(t, err)
-
- var settledInvoices []Invoice
- var settleIndex uint64 = 1
- // We'll now only settle the latter half of each of those invoices.
- for i := 10; i < len(invoices); i++ {
- invoice := &invoices[i]
-
- paymentHash := invoice.Terms.PaymentPreimage.Hash()
-
- ref := InvoiceRefByHash(paymentHash)
- _, err := db.UpdateInvoice(
- ref, getUpdateInvoice(invoice.Terms.Value),
- )
- if err != nil {
- t.Fatalf("unable to settle invoice: %v", err)
- }
-
- // Create the settled invoice for the expectation set.
- settleTestInvoice(invoice, settleIndex)
- settleIndex++
-
- settledInvoices = append(settledInvoices, *invoice)
- }
-
- // We'll now prepare an additional set of queries to ensure the settle
- // time series has properly been maintained in the database.
- settleQueries := []struct {
- sinceSettleIndex uint64
-
- resp []Invoice
- }{
- // If we specify a value of zero, we shouldn't get any settled
- // invoices back.
- {
- sinceSettleIndex: 0,
- },
-
- // If we specify a value well beyond the number of settled
- // invoices, we shouldn't get any invoices back.
- {
- sinceSettleIndex: 99999999,
- },
-
- // Using an index of 1 should result in the final 10 invoices
- // being returned, as we only settled those.
- {
- sinceSettleIndex: 1,
- resp: settledInvoices[1:],
- },
- }
-
- for i, query := range settleQueries {
- resp, err := db.InvoicesSettledSince(query.sinceSettleIndex)
- if err != nil {
- t.Fatalf("unable to query: %v", err)
- }
-
- require.Equal(t, len(query.resp), len(resp))
-
- for j := 0; j < len(query.resp); j++ {
- require.Equal(t,
- query.resp[j], resp[j],
- fmt.Sprintf("test: #%v, item: #%v", i, j),
- )
- }
- }
-}
-
-// TestScanInvoices tests that ScanInvoices scans trough all stored invoices
-// correctly.
-func TestScanInvoices(t *testing.T) {
- t.Parallel()
-
- db, cleanup, err := MakeTestDB()
- defer cleanup()
- if err != nil {
- t.Fatalf("unable to make test db: %v", err)
- }
-
- var invoices map[lntypes.Hash]*Invoice
- callCount := 0
- resetCount := 0
-
- // reset is used to reset/initialize results and is called once
- // upon calling ScanInvoices and when the underlying transaction is
- // retried.
- reset := func() {
- invoices = make(map[lntypes.Hash]*Invoice)
- callCount = 0
- resetCount++
-
- }
-
- scanFunc := func(paymentHash lntypes.Hash, invoice *Invoice) er.R {
- invoices[paymentHash] = invoice
- callCount++
-
- return nil
- }
-
- // With an empty DB we expect to not scan any invoices.
- util.RequireNoErr(t, db.ScanInvoices(scanFunc, reset))
- require.Equal(t, 0, len(invoices))
- require.Equal(t, 0, callCount)
- require.Equal(t, 1, resetCount)
-
- numInvoices := 5
- testInvoices := make(map[lntypes.Hash]*Invoice)
-
- // Now populate the DB and check if we can get all invoices with their
- // payment hashes as expected.
- for i := 1; i <= numInvoices; i++ {
- invoice, err := randInvoice(lnwire.MilliSatoshi(i))
- util.RequireNoErr(t, err)
-
- paymentHash := invoice.Terms.PaymentPreimage.Hash()
- testInvoices[paymentHash] = invoice
-
- _, err = db.AddInvoice(invoice, paymentHash)
- util.RequireNoErr(t, err)
- }
-
- resetCount = 0
- util.RequireNoErr(t, db.ScanInvoices(scanFunc, reset))
- require.Equal(t, numInvoices, callCount)
- require.Equal(t, testInvoices, invoices)
- require.Equal(t, 1, resetCount)
-}
-
-// TestDuplicateSettleInvoice tests that if we add a new invoice and settle it
-// twice, then the second time we also receive the invoice that we settled as a
-// return argument.
-func TestDuplicateSettleInvoice(t *testing.T) {
- t.Parallel()
-
- db, cleanUp, err := MakeTestDB(OptionClock(testClock))
- defer cleanUp()
- if err != nil {
- t.Fatalf("unable to make test db: %v", err)
- }
-
- // We'll start out by creating an invoice and writing it to the DB.
- amt := lnwire.NewMSatFromSatoshis(1000)
- invoice, err := randInvoice(amt)
- if err != nil {
- t.Fatalf("unable to create invoice: %v", err)
- }
-
- payHash := invoice.Terms.PaymentPreimage.Hash()
-
- if _, err := db.AddInvoice(invoice, payHash); err != nil {
- t.Fatalf("unable to add invoice %v", err)
- }
-
- // With the invoice in the DB, we'll now attempt to settle the invoice.
- ref := InvoiceRefByHash(payHash)
- dbInvoice, err := db.UpdateInvoice(ref, getUpdateInvoice(amt))
- if err != nil {
- t.Fatalf("unable to settle invoice: %v", err)
- }
-
- // We'll update what we expect the settle invoice to be so that our
- // comparison below has the correct assumption.
- invoice.SettleIndex = 1
- invoice.State = ContractSettled
- invoice.AmtPaid = amt
- invoice.SettleDate = dbInvoice.SettleDate
- invoice.Htlcs = map[CircuitKey]*InvoiceHTLC{
- {}: {
- Amt: amt,
- AcceptTime: time.Unix(1, 0),
- ResolveTime: time.Unix(1, 0),
- State: HtlcStateSettled,
- CustomRecords: make(record.CustomSet),
- },
- }
-
- // We should get back the exact same invoice that we just inserted.
- require.Equal(t, invoice, dbInvoice, "wrong invoice after settle")
-
- // If we try to settle the invoice again, then we should get the very
- // same invoice back, but with an error this time.
- dbInvoice, err = db.UpdateInvoice(ref, getUpdateInvoice(amt))
- if !ErrInvoiceAlreadySettled.Is(err) {
- t.Fatalf("expected ErrInvoiceAlreadySettled")
- }
-
- if dbInvoice == nil {
- t.Fatalf("invoice from db is nil after settle!")
- }
-
- invoice.SettleDate = dbInvoice.SettleDate
- require.Equal(t, invoice, dbInvoice, "wrong invoice after second settle")
-}
-
-// TestQueryInvoices ensures that we can properly query the invoice database for
-// invoices using different types of queries.
-func TestQueryInvoices(t *testing.T) {
- t.Parallel()
-
- db, cleanUp, err := MakeTestDB(OptionClock(testClock))
- defer cleanUp()
- if err != nil {
- t.Fatalf("unable to make test db: %v", err)
- }
-
- // To begin the test, we'll add 50 invoices to the database. We'll
- // assume that the index of the invoice within the database is the same
- // as the amount of the invoice itself.
- const numInvoices = 50
- var settleIndex uint64 = 1
- var invoices []Invoice
- var pendingInvoices []Invoice
-
- for i := 1; i <= numInvoices; i++ {
- amt := lnwire.MilliSatoshi(i)
- invoice, err := randInvoice(amt)
- if err != nil {
- t.Fatalf("unable to create invoice: %v", err)
- }
-
- paymentHash := invoice.Terms.PaymentPreimage.Hash()
-
- if _, err := db.AddInvoice(invoice, paymentHash); err != nil {
- t.Fatalf("unable to add invoice: %v", err)
- }
-
- // We'll only settle half of all invoices created.
- if i%2 == 0 {
- ref := InvoiceRefByHash(paymentHash)
- _, err := db.UpdateInvoice(ref, getUpdateInvoice(amt))
- if err != nil {
- t.Fatalf("unable to settle invoice: %v", err)
- }
-
- // Create the settled invoice for the expectation set.
- settleTestInvoice(invoice, settleIndex)
- settleIndex++
- } else {
- pendingInvoices = append(pendingInvoices, *invoice)
- }
-
- invoices = append(invoices, *invoice)
- }
-
- // The test will consist of several queries along with their respective
- // expected response. Each query response should match its expected one.
- testCases := []struct {
- query InvoiceQuery
- expected []Invoice
- }{
- // Fetch all invoices with a single query.
- {
- query: InvoiceQuery{
- NumMaxInvoices: numInvoices,
- },
- expected: invoices,
- },
- // Fetch all invoices with a single query, reversed.
- {
- query: InvoiceQuery{
- Reversed: true,
- NumMaxInvoices: numInvoices,
- },
- expected: invoices,
- },
- // Fetch the first 25 invoices.
- {
- query: InvoiceQuery{
- NumMaxInvoices: numInvoices / 2,
- },
- expected: invoices[:numInvoices/2],
- },
- // Fetch the first 10 invoices, but this time iterating
- // backwards.
- {
- query: InvoiceQuery{
- IndexOffset: 11,
- Reversed: true,
- NumMaxInvoices: numInvoices,
- },
- expected: invoices[:10],
- },
- // Fetch the last 40 invoices.
- {
- query: InvoiceQuery{
- IndexOffset: 10,
- NumMaxInvoices: numInvoices,
- },
- expected: invoices[10:],
- },
- // Fetch all but the first invoice.
- {
- query: InvoiceQuery{
- IndexOffset: 1,
- NumMaxInvoices: numInvoices,
- },
- expected: invoices[1:],
- },
- // Fetch one invoice, reversed, with index offset 3. This
- // should give us the second invoice in the array.
- {
- query: InvoiceQuery{
- IndexOffset: 3,
- Reversed: true,
- NumMaxInvoices: 1,
- },
- expected: invoices[1:2],
- },
- // Same as above, at index 2.
- {
- query: InvoiceQuery{
- IndexOffset: 2,
- Reversed: true,
- NumMaxInvoices: 1,
- },
- expected: invoices[0:1],
- },
- // Fetch one invoice, at index 1, reversed. Since invoice#1 is
- // the very first, there won't be any left in a reverse search,
- // so we expect no invoices to be returned.
- {
- query: InvoiceQuery{
- IndexOffset: 1,
- Reversed: true,
- NumMaxInvoices: 1,
- },
- expected: nil,
- },
- // Same as above, but don't restrict the number of invoices to
- // 1.
- {
- query: InvoiceQuery{
- IndexOffset: 1,
- Reversed: true,
- NumMaxInvoices: numInvoices,
- },
- expected: nil,
- },
- // Fetch one invoice, reversed, with no offset set. We expect
- // the last invoice in the response.
- {
- query: InvoiceQuery{
- Reversed: true,
- NumMaxInvoices: 1,
- },
- expected: invoices[numInvoices-1:],
- },
- // Fetch one invoice, reversed, the offset set at numInvoices+1.
- // We expect this to return the last invoice.
- {
- query: InvoiceQuery{
- IndexOffset: numInvoices + 1,
- Reversed: true,
- NumMaxInvoices: 1,
- },
- expected: invoices[numInvoices-1:],
- },
- // Same as above, at offset numInvoices.
- {
- query: InvoiceQuery{
- IndexOffset: numInvoices,
- Reversed: true,
- NumMaxInvoices: 1,
- },
- expected: invoices[numInvoices-2 : numInvoices-1],
- },
- // Fetch one invoice, at no offset (same as offset 0). We
- // expect the first invoice only in the response.
- {
- query: InvoiceQuery{
- NumMaxInvoices: 1,
- },
- expected: invoices[:1],
- },
- // Same as above, at offset 1.
- {
- query: InvoiceQuery{
- IndexOffset: 1,
- NumMaxInvoices: 1,
- },
- expected: invoices[1:2],
- },
- // Same as above, at offset 2.
- {
- query: InvoiceQuery{
- IndexOffset: 2,
- NumMaxInvoices: 1,
- },
- expected: invoices[2:3],
- },
- // Same as above, at offset numInvoices-1. Expect the last
- // invoice to be returned.
- {
- query: InvoiceQuery{
- IndexOffset: numInvoices - 1,
- NumMaxInvoices: 1,
- },
- expected: invoices[numInvoices-1:],
- },
- // Same as above, at offset numInvoices. No invoices should be
- // returned, as there are no invoices after this offset.
- {
- query: InvoiceQuery{
- IndexOffset: numInvoices,
- NumMaxInvoices: 1,
- },
- expected: nil,
- },
- // Fetch all pending invoices with a single query.
- {
- query: InvoiceQuery{
- PendingOnly: true,
- NumMaxInvoices: numInvoices,
- },
- expected: pendingInvoices,
- },
- // Fetch the first 12 pending invoices.
- {
- query: InvoiceQuery{
- PendingOnly: true,
- NumMaxInvoices: numInvoices / 4,
- },
- expected: pendingInvoices[:len(pendingInvoices)/2],
- },
- // Fetch the first 5 pending invoices, but this time iterating
- // backwards.
- {
- query: InvoiceQuery{
- IndexOffset: 10,
- PendingOnly: true,
- Reversed: true,
- NumMaxInvoices: numInvoices,
- },
- // Since we seek to the invoice with index 10 and
- // iterate backwards, there should only be 5 pending
- // invoices before it as every other invoice within the
- // index is settled.
- expected: pendingInvoices[:5],
- },
- // Fetch the last 15 invoices.
- {
- query: InvoiceQuery{
- IndexOffset: 20,
- PendingOnly: true,
- NumMaxInvoices: numInvoices,
- },
- // Since we seek to the invoice with index 20, there are
- // 30 invoices left. From these 30, only 15 of them are
- // still pending.
- expected: pendingInvoices[len(pendingInvoices)-15:],
- },
- // Fetch all invoices paginating backwards, with an index offset
- // that is beyond our last offset. We expect all invoices to be
- // returned.
- {
- query: InvoiceQuery{
- IndexOffset: numInvoices * 2,
- PendingOnly: false,
- Reversed: true,
- NumMaxInvoices: numInvoices,
- },
- expected: invoices,
- },
- }
-
- for i, testCase := range testCases {
- response, err := db.QueryInvoices(testCase.query)
- if err != nil {
- t.Fatalf("unable to query invoice database: %v", err)
- }
-
- require.Equal(t, len(testCase.expected), len(response.Invoices))
-
- for j, expected := range testCase.expected {
- require.Equal(t,
- expected, response.Invoices[j],
- fmt.Sprintf("test: #%v, item: #%v", i, j),
- )
- }
- }
-}
-
-// getUpdateInvoice returns an invoice update callback that, when called,
-// settles the invoice with the given amount.
-func getUpdateInvoice(amt lnwire.MilliSatoshi) InvoiceUpdateCallback {
- return func(invoice *Invoice) (*InvoiceUpdateDesc, er.R) {
- if invoice.State == ContractSettled {
- return nil, ErrInvoiceAlreadySettled.Default()
- }
-
- noRecords := make(record.CustomSet)
-
- update := &InvoiceUpdateDesc{
- State: &InvoiceStateUpdateDesc{
- Preimage: invoice.Terms.PaymentPreimage,
- NewState: ContractSettled,
- },
- AddHtlcs: map[CircuitKey]*HtlcAcceptDesc{
- {}: {
- Amt: amt,
- CustomRecords: noRecords,
- },
- },
- }
-
- return update, nil
- }
-}
-
-// TestCustomRecords tests that custom records are properly recorded in the
-// invoice database.
-func TestCustomRecords(t *testing.T) {
- t.Parallel()
-
- db, cleanUp, err := MakeTestDB()
- defer cleanUp()
- if err != nil {
- t.Fatalf("unable to make test db: %v", err)
- }
-
- preimage := lntypes.Preimage{1}
- paymentHash := preimage.Hash()
-
- testInvoice := &Invoice{
- Htlcs: map[CircuitKey]*InvoiceHTLC{},
- Terms: ContractTerm{
- Value: lnwire.NewMSatFromSatoshis(10000),
- Features: emptyFeatures,
- PaymentPreimage: &preimage,
- },
- }
-
- if _, err := db.AddInvoice(testInvoice, paymentHash); err != nil {
- t.Fatalf("unable to add invoice: %v", err)
- }
-
- // Accept an htlc with custom records on this invoice.
- key := CircuitKey{ChanID: lnwire.NewShortChanIDFromInt(1), HtlcID: 4}
-
- records := record.CustomSet{
- 100000: []byte{},
- 100001: []byte{1, 2},
- }
-
- ref := InvoiceRefByHash(paymentHash)
- _, err = db.UpdateInvoice(ref,
- func(invoice *Invoice) (*InvoiceUpdateDesc, er.R) {
- return &InvoiceUpdateDesc{
- AddHtlcs: map[CircuitKey]*HtlcAcceptDesc{
- key: {
- Amt: 500,
- CustomRecords: records,
- },
- },
- }, nil
- },
- )
- if err != nil {
- t.Fatalf("unable to add invoice htlc: %v", err)
- }
-
- // Retrieve the invoice from that database and verify that the custom
- // records are present.
- dbInvoice, err := db.LookupInvoice(ref)
- if err != nil {
- t.Fatalf("unable to lookup invoice: %v", err)
- }
-
- if len(dbInvoice.Htlcs) != 1 {
- t.Fatalf("expected the htlc to be added")
- }
-
- require.Equal(t,
- records, dbInvoice.Htlcs[key].CustomRecords,
- "invalid custom records",
- )
-}
-
-// TestInvoiceRef asserts that the proper identifiers are returned from an
-// InvoiceRef depending on the constructor used.
-func TestInvoiceRef(t *testing.T) {
- payHash := lntypes.Hash{0x01}
- payAddr := [32]byte{0x02}
-
- // An InvoiceRef by hash should return the provided hash and a nil
- // payment addr.
- refByHash := InvoiceRefByHash(payHash)
- require.Equal(t, payHash, refByHash.PayHash())
- require.Equal(t, (*[32]byte)(nil), refByHash.PayAddr())
-
- // An InvoiceRef by hash and addr should return the payment hash and
- // payment addr passed to the constructor.
- refByHashAndAddr := InvoiceRefByHashAndAddr(payHash, payAddr)
- require.Equal(t, payHash, refByHashAndAddr.PayHash())
- require.Equal(t, &payAddr, refByHashAndAddr.PayAddr())
-}
-
-// TestDeleteInvoices tests that deleting a list of invoices will succeed
-// if all delete references are valid, or will fail otherwise.
-func TestDeleteInvoices(t *testing.T) {
- t.Parallel()
-
- db, cleanup, err := MakeTestDB()
- defer cleanup()
- util.RequireNoErr(t, err, "unable to make test db")
-
- // Add some invoices to the test db.
- numInvoices := 3
- invoicesToDelete := make([]InvoiceDeleteRef, numInvoices)
-
- for i := 0; i < numInvoices; i++ {
- invoice, err := randInvoice(lnwire.MilliSatoshi(i + 1))
- util.RequireNoErr(t, err)
-
- paymentHash := invoice.Terms.PaymentPreimage.Hash()
- addIndex, err := db.AddInvoice(invoice, paymentHash)
- util.RequireNoErr(t, err)
-
- // Settle the second invoice.
- if i == 1 {
- invoice, err = db.UpdateInvoice(
- InvoiceRefByHash(paymentHash),
- getUpdateInvoice(invoice.Terms.Value),
- )
- util.RequireNoErr(t, err, "unable to settle invoice")
- }
-
- // store the delete ref for later.
- invoicesToDelete[i] = InvoiceDeleteRef{
- PayHash: paymentHash,
- PayAddr: &invoice.Terms.PaymentAddr,
- AddIndex: addIndex,
- SettleIndex: invoice.SettleIndex,
- }
- }
-
- // assertInvoiceCount asserts that the number of invoices equals
- // to the passed count.
- assertInvoiceCount := func(count int) {
- // Query to collect all invoices.
- query := InvoiceQuery{
- IndexOffset: 0,
- NumMaxInvoices: math.MaxUint64,
- }
-
- // Check that we really have 3 invoices.
- response, err := db.QueryInvoices(query)
- util.RequireNoErr(t, err)
- require.Equal(t, count, len(response.Invoices))
- }
-
- // XOR one byte of one of the references' hash and attempt to delete.
- invoicesToDelete[0].PayHash[2] ^= 3
- util.RequireErr(t, db.DeleteInvoice(invoicesToDelete))
- assertInvoiceCount(3)
-
- // Restore the hash.
- invoicesToDelete[0].PayHash[2] ^= 3
-
- // XOR one byte of one of the references' payment address and attempt
- // to delete.
- invoicesToDelete[1].PayAddr[5] ^= 7
- util.RequireErr(t, db.DeleteInvoice(invoicesToDelete))
- assertInvoiceCount(3)
-
- // Restore the payment address.
- invoicesToDelete[1].PayAddr[5] ^= 7
-
- // XOR the second invoice's payment settle index as it is settled, and
- // attempt to delete.
- invoicesToDelete[1].SettleIndex ^= 11
- util.RequireErr(t, db.DeleteInvoice(invoicesToDelete))
- assertInvoiceCount(3)
-
- // Restore the settle index.
- invoicesToDelete[1].SettleIndex ^= 11
-
- // XOR the add index for one of the references and attempt to delete.
- invoicesToDelete[2].AddIndex ^= 13
- util.RequireErr(t, db.DeleteInvoice(invoicesToDelete))
- assertInvoiceCount(3)
-
- // Restore the add index.
- invoicesToDelete[2].AddIndex ^= 13
-
- // Delete should succeed with all the valid references.
- util.RequireNoErr(t, db.DeleteInvoice(invoicesToDelete))
- assertInvoiceCount(0)
-}
diff --git a/lnd/channeldb/invoices.go b/lnd/channeldb/invoices.go
deleted file mode 100644
index e4539cf8..00000000
--- a/lnd/channeldb/invoices.go
+++ /dev/null
@@ -1,1876 +0,0 @@
-package channeldb
-
-import (
- "bytes"
- "fmt"
- "io"
- "time"
-
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/btcutil/util"
- "github.com/pkt-cash/pktd/lnd/channeldb/kvdb"
- "github.com/pkt-cash/pktd/lnd/htlcswitch/hop"
- "github.com/pkt-cash/pktd/lnd/lntypes"
- "github.com/pkt-cash/pktd/lnd/lnwire"
- "github.com/pkt-cash/pktd/lnd/record"
- "github.com/pkt-cash/pktd/lnd/tlv"
-)
-
-var (
- // unknownPreimage is an all-zeroes preimage that indicates that the
- // preimage for this invoice is not yet known.
- unknownPreimage lntypes.Preimage
-
- // BlankPayAddr is a sentinel payment address for legacy invoices.
- // Invoices with this payment address are special-cased in the insertion
- // logic to prevent being indexed in the payment address index,
- // otherwise they would cause collisions after the first insertion.
- BlankPayAddr [32]byte
-
- // invoiceBucket is the name of the bucket within the database that
- // stores all data related to invoices no matter their final state.
- // Within the invoice bucket, each invoice is keyed by its invoice ID
- // which is a monotonically increasing uint32.
- invoiceBucket = []byte("invoices")
-
- // paymentHashIndexBucket is the name of the sub-bucket within the
- // invoiceBucket which indexes all invoices by their payment hash. The
- // payment hash is the sha256 of the invoice's payment preimage. This
- // index is used to detect duplicates, and also to provide a fast path
- // for looking up incoming HTLCs to determine if we're able to settle
- // them fully.
- //
- // maps: payHash => invoiceKey
- invoiceIndexBucket = []byte("paymenthashes")
-
- // payAddrIndexBucket is the name of the top-level bucket that maps
- // payment addresses to their invoice number. This can be used
- // to efficiently query or update non-legacy invoices. Note that legacy
- // invoices will not be included in this index since they all have the
- // same, all-zero payment address, however all newly generated invoices
- // will end up in this index.
- //
- // maps: payAddr => invoiceKey
- payAddrIndexBucket = []byte("pay-addr-index")
-
- // numInvoicesKey is the name of key which houses the auto-incrementing
- // invoice ID which is essentially used as a primary key. With each
- // invoice inserted, the primary key is incremented by one. This key is
- // stored within the invoiceIndexBucket. Within the invoiceBucket
- // invoices are uniquely identified by the invoice ID.
- numInvoicesKey = []byte("nik")
-
- // addIndexBucket is an index bucket that we'll use to create a
- // monotonically increasing set of add indexes. Each time we add a new
- // invoice, this sequence number will be incremented and then populated
- // within the new invoice.
- //
- // In addition to this sequence number, we map:
- //
- // addIndexNo => invoiceKey
- addIndexBucket = []byte("invoice-add-index")
-
- // settleIndexBucket is an index bucket that we'll use to create a
- // monotonically increasing integer for tracking a "settle index". Each
- // time an invoice is settled, this sequence number will be incremented
- // as populate within the newly settled invoice.
- //
- // In addition to this sequence number, we map:
- //
- // settleIndexNo => invoiceKey
- settleIndexBucket = []byte("invoice-settle-index")
-
- // ErrInvoiceAlreadySettled is returned when the invoice is already
- // settled.
- ErrInvoiceAlreadySettled = Err.CodeWithDetail("ErrInvoiceAlreadySettled", "invoice already settled")
-
- // ErrInvoiceAlreadyCanceled is returned when the invoice is already
- // canceled.
- ErrInvoiceAlreadyCanceled = Err.CodeWithDetail("ErrInvoiceAlreadyCanceled", "invoice already canceled")
-
- // ErrInvoiceAlreadyAccepted is returned when the invoice is already
- // accepted.
- ErrInvoiceAlreadyAccepted = Err.CodeWithDetail("ErrInvoiceAlreadyAccepted", "invoice already accepted")
-
- // ErrInvoiceStillOpen is returned when the invoice is still open.
- ErrInvoiceStillOpen = Err.CodeWithDetail("ErrInvoiceStillOpen", "invoice still open")
-
- // ErrInvoiceCannotOpen is returned when an attempt is made to move an
- // invoice to the open state.
- ErrInvoiceCannotOpen = Err.CodeWithDetail("ErrInvoiceCannotOpen", "cannot move invoice to open")
-
- // ErrInvoiceCannotAccept is returned when an attempt is made to accept
- // an invoice while the invoice is not in the open state.
- ErrInvoiceCannotAccept = Err.CodeWithDetail("ErrInvoiceCannotAccept", "cannot accept invoice")
-
- // ErrInvoicePreimageMismatch is returned when the preimage doesn't
- // match the invoice hash.
- ErrInvoicePreimageMismatch = Err.CodeWithDetail("ErrInvoicePreimageMismatch", "preimage does not match")
-)
-
-const (
- // MaxMemoSize is maximum size of the memo field within invoices stored
- // in the database.
- MaxMemoSize = 1024
-
- // MaxPaymentRequestSize is the max size of a payment request for
- // this invoice.
- // TODO(halseth): determine the max length payment request when field
- // lengths are final.
- MaxPaymentRequestSize = 4096
-
- // A set of tlv type definitions used to serialize invoice htlcs to the
- // database.
- //
- // NOTE: A migration should be added whenever this list changes. This
- // prevents against the database being rolled back to an older
- // format where the surrounding logic might assume a different set of
- // fields are known.
- chanIDType tlv.Type = 1
- htlcIDType tlv.Type = 3
- amtType tlv.Type = 5
- acceptHeightType tlv.Type = 7
- acceptTimeType tlv.Type = 9
- resolveTimeType tlv.Type = 11
- expiryHeightType tlv.Type = 13
- htlcStateType tlv.Type = 15
- mppTotalAmtType tlv.Type = 17
-
- // A set of tlv type definitions used to serialize invoice bodiees.
- //
- // NOTE: A migration should be added whenever this list changes. This
- // prevents against the database being rolled back to an older
- // format where the surrounding logic might assume a different set of
- // fields are known.
- memoType tlv.Type = 0
- payReqType tlv.Type = 1
- createTimeType tlv.Type = 2
- settleTimeType tlv.Type = 3
- addIndexType tlv.Type = 4
- settleIndexType tlv.Type = 5
- preimageType tlv.Type = 6
- valueType tlv.Type = 7
- cltvDeltaType tlv.Type = 8
- expiryType tlv.Type = 9
- paymentAddrType tlv.Type = 10
- featuresType tlv.Type = 11
- invStateType tlv.Type = 12
- amtPaidType tlv.Type = 13
- hodlInvoiceType tlv.Type = 14
-)
-
-// InvoiceRef is a composite identifier for invoices. Invoices can be referenced
-// by various combinations of payment hash and payment addr, in certain contexts
-// only some of these are known. An InvoiceRef and its constructors thus
-// encapsulate the valid combinations of query parameters that can be supplied
-// to LookupInvoice and UpdateInvoice.
-type InvoiceRef struct {
- // payHash is the payment hash of the target invoice. All invoices are
- // currently indexed by payment hash. This value will be used as a
- // fallback when no payment address is known.
- payHash lntypes.Hash
-
- // payAddr is the payment addr of the target invoice. Newer invoices
- // (0.11 and up) are indexed by payment address in addition to payment
- // hash, but pre 0.8 invoices do not have one at all. When this value is
- // known it will be used as the primary identifier, falling back to
- // payHash if no value is known.
- payAddr *[32]byte
-}
-
-// InvoiceRefByHash creates an InvoiceRef that queries for an invoice only by
-// its payment hash.
-func InvoiceRefByHash(payHash lntypes.Hash) InvoiceRef {
- return InvoiceRef{
- payHash: payHash,
- }
-}
-
-// InvoiceRefByHashAndAddr creates an InvoiceRef that first queries for an
-// invoice by the provided payment address, falling back to the payment hash if
-// the payment address is unknown.
-func InvoiceRefByHashAndAddr(payHash lntypes.Hash,
- payAddr [32]byte) InvoiceRef {
-
- return InvoiceRef{
- payHash: payHash,
- payAddr: &payAddr,
- }
-}
-
-// PayHash returns the target invoice's payment hash.
-func (r InvoiceRef) PayHash() lntypes.Hash {
- return r.payHash
-}
-
-// PayAddr returns the optional payment address of the target invoice.
-//
-// NOTE: This value may be nil.
-func (r InvoiceRef) PayAddr() *[32]byte {
- if r.payAddr != nil {
- addr := *r.payAddr
- return &addr
- }
- return nil
-}
-
-// String returns a human-readable representation of an InvoiceRef.
-func (r InvoiceRef) String() string {
- if r.payAddr != nil {
- return fmt.Sprintf("(pay_hash=%v, pay_addr=%x)", r.payHash, *r.payAddr)
- }
- return fmt.Sprintf("(pay_hash=%v)", r.payHash)
-}
-
-// ContractState describes the state the invoice is in.
-type ContractState uint8
-
-const (
- // ContractOpen means the invoice has only been created.
- ContractOpen ContractState = 0
-
- // ContractSettled means the htlc is settled and the invoice has been paid.
- ContractSettled ContractState = 1
-
- // ContractCanceled means the invoice has been canceled.
- ContractCanceled ContractState = 2
-
- // ContractAccepted means the HTLC has been accepted but not settled yet.
- ContractAccepted ContractState = 3
-)
-
-// String returns a human readable identifier for the ContractState type.
-func (c ContractState) String() string {
- switch c {
- case ContractOpen:
- return "Open"
- case ContractSettled:
- return "Settled"
- case ContractCanceled:
- return "Canceled"
- case ContractAccepted:
- return "Accepted"
- }
-
- return "Unknown"
-}
-
-// ContractTerm is a companion struct to the Invoice struct. This struct houses
-// the necessary conditions required before the invoice can be considered fully
-// settled by the payee.
-type ContractTerm struct {
- // FinalCltvDelta is the minimum required number of blocks before htlc
- // expiry when the invoice is accepted.
- FinalCltvDelta int32
-
- // Expiry defines how long after creation this invoice should expire.
- Expiry time.Duration
-
- // PaymentPreimage is the preimage which is to be revealed in the
- // occasion that an HTLC paying to the hash of this preimage is
- // extended. Set to nil if the preimage isn't known yet.
- PaymentPreimage *lntypes.Preimage
-
- // Value is the expected amount of milli-satoshis to be paid to an HTLC
- // which can be satisfied by the above preimage.
- Value lnwire.MilliSatoshi
-
- // PaymentAddr is a randomly generated value include in the MPP record
- // by the sender to prevent probing of the receiver.
- PaymentAddr [32]byte
-
- // Features is the feature vectors advertised on the payment request.
- Features *lnwire.FeatureVector
-}
-
-// String returns a human-readable description of the prominent contract terms.
-func (c ContractTerm) String() string {
- return fmt.Sprintf("amt=%v, expiry=%v, final_cltv_delta=%v", c.Value,
- c.Expiry, c.FinalCltvDelta)
-}
-
-// Invoice is a payment invoice generated by a payee in order to request
-// payment for some good or service. The inclusion of invoices within Lightning
-// creates a payment work flow for merchants very similar to that of the
-// existing financial system within PayPal, etc. Invoices are added to the
-// database when a payment is requested, then can be settled manually once the
-// payment is received at the upper layer. For record keeping purposes,
-// invoices are never deleted from the database, instead a bit is toggled
-// denoting the invoice has been fully settled. Within the database, all
-// invoices must have a unique payment hash which is generated by taking the
-// sha256 of the payment preimage.
-type Invoice struct {
- // Memo is an optional memo to be stored along side an invoice. The
- // memo may contain further details pertaining to the invoice itself,
- // or any other message which fits within the size constraints.
- Memo []byte
-
- // PaymentRequest is the encoded payment request for this invoice. For
- // spontaneous (keysend) payments, this field will be empty.
- PaymentRequest []byte
-
- // CreationDate is the exact time the invoice was created.
- CreationDate time.Time
-
- // SettleDate is the exact time the invoice was settled.
- SettleDate time.Time
-
- // Terms are the contractual payment terms of the invoice. Once all the
- // terms have been satisfied by the payer, then the invoice can be
- // considered fully fulfilled.
- //
- // TODO(roasbeef): later allow for multiple terms to fulfill the final
- // invoice: payment fragmentation, etc.
- Terms ContractTerm
-
- // AddIndex is an auto-incrementing integer that acts as a
- // monotonically increasing sequence number for all invoices created.
- // Clients can then use this field as a "checkpoint" of sorts when
- // implementing a streaming RPC to notify consumers of instances where
- // an invoice has been added before they re-connected.
- //
- // NOTE: This index starts at 1.
- AddIndex uint64
-
- // SettleIndex is an auto-incrementing integer that acts as a
- // monotonically increasing sequence number for all settled invoices.
- // Clients can then use this field as a "checkpoint" of sorts when
- // implementing a streaming RPC to notify consumers of instances where
- // an invoice has been settled before they re-connected.
- //
- // NOTE: This index starts at 1.
- SettleIndex uint64
-
- // State describes the state the invoice is in.
- State ContractState
-
- // AmtPaid is the final amount that we ultimately accepted for pay for
- // this invoice. We specify this value independently as it's possible
- // that the invoice originally didn't specify an amount, or the sender
- // overpaid.
- AmtPaid lnwire.MilliSatoshi
-
- // Htlcs records all htlcs that paid to this invoice. Some of these
- // htlcs may have been marked as canceled.
- Htlcs map[CircuitKey]*InvoiceHTLC
-
- // HodlInvoice indicates whether the invoice should be held in the
- // Accepted state or be settled right away.
- HodlInvoice bool
-}
-
-// HtlcState defines the states an htlc paying to an invoice can be in.
-type HtlcState uint8
-
-const (
- // HtlcStateAccepted indicates the htlc is locked-in, but not resolved.
- HtlcStateAccepted HtlcState = iota
-
- // HtlcStateCanceled indicates the htlc is canceled back to the
- // sender.
- HtlcStateCanceled
-
- // HtlcStateSettled indicates the htlc is settled.
- HtlcStateSettled
-)
-
-// InvoiceHTLC contains details about an htlc paying to this invoice.
-type InvoiceHTLC struct {
- // Amt is the amount that is carried by this htlc.
- Amt lnwire.MilliSatoshi
-
- // MppTotalAmt is a field for mpp that indicates the expected total
- // amount.
- MppTotalAmt lnwire.MilliSatoshi
-
- // AcceptHeight is the block height at which the invoice registry
- // decided to accept this htlc as a payment to the invoice. At this
- // height, the invoice cltv delay must have been met.
- AcceptHeight uint32
-
- // AcceptTime is the wall clock time at which the invoice registry
- // decided to accept the htlc.
- AcceptTime time.Time
-
- // ResolveTime is the wall clock time at which the invoice registry
- // decided to settle the htlc.
- ResolveTime time.Time
-
- // Expiry is the expiry height of this htlc.
- Expiry uint32
-
- // State indicates the state the invoice htlc is currently in. A
- // canceled htlc isn't just removed from the invoice htlcs map, because
- // we need AcceptHeight to properly cancel the htlc back.
- State HtlcState
-
- // CustomRecords contains the custom key/value pairs that accompanied
- // the htlc.
- CustomRecords record.CustomSet
-}
-
-// HtlcAcceptDesc describes the details of a newly accepted htlc.
-type HtlcAcceptDesc struct {
- // AcceptHeight is the block height at which this htlc was accepted.
- AcceptHeight int32
-
- // Amt is the amount that is carried by this htlc.
- Amt lnwire.MilliSatoshi
-
- // MppTotalAmt is a field for mpp that indicates the expected total
- // amount.
- MppTotalAmt lnwire.MilliSatoshi
-
- // Expiry is the expiry height of this htlc.
- Expiry uint32
-
- // CustomRecords contains the custom key/value pairs that accompanied
- // the htlc.
- CustomRecords record.CustomSet
-}
-
-// InvoiceUpdateDesc describes the changes that should be applied to the
-// invoice.
-type InvoiceUpdateDesc struct {
- // State is the new state that this invoice should progress to. If nil,
- // the state is left unchanged.
- State *InvoiceStateUpdateDesc
-
- // CancelHtlcs describes the htlcs that need to be canceled.
- CancelHtlcs map[CircuitKey]struct{}
-
- // AddHtlcs describes the newly accepted htlcs that need to be added to
- // the invoice.
- AddHtlcs map[CircuitKey]*HtlcAcceptDesc
-}
-
-// InvoiceStateUpdateDesc describes an invoice-level state transition.
-type InvoiceStateUpdateDesc struct {
- // NewState is the new state that this invoice should progress to.
- NewState ContractState
-
- // Preimage must be set to the preimage when NewState is settled.
- Preimage *lntypes.Preimage
-}
-
-// InvoiceUpdateCallback is a callback used in the db transaction to update the
-// invoice.
-type InvoiceUpdateCallback = func(invoice *Invoice) (*InvoiceUpdateDesc, er.R)
-
-func validateInvoice(i *Invoice, paymentHash lntypes.Hash) er.R {
- // Avoid conflicts with all-zeroes magic value in the database.
- if paymentHash == unknownPreimage.Hash() {
- return er.Errorf("cannot use hash of all-zeroes preimage")
- }
-
- if len(i.Memo) > MaxMemoSize {
- return er.Errorf("max length a memo is %v, and invoice "+
- "of length %v was provided", MaxMemoSize, len(i.Memo))
- }
- if len(i.PaymentRequest) > MaxPaymentRequestSize {
- return er.Errorf("max length of payment request is %v, length "+
- "provided was %v", MaxPaymentRequestSize,
- len(i.PaymentRequest))
- }
- if i.Terms.Features == nil {
- return er.New("invoice must have a feature vector")
- }
-
- if i.Terms.PaymentPreimage == nil && !i.HodlInvoice {
- return er.New("non-hodl invoices must have a preimage")
- }
- return nil
-}
-
-// IsPending returns ture if the invoice is in ContractOpen state.
-func (i *Invoice) IsPending() bool {
- return i.State == ContractOpen || i.State == ContractAccepted
-}
-
-// AddInvoice inserts the targeted invoice into the database. If the invoice has
-// *any* payment hashes which already exists within the database, then the
-// insertion will be aborted and rejected due to the strict policy banning any
-// duplicate payment hashes. A side effect of this function is that it sets
-// AddIndex on newInvoice.
-func (d *DB) AddInvoice(newInvoice *Invoice, paymentHash lntypes.Hash) (
- uint64, er.R) {
-
- if err := validateInvoice(newInvoice, paymentHash); err != nil {
- return 0, err
- }
-
- var invoiceAddIndex uint64
- err := kvdb.Update(d, func(tx kvdb.RwTx) er.R {
- invoices, err := tx.CreateTopLevelBucket(invoiceBucket)
- if err != nil {
- return err
- }
-
- invoiceIndex, err := invoices.CreateBucketIfNotExists(
- invoiceIndexBucket,
- )
- if err != nil {
- return err
- }
- addIndex, err := invoices.CreateBucketIfNotExists(
- addIndexBucket,
- )
- if err != nil {
- return err
- }
-
- // Ensure that an invoice an identical payment hash doesn't
- // already exist within the index.
- if invoiceIndex.Get(paymentHash[:]) != nil {
- return ErrDuplicateInvoice.Default()
- }
-
- // Check that we aren't inserting an invoice with a duplicate
- // payment address. The all-zeros payment address is
- // special-cased to support legacy keysend invoices which don't
- // assign one. This is safe since later we also will avoid
- // indexing them and avoid collisions.
- payAddrIndex := tx.ReadWriteBucket(payAddrIndexBucket)
- if newInvoice.Terms.PaymentAddr != BlankPayAddr {
- if payAddrIndex.Get(newInvoice.Terms.PaymentAddr[:]) != nil {
- return ErrDuplicatePayAddr.Default()
- }
- }
-
- // If the current running payment ID counter hasn't yet been
- // created, then create it now.
- var invoiceNum uint32
- invoiceCounter := invoiceIndex.Get(numInvoicesKey)
- if invoiceCounter == nil {
- var scratch [4]byte
- byteOrder.PutUint32(scratch[:], invoiceNum)
- err := invoiceIndex.Put(numInvoicesKey, scratch[:])
- if err != nil {
- return err
- }
- } else {
- invoiceNum = byteOrder.Uint32(invoiceCounter)
- }
-
- newIndex, errr := putInvoice(
- invoices, invoiceIndex, payAddrIndex, addIndex,
- newInvoice, invoiceNum, paymentHash,
- )
- if errr != nil {
- return errr
- }
-
- invoiceAddIndex = newIndex
- return nil
- }, func() {
- invoiceAddIndex = 0
- })
- if err != nil {
- return 0, err
- }
-
- return invoiceAddIndex, err
-}
-
-// InvoicesAddedSince can be used by callers to seek into the event time series
-// of all the invoices added in the database. The specified sinceAddIndex
-// should be the highest add index that the caller knows of. This method will
-// return all invoices with an add index greater than the specified
-// sinceAddIndex.
-//
-// NOTE: The index starts from 1, as a result. We enforce that specifying a
-// value below the starting index value is a noop.
-func (d *DB) InvoicesAddedSince(sinceAddIndex uint64) ([]Invoice, er.R) {
- var newInvoices []Invoice
-
- // If an index of zero was specified, then in order to maintain
- // backwards compat, we won't send out any new invoices.
- if sinceAddIndex == 0 {
- return newInvoices, nil
- }
-
- var startIndex [8]byte
- byteOrder.PutUint64(startIndex[:], sinceAddIndex)
-
- err := kvdb.View(d, func(tx kvdb.RTx) er.R {
- invoices := tx.ReadBucket(invoiceBucket)
- if invoices == nil {
- return nil
- }
-
- addIndex := invoices.NestedReadBucket(addIndexBucket)
- if addIndex == nil {
- return nil
- }
-
- // We'll now run through each entry in the add index starting
- // at our starting index. We'll continue until we reach the
- // very end of the current key space.
- invoiceCursor := addIndex.ReadCursor()
-
- // We'll seek to the starting index, then manually advance the
- // cursor in order to skip the entry with the since add index.
- invoiceCursor.Seek(startIndex[:])
- addSeqNo, invoiceKey := invoiceCursor.Next()
-
- for ; addSeqNo != nil && bytes.Compare(addSeqNo, startIndex[:]) > 0; addSeqNo, invoiceKey = invoiceCursor.Next() {
-
- // For each key found, we'll look up the actual
- // invoice, then accumulate it into our return value.
- invoice, err := fetchInvoice(invoiceKey, invoices)
- if err != nil {
- return err
- }
-
- newInvoices = append(newInvoices, invoice)
- }
-
- return nil
- }, func() {
- newInvoices = nil
- })
- if err != nil {
- return nil, err
- }
-
- return newInvoices, nil
-}
-
-// LookupInvoice attempts to look up an invoice according to its 32 byte
-// payment hash. If an invoice which can settle the HTLC identified by the
-// passed payment hash isn't found, then an error is returned. Otherwise, the
-// full invoice is returned. Before setting the incoming HTLC, the values
-// SHOULD be checked to ensure the payer meets the agreed upon contractual
-// terms of the payment.
-func (d *DB) LookupInvoice(ref InvoiceRef) (Invoice, er.R) {
- var invoice Invoice
- err := kvdb.View(d, func(tx kvdb.RTx) er.R {
- invoices := tx.ReadBucket(invoiceBucket)
- if invoices == nil {
- return ErrNoInvoicesCreated.Default()
- }
- invoiceIndex := invoices.NestedReadBucket(invoiceIndexBucket)
- if invoiceIndex == nil {
- return ErrNoInvoicesCreated.Default()
- }
- payAddrIndex := tx.ReadBucket(payAddrIndexBucket)
-
- // Retrieve the invoice number for this invoice using the
- // provided invoice reference.
- invoiceNum, err := fetchInvoiceNumByRef(
- invoiceIndex, payAddrIndex, ref,
- )
- if err != nil {
- return err
- }
-
- // An invoice was found, retrieve the remainder of the invoice
- // body.
- i, err := fetchInvoice(invoiceNum, invoices)
- if err != nil {
- return err
- }
- invoice = i
-
- return nil
- }, func() {})
- if err != nil {
- return invoice, err
- }
-
- return invoice, nil
-}
-
-// fetchInvoiceNumByRef retrieve the invoice number for the provided invoice
-// reference. The payment address will be treated as the primary key, falling
-// back to the payment hash if nothing is found for the payment address. An
-// error is returned if the invoice is not found.
-func fetchInvoiceNumByRef(invoiceIndex, payAddrIndex kvdb.RBucket,
- ref InvoiceRef) ([]byte, er.R) {
-
- payHash := ref.PayHash()
- payAddr := ref.PayAddr()
-
- var (
- invoiceNumByHash = invoiceIndex.Get(payHash[:])
- invoiceNumByAddr []byte
- )
- if payAddr != nil {
- // Only allow lookups for payment address if it is not a blank
- // payment address, which is a special-cased value for legacy
- // keysend invoices.
- if *payAddr != BlankPayAddr {
- invoiceNumByAddr = payAddrIndex.Get(payAddr[:])
- }
- }
-
- switch {
-
- // If payment address and payment hash both reference an existing
- // invoice, ensure they reference the _same_ invoice.
- case invoiceNumByAddr != nil && invoiceNumByHash != nil:
- if !bytes.Equal(invoiceNumByAddr, invoiceNumByHash) {
- return nil, ErrInvRefEquivocation.Default()
- }
-
- return invoiceNumByAddr, nil
-
- // If we were only able to reference the invoice by hash, return the
- // corresponding invoice number. This can happen when no payment address
- // was provided, or if it didn't match anything in our records.
- case invoiceNumByHash != nil:
- return invoiceNumByHash, nil
-
- // Otherwise we don't know of the target invoice.
- default:
- return nil, ErrInvoiceNotFound.Default()
- }
-}
-
-// ScanInvoices scans trough all invoices and calls the passed scanFunc for
-// for each invoice with its respective payment hash. Additionally a reset()
-// closure is passed which is used to reset/initialize partial results and also
-// to signal if the kvdb.View transaction has been retried.
-func (d *DB) ScanInvoices(
- scanFunc func(lntypes.Hash, *Invoice) er.R, reset func()) er.R {
-
- return kvdb.View(d, func(tx kvdb.RTx) er.R {
- invoices := tx.ReadBucket(invoiceBucket)
- if invoices == nil {
- return ErrNoInvoicesCreated.Default()
- }
-
- invoiceIndex := invoices.NestedReadBucket(invoiceIndexBucket)
- if invoiceIndex == nil {
- // Mask the error if there's no invoice
- // index as that simply means there are no
- // invoices added yet to the DB. In this case
- // we simply return an empty list.
- return nil
- }
-
- return invoiceIndex.ForEach(func(k, v []byte) er.R {
- // Skip the special numInvoicesKey as that does not
- // point to a valid invoice.
- if bytes.Equal(k, numInvoicesKey) {
- return nil
- }
-
- if v == nil {
- return nil
- }
-
- invoice, err := fetchInvoice(v, invoices)
- if err != nil {
- return err
- }
-
- var paymentHash lntypes.Hash
- copy(paymentHash[:], k)
-
- return scanFunc(paymentHash, &invoice)
- })
- }, reset)
-}
-
-// InvoiceQuery represents a query to the invoice database. The query allows a
-// caller to retrieve all invoices starting from a particular add index and
-// limit the number of results returned.
-type InvoiceQuery struct {
- // IndexOffset is the offset within the add indices to start at. This
- // can be used to start the response at a particular invoice.
- IndexOffset uint64
-
- // NumMaxInvoices is the maximum number of invoices that should be
- // starting from the add index.
- NumMaxInvoices uint64
-
- // PendingOnly, if set, returns unsettled invoices starting from the
- // add index.
- PendingOnly bool
-
- // Reversed, if set, indicates that the invoices returned should start
- // from the IndexOffset and go backwards.
- Reversed bool
-}
-
-// InvoiceSlice is the response to a invoice query. It includes the original
-// query, the set of invoices that match the query, and an integer which
-// represents the offset index of the last item in the set of returned invoices.
-// This integer allows callers to resume their query using this offset in the
-// event that the query's response exceeds the maximum number of returnable
-// invoices.
-type InvoiceSlice struct {
- InvoiceQuery
-
- // Invoices is the set of invoices that matched the query above.
- Invoices []Invoice
-
- // FirstIndexOffset is the index of the first element in the set of
- // returned Invoices above. Callers can use this to resume their query
- // in the event that the slice has too many events to fit into a single
- // response.
- FirstIndexOffset uint64
-
- // LastIndexOffset is the index of the last element in the set of
- // returned Invoices above. Callers can use this to resume their query
- // in the event that the slice has too many events to fit into a single
- // response.
- LastIndexOffset uint64
-}
-
-// QueryInvoices allows a caller to query the invoice database for invoices
-// within the specified add index range.
-func (d *DB) QueryInvoices(q InvoiceQuery) (InvoiceSlice, er.R) {
- var resp InvoiceSlice
-
- err := kvdb.View(d, func(tx kvdb.RTx) er.R {
- // If the bucket wasn't found, then there aren't any invoices
- // within the database yet, so we can simply exit.
- invoices := tx.ReadBucket(invoiceBucket)
- if invoices == nil {
- return ErrNoInvoicesCreated.Default()
- }
-
- // Get the add index bucket which we will use to iterate through
- // our indexed invoices.
- invoiceAddIndex := invoices.NestedReadBucket(addIndexBucket)
- if invoiceAddIndex == nil {
- return ErrNoInvoicesCreated.Default()
- }
-
- // Create a paginator which reads from our add index bucket with
- // the parameters provided by the invoice query.
- paginator := newPaginator(
- invoiceAddIndex.ReadCursor(), q.Reversed, q.IndexOffset,
- q.NumMaxInvoices,
- )
-
- // accumulateInvoices looks up an invoice based on the index we
- // are given, adds it to our set of invoices if it has the right
- // characteristics for our query and returns the number of items
- // we have added to our set of invoices.
- accumulateInvoices := func(_, indexValue []byte) (bool, er.R) {
- invoice, err := fetchInvoice(indexValue, invoices)
- if err != nil {
- return false, err
- }
-
- // Skip any settled or canceled invoices if the caller
- // is only interested in pending ones.
- if q.PendingOnly && !invoice.IsPending() {
- return false, nil
- }
-
- // At this point, we've exhausted the offset, so we'll
- // begin collecting invoices found within the range.
- resp.Invoices = append(resp.Invoices, invoice)
- return true, nil
- }
-
- // Query our paginator using accumulateInvoices to build up a
- // set of invoices.
- if err := paginator.query(accumulateInvoices); err != nil {
- return err
- }
-
- // If we iterated through the add index in reverse order, then
- // we'll need to reverse the slice of invoices to return them in
- // forward order.
- if q.Reversed {
- numInvoices := len(resp.Invoices)
- for i := 0; i < numInvoices/2; i++ {
- opposite := numInvoices - i - 1
- resp.Invoices[i], resp.Invoices[opposite] =
- resp.Invoices[opposite], resp.Invoices[i]
- }
- }
-
- return nil
- }, func() {
- resp = InvoiceSlice{
- InvoiceQuery: q,
- }
- })
- if err != nil && !ErrNoInvoicesCreated.Is(err) {
- return resp, err
- }
-
- // Finally, record the indexes of the first and last invoices returned
- // so that the caller can resume from this point later on.
- if len(resp.Invoices) > 0 {
- resp.FirstIndexOffset = resp.Invoices[0].AddIndex
- resp.LastIndexOffset = resp.Invoices[len(resp.Invoices)-1].AddIndex
- }
-
- return resp, nil
-}
-
-// UpdateInvoice attempts to update an invoice corresponding to the passed
-// payment hash. If an invoice matching the passed payment hash doesn't exist
-// within the database, then the action will fail with a "not found" error.
-//
-// The update is performed inside the same database transaction that fetches the
-// invoice and is therefore atomic. The fields to update are controlled by the
-// supplied callback.
-func (d *DB) UpdateInvoice(ref InvoiceRef,
- callback InvoiceUpdateCallback) (*Invoice, er.R) {
-
- var updatedInvoice *Invoice
- err := kvdb.Update(d, func(tx kvdb.RwTx) er.R {
- invoices, err := tx.CreateTopLevelBucket(invoiceBucket)
- if err != nil {
- return err
- }
- invoiceIndex, err := invoices.CreateBucketIfNotExists(
- invoiceIndexBucket,
- )
- if err != nil {
- return err
- }
- settleIndex, err := invoices.CreateBucketIfNotExists(
- settleIndexBucket,
- )
- if err != nil {
- return err
- }
- payAddrIndex := tx.ReadBucket(payAddrIndexBucket)
-
- // Retrieve the invoice number for this invoice using the
- // provided invoice reference.
- invoiceNum, errr := fetchInvoiceNumByRef(
- invoiceIndex, payAddrIndex, ref,
- )
- if errr != nil {
- return errr
-
- }
- payHash := ref.PayHash()
- updatedInvoice, errr = d.updateInvoice(
- payHash, invoices, settleIndex, invoiceNum,
- callback,
- )
-
- return errr
- }, func() {
- updatedInvoice = nil
- })
-
- return updatedInvoice, err
-}
-
-// InvoicesSettledSince can be used by callers to catch up any settled invoices
-// they missed within the settled invoice time series. We'll return all known
-// settled invoice that have a settle index higher than the passed
-// sinceSettleIndex.
-//
-// NOTE: The index starts from 1, as a result. We enforce that specifying a
-// value below the starting index value is a noop.
-func (d *DB) InvoicesSettledSince(sinceSettleIndex uint64) ([]Invoice, er.R) {
- var settledInvoices []Invoice
-
- // If an index of zero was specified, then in order to maintain
- // backwards compat, we won't send out any new invoices.
- if sinceSettleIndex == 0 {
- return settledInvoices, nil
- }
-
- var startIndex [8]byte
- byteOrder.PutUint64(startIndex[:], sinceSettleIndex)
-
- err := kvdb.View(d, func(tx kvdb.RTx) er.R {
- invoices := tx.ReadBucket(invoiceBucket)
- if invoices == nil {
- return nil
- }
-
- settleIndex := invoices.NestedReadBucket(settleIndexBucket)
- if settleIndex == nil {
- return nil
- }
-
- // We'll now run through each entry in the add index starting
- // at our starting index. We'll continue until we reach the
- // very end of the current key space.
- invoiceCursor := settleIndex.ReadCursor()
-
- // We'll seek to the starting index, then manually advance the
- // cursor in order to skip the entry with the since add index.
- invoiceCursor.Seek(startIndex[:])
- seqNo, invoiceKey := invoiceCursor.Next()
-
- for ; seqNo != nil && bytes.Compare(seqNo, startIndex[:]) > 0; seqNo, invoiceKey = invoiceCursor.Next() {
-
- // For each key found, we'll look up the actual
- // invoice, then accumulate it into our return value.
- invoice, err := fetchInvoice(invoiceKey, invoices)
- if err != nil {
- return err
- }
-
- settledInvoices = append(settledInvoices, invoice)
- }
-
- return nil
- }, func() {
- settledInvoices = nil
- })
- if err != nil {
- return nil, err
- }
-
- return settledInvoices, nil
-}
-
-func putInvoice(invoices, invoiceIndex, payAddrIndex, addIndex kvdb.RwBucket,
- i *Invoice, invoiceNum uint32, paymentHash lntypes.Hash) (
- uint64, er.R) {
-
- // Create the invoice key which is just the big-endian representation
- // of the invoice number.
- var invoiceKey [4]byte
- byteOrder.PutUint32(invoiceKey[:], invoiceNum)
-
- // Increment the num invoice counter index so the next invoice bares
- // the proper ID.
- var scratch [4]byte
- invoiceCounter := invoiceNum + 1
- byteOrder.PutUint32(scratch[:], invoiceCounter)
- if err := invoiceIndex.Put(numInvoicesKey, scratch[:]); err != nil {
- return 0, err
- }
-
- // Add the payment hash to the invoice index. This will let us quickly
- // identify if we can settle an incoming payment, and also to possibly
- // allow a single invoice to have multiple payment installations.
- err := invoiceIndex.Put(paymentHash[:], invoiceKey[:])
- if err != nil {
- return 0, err
- }
- // Add the invoice to the payment address index, but only if the invoice
- // has a non-zero payment address. The all-zero payment address is still
- // in use by legacy keysend, so we special-case here to avoid
- // collisions.
- if i.Terms.PaymentAddr != BlankPayAddr {
- err = payAddrIndex.Put(i.Terms.PaymentAddr[:], invoiceKey[:])
- if err != nil {
- return 0, err
- }
- }
-
- // Next, we'll obtain the next add invoice index (sequence
- // number), so we can properly place this invoice within this
- // event stream.
- nextAddSeqNo, errr := addIndex.NextSequence()
- if errr != nil {
- return 0, errr
- }
-
- // With the next sequence obtained, we'll updating the event series in
- // the add index bucket to map this current add counter to the index of
- // this new invoice.
- var seqNoBytes [8]byte
- byteOrder.PutUint64(seqNoBytes[:], nextAddSeqNo)
- if err := addIndex.Put(seqNoBytes[:], invoiceKey[:]); err != nil {
- return 0, err
- }
-
- i.AddIndex = nextAddSeqNo
-
- // Finally, serialize the invoice itself to be written to the disk.
- var buf bytes.Buffer
- if err := serializeInvoice(&buf, i); err != nil {
- return 0, err
- }
-
- if err := invoices.Put(invoiceKey[:], buf.Bytes()); err != nil {
- return 0, err
- }
-
- return nextAddSeqNo, nil
-}
-
-// serializeInvoice serializes an invoice to a writer.
-//
-// Note: this function is in use for a migration. Before making changes that
-// would modify the on disk format, make a copy of the original code and store
-// it with the migration.
-func serializeInvoice(w io.Writer, i *Invoice) er.R {
- creationDateBytes, errr := i.CreationDate.MarshalBinary()
- if errr != nil {
- return er.E(errr)
- }
-
- settleDateBytes, errr := i.SettleDate.MarshalBinary()
- if errr != nil {
- return er.E(errr)
- }
-
- var fb bytes.Buffer
- err := i.Terms.Features.EncodeBase256(&fb)
- if err != nil {
- return err
- }
- featureBytes := fb.Bytes()
-
- preimage := [32]byte(unknownPreimage)
- if i.Terms.PaymentPreimage != nil {
- preimage = *i.Terms.PaymentPreimage
- if preimage == unknownPreimage {
- return er.New("cannot use all-zeroes preimage")
- }
- }
- value := uint64(i.Terms.Value)
- cltvDelta := uint32(i.Terms.FinalCltvDelta)
- expiry := uint64(i.Terms.Expiry)
-
- amtPaid := uint64(i.AmtPaid)
- state := uint8(i.State)
-
- var hodlInvoice uint8
- if i.HodlInvoice {
- hodlInvoice = 1
- }
-
- tlvStream, err := tlv.NewStream(
- // Memo and payreq.
- tlv.MakePrimitiveRecord(memoType, &i.Memo),
- tlv.MakePrimitiveRecord(payReqType, &i.PaymentRequest),
-
- // Add/settle metadata.
- tlv.MakePrimitiveRecord(createTimeType, &creationDateBytes),
- tlv.MakePrimitiveRecord(settleTimeType, &settleDateBytes),
- tlv.MakePrimitiveRecord(addIndexType, &i.AddIndex),
- tlv.MakePrimitiveRecord(settleIndexType, &i.SettleIndex),
-
- // Terms.
- tlv.MakePrimitiveRecord(preimageType, &preimage),
- tlv.MakePrimitiveRecord(valueType, &value),
- tlv.MakePrimitiveRecord(cltvDeltaType, &cltvDelta),
- tlv.MakePrimitiveRecord(expiryType, &expiry),
- tlv.MakePrimitiveRecord(paymentAddrType, &i.Terms.PaymentAddr),
- tlv.MakePrimitiveRecord(featuresType, &featureBytes),
-
- // Invoice state.
- tlv.MakePrimitiveRecord(invStateType, &state),
- tlv.MakePrimitiveRecord(amtPaidType, &amtPaid),
-
- tlv.MakePrimitiveRecord(hodlInvoiceType, &hodlInvoice),
- )
- if err != nil {
- return err
- }
-
- var b bytes.Buffer
- if err = tlvStream.Encode(&b); err != nil {
- return err
- }
-
- err = util.WriteBin(w, byteOrder, uint64(b.Len()))
- if err != nil {
- return err
- }
-
- if _, err = util.Write(w, b.Bytes()); err != nil {
- return err
- }
-
- return serializeHtlcs(w, i.Htlcs)
-}
-
-// serializeHtlcs serializes a map containing circuit keys and invoice htlcs to
-// a writer.
-func serializeHtlcs(w io.Writer, htlcs map[CircuitKey]*InvoiceHTLC) er.R {
- for key, htlc := range htlcs {
- // Encode the htlc in a tlv stream.
- chanID := key.ChanID.ToUint64()
- amt := uint64(htlc.Amt)
- mppTotalAmt := uint64(htlc.MppTotalAmt)
- acceptTime := uint64(htlc.AcceptTime.UnixNano())
- resolveTime := uint64(htlc.ResolveTime.UnixNano())
- state := uint8(htlc.State)
-
- var records []tlv.Record
- records = append(records,
- tlv.MakePrimitiveRecord(chanIDType, &chanID),
- tlv.MakePrimitiveRecord(htlcIDType, &key.HtlcID),
- tlv.MakePrimitiveRecord(amtType, &amt),
- tlv.MakePrimitiveRecord(
- acceptHeightType, &htlc.AcceptHeight,
- ),
- tlv.MakePrimitiveRecord(acceptTimeType, &acceptTime),
- tlv.MakePrimitiveRecord(resolveTimeType, &resolveTime),
- tlv.MakePrimitiveRecord(expiryHeightType, &htlc.Expiry),
- tlv.MakePrimitiveRecord(htlcStateType, &state),
- tlv.MakePrimitiveRecord(mppTotalAmtType, &mppTotalAmt),
- )
-
- // Convert the custom records to tlv.Record types that are ready
- // for serialization.
- customRecords := tlv.MapToRecords(htlc.CustomRecords)
-
- // Append the custom records. Their ids are in the experimental
- // range and sorted, so there is no need to sort again.
- records = append(records, customRecords...)
-
- tlvStream, err := tlv.NewStream(records...)
- if err != nil {
- return err
- }
-
- var b bytes.Buffer
- if err := tlvStream.Encode(&b); err != nil {
- return err
- }
-
- // Write the length of the tlv stream followed by the stream
- // bytes.
- err = util.WriteBin(w, byteOrder, uint64(b.Len()))
- if err != nil {
- return err
- }
-
- if _, err := util.Write(w, b.Bytes()); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func fetchInvoice(invoiceNum []byte, invoices kvdb.RBucket) (Invoice, er.R) {
- invoiceBytes := invoices.Get(invoiceNum)
- if invoiceBytes == nil {
- return Invoice{}, ErrInvoiceNotFound.Default()
- }
-
- invoiceReader := bytes.NewReader(invoiceBytes)
-
- return deserializeInvoice(invoiceReader)
-}
-
-func deserializeInvoice(r io.Reader) (Invoice, er.R) {
- var (
- preimageBytes [32]byte
- value uint64
- cltvDelta uint32
- expiry uint64
- amtPaid uint64
- state uint8
- hodlInvoice uint8
-
- creationDateBytes []byte
- settleDateBytes []byte
- featureBytes []byte
- )
-
- var i Invoice
- tlvStream, err := tlv.NewStream(
- // Memo and payreq.
- tlv.MakePrimitiveRecord(memoType, &i.Memo),
- tlv.MakePrimitiveRecord(payReqType, &i.PaymentRequest),
-
- // Add/settle metadata.
- tlv.MakePrimitiveRecord(createTimeType, &creationDateBytes),
- tlv.MakePrimitiveRecord(settleTimeType, &settleDateBytes),
- tlv.MakePrimitiveRecord(addIndexType, &i.AddIndex),
- tlv.MakePrimitiveRecord(settleIndexType, &i.SettleIndex),
-
- // Terms.
- tlv.MakePrimitiveRecord(preimageType, &preimageBytes),
- tlv.MakePrimitiveRecord(valueType, &value),
- tlv.MakePrimitiveRecord(cltvDeltaType, &cltvDelta),
- tlv.MakePrimitiveRecord(expiryType, &expiry),
- tlv.MakePrimitiveRecord(paymentAddrType, &i.Terms.PaymentAddr),
- tlv.MakePrimitiveRecord(featuresType, &featureBytes),
-
- // Invoice state.
- tlv.MakePrimitiveRecord(invStateType, &state),
- tlv.MakePrimitiveRecord(amtPaidType, &amtPaid),
-
- tlv.MakePrimitiveRecord(hodlInvoiceType, &hodlInvoice),
- )
- if err != nil {
- return i, err
- }
-
- var bodyLen int64
- err = util.ReadBin(r, byteOrder, &bodyLen)
- if err != nil {
- return i, err
- }
-
- lr := io.LimitReader(r, bodyLen)
- if err = tlvStream.Decode(lr); err != nil {
- return i, err
- }
-
- preimage := lntypes.Preimage(preimageBytes)
- if preimage != unknownPreimage {
- i.Terms.PaymentPreimage = &preimage
- }
-
- i.Terms.Value = lnwire.MilliSatoshi(value)
- i.Terms.FinalCltvDelta = int32(cltvDelta)
- i.Terms.Expiry = time.Duration(expiry)
- i.AmtPaid = lnwire.MilliSatoshi(amtPaid)
- i.State = ContractState(state)
-
- if hodlInvoice != 0 {
- i.HodlInvoice = true
- }
-
- errr := i.CreationDate.UnmarshalBinary(creationDateBytes)
- if errr != nil {
- return i, er.E(errr)
- }
-
- errr = i.SettleDate.UnmarshalBinary(settleDateBytes)
- if errr != nil {
- return i, er.E(errr)
- }
-
- rawFeatures := lnwire.NewRawFeatureVector()
- err = rawFeatures.DecodeBase256(
- bytes.NewReader(featureBytes), len(featureBytes),
- )
- if err != nil {
- return i, err
- }
-
- i.Terms.Features = lnwire.NewFeatureVector(
- rawFeatures, lnwire.Features,
- )
-
- i.Htlcs, err = deserializeHtlcs(r)
- return i, err
-}
-
-// deserializeHtlcs reads a list of invoice htlcs from a reader and returns it
-// as a map.
-func deserializeHtlcs(r io.Reader) (map[CircuitKey]*InvoiceHTLC, er.R) {
- htlcs := make(map[CircuitKey]*InvoiceHTLC)
-
- for {
- // Read the length of the tlv stream for this htlc.
- var streamLen int64
- if err := util.ReadBin(r, byteOrder, &streamLen); err != nil {
- if er.EOF.Is(err) {
- break
- }
-
- return nil, err
- }
-
- // Limit the reader so that it stops at the end of this htlc's
- // stream.
- htlcReader := io.LimitReader(r, streamLen)
-
- // Decode the contents into the htlc fields.
- var (
- htlc InvoiceHTLC
- key CircuitKey
- chanID uint64
- state uint8
- acceptTime, resolveTime uint64
- amt, mppTotalAmt uint64
- )
- tlvStream, err := tlv.NewStream(
- tlv.MakePrimitiveRecord(chanIDType, &chanID),
- tlv.MakePrimitiveRecord(htlcIDType, &key.HtlcID),
- tlv.MakePrimitiveRecord(amtType, &amt),
- tlv.MakePrimitiveRecord(
- acceptHeightType, &htlc.AcceptHeight,
- ),
- tlv.MakePrimitiveRecord(acceptTimeType, &acceptTime),
- tlv.MakePrimitiveRecord(resolveTimeType, &resolveTime),
- tlv.MakePrimitiveRecord(expiryHeightType, &htlc.Expiry),
- tlv.MakePrimitiveRecord(htlcStateType, &state),
- tlv.MakePrimitiveRecord(mppTotalAmtType, &mppTotalAmt),
- )
- if err != nil {
- return nil, err
- }
-
- parsedTypes, err := tlvStream.DecodeWithParsedTypes(htlcReader)
- if err != nil {
- return nil, err
- }
-
- key.ChanID = lnwire.NewShortChanIDFromInt(chanID)
- htlc.AcceptTime = time.Unix(0, int64(acceptTime))
- htlc.ResolveTime = time.Unix(0, int64(resolveTime))
- htlc.State = HtlcState(state)
- htlc.Amt = lnwire.MilliSatoshi(amt)
- htlc.MppTotalAmt = lnwire.MilliSatoshi(mppTotalAmt)
-
- // Reconstruct the custom records fields from the parsed types
- // map return from the tlv parser.
- htlc.CustomRecords = hop.NewCustomRecords(parsedTypes)
-
- htlcs[key] = &htlc
- }
-
- return htlcs, nil
-}
-
-// copySlice allocates a new slice and copies the source into it.
-func copySlice(src []byte) []byte {
- dest := make([]byte, len(src))
- copy(dest, src)
- return dest
-}
-
-// copyInvoiceHTLC makes a deep copy of the supplied invoice HTLC.
-func copyInvoiceHTLC(src *InvoiceHTLC) *InvoiceHTLC {
- result := *src
-
- // Make a copy of the CustomSet map.
- result.CustomRecords = make(record.CustomSet)
- for k, v := range src.CustomRecords {
- result.CustomRecords[k] = v
- }
-
- return &result
-}
-
-// copyInvoice makes a deep copy of the supplied invoice.
-func copyInvoice(src *Invoice) *Invoice {
- dest := Invoice{
- Memo: copySlice(src.Memo),
- PaymentRequest: copySlice(src.PaymentRequest),
- CreationDate: src.CreationDate,
- SettleDate: src.SettleDate,
- Terms: src.Terms,
- AddIndex: src.AddIndex,
- SettleIndex: src.SettleIndex,
- State: src.State,
- AmtPaid: src.AmtPaid,
- Htlcs: make(
- map[CircuitKey]*InvoiceHTLC, len(src.Htlcs),
- ),
- HodlInvoice: src.HodlInvoice,
- }
-
- dest.Terms.Features = src.Terms.Features.Clone()
-
- if src.Terms.PaymentPreimage != nil {
- preimage := *src.Terms.PaymentPreimage
- dest.Terms.PaymentPreimage = &preimage
- }
-
- for k, v := range src.Htlcs {
- dest.Htlcs[k] = copyInvoiceHTLC(v)
- }
-
- return &dest
-}
-
-// updateInvoice fetches the invoice, obtains the update descriptor from the
-// callback and applies the updates in a single db transaction.
-func (d *DB) updateInvoice(hash lntypes.Hash, invoices, settleIndex kvdb.RwBucket,
- invoiceNum []byte, callback InvoiceUpdateCallback) (*Invoice, er.R) {
-
- invoice, err := fetchInvoice(invoiceNum, invoices)
- if err != nil {
- return nil, err
- }
-
- // Create deep copy to prevent any accidental modification in the
- // callback.
- invoiceCopy := copyInvoice(&invoice)
-
- // Call the callback and obtain the update descriptor.
- update, err := callback(invoiceCopy)
- if err != nil {
- return &invoice, err
- }
-
- // If there is nothing to update, return early.
- if update == nil {
- return &invoice, nil
- }
-
- now := d.clock.Now()
-
- // Update invoice state if the update descriptor indicates an invoice
- // state change.
- if update.State != nil {
- err := updateInvoiceState(&invoice, hash, *update.State)
- if err != nil {
- return nil, err
- }
-
- if update.State.NewState == ContractSettled {
- err := setSettleMetaFields(
- settleIndex, invoiceNum, &invoice, now,
- )
- if err != nil {
- return nil, err
- }
- }
- }
-
- // Process add actions from update descriptor.
- for key, htlcUpdate := range update.AddHtlcs {
- if _, exists := invoice.Htlcs[key]; exists {
- return nil, er.Errorf("duplicate add of htlc %v", key)
- }
-
- // Force caller to supply htlc without custom records in a
- // consistent way.
- if htlcUpdate.CustomRecords == nil {
- return nil, er.New("nil custom records map")
- }
-
- htlc := &InvoiceHTLC{
- Amt: htlcUpdate.Amt,
- MppTotalAmt: htlcUpdate.MppTotalAmt,
- Expiry: htlcUpdate.Expiry,
- AcceptHeight: uint32(htlcUpdate.AcceptHeight),
- AcceptTime: now,
- State: HtlcStateAccepted,
- CustomRecords: htlcUpdate.CustomRecords,
- }
-
- invoice.Htlcs[key] = htlc
- }
-
- // Align htlc states with invoice state and recalculate amount paid.
- var (
- amtPaid lnwire.MilliSatoshi
- cancelHtlcs = update.CancelHtlcs
- )
- for key, htlc := range invoice.Htlcs {
- // Check whether this htlc needs to be canceled. If it does,
- // update the htlc state to Canceled.
- _, cancel := cancelHtlcs[key]
- if cancel {
- // Consistency check to verify that there is no overlap
- // between the add and cancel sets.
- if _, added := update.AddHtlcs[key]; added {
- return nil, er.Errorf("added htlc %v canceled",
- key)
- }
-
- err := cancelSingleHtlc(now, htlc, invoice.State)
- if err != nil {
- return nil, err
- }
-
- // Delete processed cancel action, so that we can check
- // later that there are no actions left.
- delete(cancelHtlcs, key)
-
- continue
- }
-
- // The invoice state may have changed and this could have
- // implications for the states of the individual htlcs. Align
- // the htlc state with the current invoice state.
- err := updateHtlc(now, htlc, invoice.State)
- if err != nil {
- return nil, err
- }
-
- // Update the running amount paid to this invoice. We don't
- // include accepted htlcs when the invoice is still open.
- if invoice.State != ContractOpen &&
- (htlc.State == HtlcStateAccepted ||
- htlc.State == HtlcStateSettled) {
-
- amtPaid += htlc.Amt
- }
- }
- invoice.AmtPaid = amtPaid
-
- // Verify that we didn't get an action for htlcs that are not present on
- // the invoice.
- if len(cancelHtlcs) > 0 {
- return nil, er.New("cancel action on non-existent htlc(s)")
- }
-
- // Reserialize and update invoice.
- var buf bytes.Buffer
- if err := serializeInvoice(&buf, &invoice); err != nil {
- return nil, err
- }
-
- if err := invoices.Put(invoiceNum[:], buf.Bytes()); err != nil {
- return nil, err
- }
-
- return &invoice, nil
-}
-
-// updateInvoiceState validates and processes an invoice state update.
-func updateInvoiceState(invoice *Invoice, hash lntypes.Hash,
- update InvoiceStateUpdateDesc) er.R {
-
- // Returning to open is never allowed from any state.
- if update.NewState == ContractOpen {
- return ErrInvoiceCannotOpen.Default()
- }
-
- switch invoice.State {
-
- // Once a contract is accepted, we can only transition to settled or
- // canceled. Forbid transitioning back into this state. Otherwise this
- // state is identical to ContractOpen, so we fallthrough to apply the
- // same checks that we apply to open invoices.
- case ContractAccepted:
- if update.NewState == ContractAccepted {
- return ErrInvoiceCannotAccept.Default()
- }
-
- fallthrough
-
- // If a contract is open, permit a state transition to accepted, settled
- // or canceled. The only restriction is on transitioning to settled
- // where we ensure the preimage is valid.
- case ContractOpen:
- if update.NewState == ContractSettled {
- // Validate preimage.
- switch {
- case update.Preimage != nil:
- if update.Preimage.Hash() != hash {
- return ErrInvoicePreimageMismatch.Default()
- }
- invoice.Terms.PaymentPreimage = update.Preimage
-
- case invoice.Terms.PaymentPreimage == nil:
- return er.New("unknown preimage")
- }
- }
-
- // Once settled, we are in a terminal state.
- case ContractSettled:
- return ErrInvoiceAlreadySettled.Default()
-
- // Once canceled, we are in a terminal state.
- case ContractCanceled:
- return ErrInvoiceAlreadyCanceled.Default()
-
- default:
- return er.New("unknown state transition")
- }
-
- invoice.State = update.NewState
-
- return nil
-}
-
-// cancelSingleHtlc validates cancelation of a single htlc and update its state.
-func cancelSingleHtlc(resolveTime time.Time, htlc *InvoiceHTLC,
- invState ContractState) er.R {
-
- // It is only possible to cancel individual htlcs on an open invoice.
- if invState != ContractOpen {
- return er.Errorf("htlc canceled on invoice in "+
- "state %v", invState)
- }
-
- // It is only possible if the htlc is still pending.
- if htlc.State != HtlcStateAccepted {
- return er.Errorf("htlc canceled in state %v",
- htlc.State)
- }
-
- htlc.State = HtlcStateCanceled
- htlc.ResolveTime = resolveTime
-
- return nil
-}
-
-// updateHtlc aligns the state of an htlc with the given invoice state.
-func updateHtlc(resolveTime time.Time, htlc *InvoiceHTLC,
- invState ContractState) er.R {
-
- switch invState {
-
- case ContractSettled:
- if htlc.State == HtlcStateAccepted {
- htlc.State = HtlcStateSettled
- htlc.ResolveTime = resolveTime
- }
-
- case ContractCanceled:
- switch htlc.State {
-
- case HtlcStateAccepted:
- htlc.State = HtlcStateCanceled
- htlc.ResolveTime = resolveTime
-
- case HtlcStateSettled:
- return er.Errorf("cannot have a settled htlc with " +
- "invoice in state canceled")
- }
-
- case ContractOpen, ContractAccepted:
- if htlc.State == HtlcStateSettled {
- return er.Errorf("cannot have a settled htlc with "+
- "invoice in state %v", invState)
- }
-
- default:
- return er.New("unknown state transition")
- }
-
- return nil
-}
-
-// setSettleMetaFields updates the metadata associated with settlement of an
-// invoice.
-func setSettleMetaFields(settleIndex kvdb.RwBucket, invoiceNum []byte,
- invoice *Invoice, now time.Time) er.R {
-
- // Now that we know the invoice hasn't already been settled, we'll
- // update the settle index so we can place this settle event in the
- // proper location within our time series.
- nextSettleSeqNo, err := settleIndex.NextSequence()
- if err != nil {
- return err
- }
-
- var seqNoBytes [8]byte
- byteOrder.PutUint64(seqNoBytes[:], nextSettleSeqNo)
- if err := settleIndex.Put(seqNoBytes[:], invoiceNum); err != nil {
- return err
- }
-
- invoice.SettleDate = now
- invoice.SettleIndex = nextSettleSeqNo
-
- return nil
-}
-
-// InvoiceDeleteRef holds a refererence to an invoice to be deleted.
-type InvoiceDeleteRef struct {
- // PayHash is the payment hash of the target invoice. All invoices are
- // currently indexed by payment hash.
- PayHash lntypes.Hash
-
- // PayAddr is the payment addr of the target invoice. Newer invoices
- // (0.11 and up) are indexed by payment address in addition to payment
- // hash, but pre 0.8 invoices do not have one at all.
- PayAddr *[32]byte
-
- // AddIndex is the add index of the invoice.
- AddIndex uint64
-
- // SettleIndex is the settle index of the invoice.
- SettleIndex uint64
-}
-
-// DeleteInvoice attempts to delete the passed invoices from the database in
-// one transaction. The passed delete references hold all keys required to
-// delete the invoices without also needing to deserialze them.
-func (d *DB) DeleteInvoice(invoicesToDelete []InvoiceDeleteRef) er.R {
- err := kvdb.Update(d, func(tx kvdb.RwTx) er.R {
- invoices := tx.ReadWriteBucket(invoiceBucket)
- if invoices == nil {
- return ErrNoInvoicesCreated.Default()
- }
-
- invoiceIndex := invoices.NestedReadWriteBucket(
- invoiceIndexBucket,
- )
- if invoiceIndex == nil {
- return ErrNoInvoicesCreated.Default()
- }
-
- invoiceAddIndex := invoices.NestedReadWriteBucket(
- addIndexBucket,
- )
- if invoiceAddIndex == nil {
- return ErrNoInvoicesCreated.Default()
- }
- // settleIndex can be nil, as the bucket is created lazily
- // when the first invoice is settled.
- settleIndex := invoices.NestedReadWriteBucket(settleIndexBucket)
-
- payAddrIndex := tx.ReadWriteBucket(payAddrIndexBucket)
-
- for _, ref := range invoicesToDelete {
- // Fetch the invoice key for using it to check for
- // consistency and also to delete from the invoice index.
- invoiceKey := invoiceIndex.Get(ref.PayHash[:])
- if invoiceKey == nil {
- return ErrInvoiceNotFound.Default()
- }
-
- err := invoiceIndex.Delete(ref.PayHash[:])
- if err != nil {
- return err
- }
-
- // Delete payment address index reference if there's a
- // valid payment address passed.
- if ref.PayAddr != nil {
- // To ensure consistency check that the already
- // fetched invoice key matches the one in the
- // payment address index.
- key := payAddrIndex.Get(ref.PayAddr[:])
- if !bytes.Equal(key, invoiceKey) {
- return er.Errorf("unknown invoice")
- }
-
- // Delete from the payment address index.
- err := payAddrIndex.Delete(ref.PayAddr[:])
- if err != nil {
- return err
- }
- }
-
- var addIndexKey [8]byte
- byteOrder.PutUint64(addIndexKey[:], ref.AddIndex)
-
- // To ensure consistency check that the key stored in
- // the add index also matches the previously fetched
- // invoice key.
- key := invoiceAddIndex.Get(addIndexKey[:])
- if !bytes.Equal(key, invoiceKey) {
- return er.Errorf("unknown invoice")
- }
-
- // Remove from the add index.
- err = invoiceAddIndex.Delete(addIndexKey[:])
- if err != nil {
- return err
- }
-
- // Remove from the settle index if available and
- // if the invoice is settled.
- if settleIndex != nil && ref.SettleIndex > 0 {
- var settleIndexKey [8]byte
- byteOrder.PutUint64(
- settleIndexKey[:], ref.SettleIndex,
- )
-
- // To ensure consistency check that the already
- // fetched invoice key matches the one in the
- // settle index
- key := settleIndex.Get(settleIndexKey[:])
- if !bytes.Equal(key, invoiceKey) {
- return er.Errorf("unknown invoice")
- }
-
- err = settleIndex.Delete(settleIndexKey[:])
- if err != nil {
- return err
- }
- }
-
- // Finally remove the serialized invoice from the
- // invoice bucket.
- err = invoices.Delete(invoiceKey)
- if err != nil {
- return err
- }
- }
-
- return nil
- }, func() {})
-
- return err
-}
diff --git a/lnd/channeldb/kvdb/backend.go b/lnd/channeldb/kvdb/backend.go
deleted file mode 100644
index f186a7d8..00000000
--- a/lnd/channeldb/kvdb/backend.go
+++ /dev/null
@@ -1,250 +0,0 @@
-package kvdb
-
-import (
- "encoding/binary"
- "fmt"
- "io/ioutil"
- "os"
- "path/filepath"
- "time"
-
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/pktlog/log"
- _ "github.com/pkt-cash/pktd/pktwallet/walletdb/bdb" // Import to register backend.
-)
-
-const (
- // DefaultTempDBFileName is the default name of the temporary bolt DB
- // file that we'll use to atomically compact the primary DB file on
- // startup.
- DefaultTempDBFileName = "temp-dont-use.db"
-
- // LastCompactionFileNameSuffix is the suffix we append to the file name
- // of a database file to record the timestamp when the last compaction
- // occurred.
- LastCompactionFileNameSuffix = ".last-compacted"
-)
-
-var (
- byteOrder = binary.BigEndian
-)
-
-// fileExists returns true if the file exists, and false otherwise.
-func fileExists(path string) bool {
- if _, err := os.Stat(path); err != nil {
- if os.IsNotExist(err) {
- return false
- }
- }
-
- return true
-}
-
-// BoltBackendConfig is a struct that holds settings specific to the bolt
-// database backend.
-type BoltBackendConfig struct {
- // DBPath is the directory path in which the database file should be
- // stored.
- DBPath string
-
- // DBFileName is the name of the database file.
- DBFileName string
-
- // NoFreelistSync, if true, prevents the database from syncing its
- // freelist to disk, resulting in improved performance at the expense of
- // increased startup time.
- NoFreelistSync bool
-
- // AutoCompact specifies if a Bolt based database backend should be
- // automatically compacted on startup (if the minimum age of the
- // database file is reached). This will require additional disk space
- // for the compacted copy of the database but will result in an overall
- // lower database size after the compaction.
- AutoCompact bool
-
- // AutoCompactMinAge specifies the minimum time that must have passed
- // since a bolt database file was last compacted for the compaction to
- // be considered again.
- AutoCompactMinAge time.Duration
-}
-
-// GetBoltBackend opens (or creates if doesn't exits) a bbolt backed database
-// and returns a kvdb.Backend wrapping it.
-func GetBoltBackend(cfg *BoltBackendConfig) (Backend, er.R) {
- dbFilePath := filepath.Join(cfg.DBPath, cfg.DBFileName)
-
- // Is this a new database?
- if !fileExists(dbFilePath) {
- if !fileExists(cfg.DBPath) {
- if err := os.MkdirAll(cfg.DBPath, 0700); err != nil {
- return nil, er.E(err)
- }
- }
-
- return Create(BoltBackendName, dbFilePath, cfg.NoFreelistSync)
- }
-
- // This is an existing database. We might want to compact it on startup
- // to free up some space.
- if cfg.AutoCompact {
- if err := compactAndSwap(cfg); err != nil {
- return nil, err
- }
- }
-
- return Open(BoltBackendName, dbFilePath, cfg.NoFreelistSync)
-}
-
-// compactAndSwap will attempt to write a new temporary DB file to disk with
-// the compacted database content, then atomically swap (via rename) the old
-// file for the new file by updating the name of the new file to the old.
-func compactAndSwap(cfg *BoltBackendConfig) er.R {
- sourceName := cfg.DBFileName
-
- // If the main DB file isn't set, then we can't proceed.
- if sourceName == "" {
- return er.Errorf("cannot compact DB with empty name")
- }
- sourceFilePath := filepath.Join(cfg.DBPath, sourceName)
- tempDestFilePath := filepath.Join(cfg.DBPath, DefaultTempDBFileName)
-
- // Let's find out how long ago the last compaction of the source file
- // occurred and possibly skip compacting it again now.
- lastCompactionDate, err := lastCompactionDate(sourceFilePath)
- if err != nil {
- return er.Errorf("cannot determine last compaction date of "+
- "source DB file: %v", err)
- }
- compactAge := time.Since(lastCompactionDate)
- if cfg.AutoCompactMinAge != 0 && compactAge <= cfg.AutoCompactMinAge {
- log.Infof("Not compacting database file at %v, it was last "+
- "compacted at %v (%v ago), min age is set to %v",
- sourceFilePath, lastCompactionDate,
- compactAge.Truncate(time.Second), cfg.AutoCompactMinAge)
- return nil
- }
-
- log.Infof("Compacting database file at %v", sourceFilePath)
-
- // If the old temporary DB file still exists, then we'll delete it
- // before proceeding.
- if _, err := os.Stat(tempDestFilePath); err == nil {
- log.Infof("Found old temp DB @ %v, removing before swap",
- tempDestFilePath)
-
- err = os.Remove(tempDestFilePath)
- if err != nil {
- return er.Errorf("unable to remove old temp DB file: "+
- "%v", err)
- }
- }
-
- // Now that we know the staging area is clear, we'll create the new
- // temporary DB file and close it before we write the new DB to it.
- tempFile, errr := os.Create(tempDestFilePath)
- if errr != nil {
- return er.Errorf("unable to create temp DB file: %v", errr)
- }
- if err := tempFile.Close(); err != nil {
- return er.Errorf("unable to close file: %v", err)
- }
-
- // With the file created, we'll start the compaction and remove the
- // temporary file all together once this method exits.
- defer func() {
- // This will only succeed if the rename below fails. If the
- // compaction is successful, the file won't exist on exit
- // anymore so no need to log an error here.
- _ = os.Remove(tempDestFilePath)
- }()
- c := &compacter{
- srcPath: sourceFilePath,
- dstPath: tempDestFilePath,
- }
- initialSize, newSize, err := c.execute()
- if err != nil {
- return er.Errorf("error during compact: %v", err)
- }
-
- log.Infof("DB compaction of %v successful, %d -> %d bytes (gain=%.2fx)",
- sourceFilePath, initialSize, newSize,
- float64(initialSize)/float64(newSize))
-
- // We try to store the current timestamp in a file with the suffix
- // .last-compacted so we can figure out how long ago the last compaction
- // was. But since this shouldn't fail the compaction process itself, we
- // only log the error. Worst case if this file cannot be written is that
- // we compact on every startup.
- err = updateLastCompactionDate(sourceFilePath)
- if err != nil {
- log.Warnf("Could not update last compaction timestamp in "+
- "%s%s: %v", sourceFilePath,
- LastCompactionFileNameSuffix, err)
- }
-
- log.Infof("Swapping old DB file from %v to %v", tempDestFilePath,
- sourceFilePath)
-
- // Finally, we'll attempt to atomically rename the temporary file to
- // the main back up file. If this succeeds, then we'll only have a
- // single file on disk once this method exits.
- return er.E(os.Rename(tempDestFilePath, sourceFilePath))
-}
-
-// lastCompactionDate returns the date the given database file was last
-// compacted or a zero time.Time if no compaction was recorded before. The
-// compaction date is read from a file in the same directory and with the same
-// name as the DB file, but with the suffix ".last-compacted".
-func lastCompactionDate(dbFile string) (time.Time, er.R) {
- zeroTime := time.Unix(0, 0)
-
- tsFile := fmt.Sprintf("%s%s", dbFile, LastCompactionFileNameSuffix)
- if !fileExists(tsFile) {
- return zeroTime, nil
- }
-
- tsBytes, err := ioutil.ReadFile(tsFile)
- if err != nil {
- return zeroTime, er.E(err)
- }
-
- tsNano := byteOrder.Uint64(tsBytes)
- return time.Unix(0, int64(tsNano)), nil
-}
-
-// updateLastCompactionDate stores the current time as a timestamp in a file
-// in the same directory and with the same name as the DB file, but with the
-// suffix ".last-compacted".
-func updateLastCompactionDate(dbFile string) er.R {
- var tsBytes [8]byte
- byteOrder.PutUint64(tsBytes[:], uint64(time.Now().UnixNano()))
-
- tsFile := fmt.Sprintf("%s%s", dbFile, LastCompactionFileNameSuffix)
- return er.E(ioutil.WriteFile(tsFile, tsBytes[:], 0600))
-}
-
-// GetTestBackend opens (or creates if doesn't exist) a bbolt or etcd
-// backed database (for testing), and returns a kvdb.Backend and a cleanup
-// func. Whether to create/open bbolt or embedded etcd database is based
-// on the TestBackend constant which is conditionally compiled with build tag.
-// The passed path is used to hold all db files, while the name is only used
-// for bbolt.
-func GetTestBackend(path, name string) (Backend, func(), er.R) {
- empty := func() {}
-
- if TestBackend == BoltBackendName {
- db, err := GetBoltBackend(&BoltBackendConfig{
- DBPath: path,
- DBFileName: name,
- NoFreelistSync: true,
- })
- if err != nil {
- return nil, nil, err
- }
- return db, empty, nil
- } else if TestBackend == EtcdBackendName {
- return GetEtcdTestBackend(path, name)
- }
-
- return nil, nil, er.Errorf("unknown backend")
-}
diff --git a/lnd/channeldb/kvdb/bolt_compact.go b/lnd/channeldb/kvdb/bolt_compact.go
deleted file mode 100644
index 6154b031..00000000
--- a/lnd/channeldb/kvdb/bolt_compact.go
+++ /dev/null
@@ -1,247 +0,0 @@
-// The code in this file is an adapted version of the bbolt compact command
-// implemented in this file:
-// https://github.com/etcd-io/bbolt/blob/master/cmd/bbolt/main.go
-
-package kvdb
-
-import (
- "os"
- "path"
-
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/lnd/healthcheck"
- "github.com/pkt-cash/pktd/pktlog/log"
- "go.etcd.io/bbolt"
-)
-
-const (
- // defaultResultFileSizeMultiplier is the default multiplier we apply to
- // the current database size to calculate how big it could possibly get
- // after compacting, in case the database is already at its optimal size
- // and compaction causes it to grow. This should normally not be the
- // case but we really want to avoid not having enough disk space for the
- // compaction, so we apply a safety margin of 10%.
- defaultResultFileSizeMultiplier = float64(1.1)
-
- // defaultTxMaxSize is the default maximum number of operations that
- // are allowed to be executed in a single transaction.
- defaultTxMaxSize = 65536
-
- // bucketFillSize is the fill size setting that is used for each new
- // bucket that is created in the compacted database. This setting is not
- // persisted and is therefore only effective for the compaction itself.
- // Because during the compaction we only append data a fill percent of
- // 100% is optimal for performance.
- bucketFillSize = 1.0
-)
-
-type compacter struct {
- srcPath string
- dstPath string
- txMaxSize int64
-}
-
-// execute opens the source and destination databases and then compacts the
-// source into destination and returns the size of both files as a result.
-func (cmd *compacter) execute() (int64, int64, er.R) {
- if cmd.txMaxSize == 0 {
- cmd.txMaxSize = defaultTxMaxSize
- }
-
- // Ensure source file exists.
- fi, errr := os.Stat(cmd.srcPath)
- if errr != nil {
- return 0, 0, er.Errorf("error determining source database "+
- "size: %v", errr)
- }
- initialSize := fi.Size()
- marginSize := float64(initialSize) * defaultResultFileSizeMultiplier
-
- // Before opening any of the databases, let's first make sure we have
- // enough free space on the destination file system to create a full
- // copy of the source DB (worst-case scenario if the compaction doesn't
- // actually shrink the file size).
- destFolder := path.Dir(cmd.dstPath)
- freeSpace, err := healthcheck.AvailableDiskSpace(destFolder)
- if err != nil {
- return 0, 0, er.Errorf("error determining free disk space on "+
- "%s: %v", destFolder, err)
- }
- log.Debugf("Free disk space on compaction destination file system: "+
- "%d bytes", freeSpace)
- if freeSpace < uint64(marginSize) {
- return 0, 0, er.Errorf("could not start compaction, "+
- "destination folder %s only has %d bytes of free disk "+
- "space available while we need at least %d for worst-"+
- "case compaction", destFolder, freeSpace, initialSize)
- }
-
- // Open source database. We open it in read only mode to avoid (and fix)
- // possible freelist sync problems.
- src, errr := bbolt.Open(cmd.srcPath, 0444, &bbolt.Options{
- ReadOnly: true,
- })
- if errr != nil {
- return 0, 0, er.Errorf("error opening source database: %v",
- errr)
- }
- defer func() {
- if err := src.Close(); err != nil {
- log.Errorf("Compact error: closing source DB: %v", err)
- }
- }()
-
- // Open destination database.
- dst, errr := bbolt.Open(cmd.dstPath, fi.Mode(), nil)
- if errr != nil {
- return 0, 0, er.Errorf("error opening destination database: "+
- "%v", errr)
- }
- defer func() {
- if err := dst.Close(); err != nil {
- log.Errorf("Compact error: closing dest DB: %v", err)
- }
- }()
-
- // Run compaction.
- if err := cmd.compact(dst, src); err != nil {
- return 0, 0, er.Errorf("error running compaction: %v", err)
- }
-
- // Report stats on new size.
- fi, errr = os.Stat(cmd.dstPath)
- if errr != nil {
- return 0, 0, er.Errorf("error determining destination "+
- "database size: %v", errr)
- } else if fi.Size() == 0 {
- return 0, 0, er.Errorf("zero db size")
- }
-
- return initialSize, fi.Size(), nil
-}
-
-// compact tries to create a compacted copy of the source database in a new
-// destination database.
-func (cmd *compacter) compact(dst, src *bbolt.DB) er.R {
- // Commit regularly, or we'll run out of memory for large datasets if
- // using one transaction.
- var size int64
- tx, err := dst.Begin(true)
- if err != nil {
- return er.E(err)
- }
- defer func() {
- _ = tx.Rollback()
- }()
-
- if err := cmd.walk(src, func(keys [][]byte, k, v []byte, seq uint64) er.R {
- // On each key/value, check if we have exceeded tx size.
- sz := int64(len(k) + len(v))
- if size+sz > cmd.txMaxSize && cmd.txMaxSize != 0 {
- // Commit previous transaction.
- if err := tx.Commit(); err != nil {
- return er.E(err)
- }
-
- // Start new transaction.
- tx, err = dst.Begin(true)
- if err != nil {
- return er.E(err)
- }
- size = 0
- }
- size += sz
-
- // Create bucket on the root transaction if this is the first
- // level.
- nk := len(keys)
- if nk == 0 {
- bkt, err := tx.CreateBucket(k)
- if err != nil {
- return er.E(err)
- }
- if err := bkt.SetSequence(seq); err != nil {
- return er.E(err)
- }
- return nil
- }
-
- // Create buckets on subsequent levels, if necessary.
- b := tx.Bucket(keys[0])
- if nk > 1 {
- for _, k := range keys[1:] {
- b = b.Bucket(k)
- }
- }
-
- // Fill the entire page for best compaction.
- b.FillPercent = bucketFillSize
-
- // If there is no value then this is a bucket call.
- if v == nil {
- bkt, err := b.CreateBucket(k)
- if err != nil {
- return er.E(err)
- }
- if err := bkt.SetSequence(seq); err != nil {
- return er.E(err)
- }
- return nil
- }
-
- // Otherwise treat it as a key/value pair.
- return er.E(b.Put(k, v))
- }); err != nil {
- return err
- }
-
- return er.E(tx.Commit())
-}
-
-// walkFunc is the type of the function called for keys (buckets and "normal"
-// values) discovered by Walk. keys is the list of keys to descend to the bucket
-// owning the discovered key/value pair k/v.
-type walkFunc func(keys [][]byte, k, v []byte, seq uint64) er.R
-
-// walk walks recursively the bolt database db, calling walkFn for each key it
-// finds.
-func (cmd *compacter) walk(db *bbolt.DB, walkFn walkFunc) er.R {
- return er.E(db.View(func(tx *bbolt.Tx) error {
- return tx.ForEach(func(name []byte, b *bbolt.Bucket) error {
- // This will log the top level buckets only to give the
- // user some sense of progress.
- log.Debugf("Compacting top level bucket %s", name)
-
- return er.Native(cmd.walkBucket(
- b, nil, name, nil, b.Sequence(), walkFn,
- ))
- })
- }))
-}
-
-// walkBucket recursively walks through a bucket.
-func (cmd *compacter) walkBucket(b *bbolt.Bucket, keyPath [][]byte, k, v []byte,
- seq uint64, fn walkFunc) er.R {
-
- // Execute callback.
- if err := fn(keyPath, k, v, seq); err != nil {
- return err
- }
-
- // If this is not a bucket then stop.
- if v != nil {
- return nil
- }
-
- // Iterate over each child key/value.
- keyPath = append(keyPath, k)
- return er.E(b.ForEach(func(k, v []byte) error {
- if v == nil {
- bkt := b.Bucket(k)
- return er.Native(cmd.walkBucket(
- bkt, keyPath, k, nil, bkt.Sequence(), fn,
- ))
- }
- return er.Native(cmd.walkBucket(b, keyPath, k, v, b.Sequence(), fn))
- }))
-}
diff --git a/lnd/channeldb/kvdb/config.go b/lnd/channeldb/kvdb/config.go
deleted file mode 100644
index 9ea50adc..00000000
--- a/lnd/channeldb/kvdb/config.go
+++ /dev/null
@@ -1,48 +0,0 @@
-package kvdb
-
-import "time"
-
-const (
- // BoltBackendName is the name of the backend that should be passed into
- // kvdb.Create to initialize a new instance of kvdb.Backend backed by a
- // live instance of bbolt.
- BoltBackendName = "bdb"
-
- // EtcdBackendName is the name of the backend that should be passed into
- // kvdb.Create to initialize a new instance of kvdb.Backend backed by a
- // live instance of etcd.
- EtcdBackendName = "etcd"
-
- // DefaultBoltAutoCompactMinAge is the default minimum time that must
- // have passed since a bolt database file was last compacted for the
- // compaction to be considered again.
- DefaultBoltAutoCompactMinAge = time.Hour * 24 * 7
-)
-
-// BoltConfig holds bolt configuration.
-type BoltConfig struct {
- SyncFreelist bool `long:"nofreelistsync" description:"Whether the databases used within lnd should sync their freelist to disk. This is disabled by default resulting in improved memory performance during operation, but with an increase in startup time."`
-
- AutoCompact bool `long:"auto-compact" description:"Whether the databases used within lnd should automatically be compacted on every startup (and if the database has the configured minimum age). This is disabled by default because it requires additional disk space to be available during the compaction that is freed afterwards. In general compaction leads to smaller database files."`
-
- AutoCompactMinAge time.Duration `long:"auto-compact-min-age" description:"How long ago the last compaction of a database file must be for it to be considered for auto compaction again. Can be set to 0 to compact on every startup."`
-}
-
-// EtcdConfig holds etcd configuration.
-type EtcdConfig struct {
- Embedded bool `long:"embedded" description:"Use embedded etcd instance instead of the external one."`
-
- Host string `long:"host" description:"Etcd database host."`
-
- User string `long:"user" description:"Etcd database user."`
-
- Pass string `long:"pass" description:"Password for the database user."`
-
- CertFile string `long:"cert_file" description:"Path to the TLS certificate for etcd RPC."`
-
- KeyFile string `long:"key_file" description:"Path to the TLS private key for etcd RPC."`
-
- InsecureSkipVerify bool `long:"insecure_skip_verify" description:"Whether we intend to skip TLS verification"`
-
- CollectStats bool `long:"collect_stats" description:"Whether to collect etcd commit stats."`
-}
diff --git a/lnd/channeldb/kvdb/etcd/bucket.go b/lnd/channeldb/kvdb/etcd/bucket.go
deleted file mode 100644
index 8a1ff071..00000000
--- a/lnd/channeldb/kvdb/etcd/bucket.go
+++ /dev/null
@@ -1,92 +0,0 @@
-// +build kvdb_etcd
-
-package etcd
-
-import (
- "crypto/sha256"
-)
-
-const (
- bucketIDLength = 32
-)
-
-var (
- valuePostfix = []byte{0x00}
- bucketPostfix = []byte{0xFF}
- sequencePrefix = []byte("$seq$")
-)
-
-// makeBucketID returns a deterministic key for the passed byte slice.
-// Currently it returns the sha256 hash of the slice.
-func makeBucketID(key []byte) [bucketIDLength]byte {
- return sha256.Sum256(key)
-}
-
-// isValidBucketID checks if the passed slice is the required length to be a
-// valid bucket id.
-func isValidBucketID(s []byte) bool {
- return len(s) == bucketIDLength
-}
-
-// makeKey concatenates parent, key and postfix into one byte slice.
-// The postfix indicates the use of this key (whether bucket or value), while
-// parent refers to the parent bucket.
-func makeKey(parent, key, postfix []byte) []byte {
- keyBuf := make([]byte, len(parent)+len(key)+len(postfix))
- copy(keyBuf, parent)
- copy(keyBuf[len(parent):], key)
- copy(keyBuf[len(parent)+len(key):], postfix)
-
- return keyBuf
-}
-
-// makeBucketKey returns a bucket key from the passed parent bucket id and
-// the key.
-func makeBucketKey(parent []byte, key []byte) []byte {
- return makeKey(parent, key, bucketPostfix)
-}
-
-// makeValueKey returns a value key from the passed parent bucket id and
-// the key.
-func makeValueKey(parent []byte, key []byte) []byte {
- return makeKey(parent, key, valuePostfix)
-}
-
-// makeSequenceKey returns a sequence key of the passed parent bucket id.
-func makeSequenceKey(parent []byte) []byte {
- keyBuf := make([]byte, len(sequencePrefix)+len(parent))
- copy(keyBuf, sequencePrefix)
- copy(keyBuf[len(sequencePrefix):], parent)
- return keyBuf
-}
-
-// isBucketKey returns true if the passed key is a bucket key, meaning it
-// keys a bucket name.
-func isBucketKey(key string) bool {
- if len(key) < bucketIDLength+1 {
- return false
- }
-
- return key[len(key)-1] == bucketPostfix[0]
-}
-
-// getKey chops out the key from the raw key (by removing the bucket id
-// prefixing the key and the postfix indicating whether it is a bucket or
-// a value key)
-func getKey(rawKey string) []byte {
- return []byte(rawKey[bucketIDLength : len(rawKey)-1])
-}
-
-// getKeyVal chops out the key from the raw key (by removing the bucket id
-// prefixing the key and the postfix indicating whether it is a bucket or
-// a value key) and also returns the appropriate value for the key, which is
-// nil in case of buckets (or the set value otherwise).
-func getKeyVal(kv *KV) ([]byte, []byte) {
- var val []byte
-
- if !isBucketKey(kv.key) {
- val = []byte(kv.val)
- }
-
- return getKey(kv.key), val
-}
diff --git a/lnd/channeldb/kvdb/etcd/bucket_test.go b/lnd/channeldb/kvdb/etcd/bucket_test.go
deleted file mode 100644
index e68821f1..00000000
--- a/lnd/channeldb/kvdb/etcd/bucket_test.go
+++ /dev/null
@@ -1,42 +0,0 @@
-// +build kvdb_etcd
-
-package etcd
-
-// bkey is a helper functon used in tests to create a bucket key from passed
-// bucket list.
-func bkey(buckets ...string) string {
- var bucketKey []byte
-
- rootID := makeBucketID([]byte(""))
- parent := rootID[:]
-
- for _, bucketName := range buckets {
- bucketKey = makeBucketKey(parent, []byte(bucketName))
- id := makeBucketID(bucketKey)
- parent = id[:]
- }
-
- return string(bucketKey)
-}
-
-// bval is a helper function used in tests to create a bucket value (the value
-// for a bucket key) from the passed bucket list.
-func bval(buckets ...string) string {
- id := makeBucketID([]byte(bkey(buckets...)))
- return string(id[:])
-}
-
-// vkey is a helper function used in tests to create a value key from the
-// passed key and bucket list.
-func vkey(key string, buckets ...string) string {
- rootID := makeBucketID([]byte(""))
- bucket := rootID[:]
-
- for _, bucketName := range buckets {
- bucketKey := makeBucketKey(bucket, []byte(bucketName))
- id := makeBucketID(bucketKey)
- bucket = id[:]
- }
-
- return string(makeValueKey(bucket, []byte(key)))
-}
diff --git a/lnd/channeldb/kvdb/etcd/commit_queue.go b/lnd/channeldb/kvdb/etcd/commit_queue.go
deleted file mode 100644
index f0384565..00000000
--- a/lnd/channeldb/kvdb/etcd/commit_queue.go
+++ /dev/null
@@ -1,150 +0,0 @@
-// +build kvdb_etcd
-
-package etcd
-
-import (
- "context"
- "sync"
-)
-
-// commitQueueSize is the maximum number of commits we let to queue up. All
-// remaining commits will block on commitQueue.Add().
-const commitQueueSize = 100
-
-// commitQueue is a simple execution queue to manage conflicts for transactions
-// and thereby reduce the number of times conflicting transactions need to be
-// retried. When a new transaction is added to the queue, we first upgrade the
-// read/write counts in the queue's own accounting to decide whether the new
-// transaction has any conflicting dependencies. If the transaction does not
-// conflict with any other, then it is comitted immediately, otherwise it'll be
-// queued up for later exection.
-// The algorithm is described in: http://www.cs.umd.edu/~abadi/papers/vll-vldb13.pdf
-type commitQueue struct {
- ctx context.Context
- mx sync.Mutex
- readerMap map[string]int
- writerMap map[string]int
-
- commitMutex sync.RWMutex
- queue chan (func())
- wg sync.WaitGroup
-}
-
-// NewCommitQueue creates a new commit queue, with the passed abort context.
-func NewCommitQueue(ctx context.Context) *commitQueue {
- q := &commitQueue{
- ctx: ctx,
- readerMap: make(map[string]int),
- writerMap: make(map[string]int),
- queue: make(chan func(), commitQueueSize),
- }
-
- // Start the queue consumer loop.
- q.wg.Add(1)
- go q.mainLoop()
-
- return q
-}
-
-// Wait waits for the queue to stop (after the queue context has been canceled).
-func (c *commitQueue) Wait() {
- c.wg.Wait()
-}
-
-// Add increases lock counts and queues up tx commit closure for execution.
-// Transactions that don't have any conflicts are executed immediately by
-// "downgrading" the count mutex to allow concurrency.
-func (c *commitQueue) Add(commitLoop func(), rset readSet, wset writeSet) {
- c.mx.Lock()
- blocked := false
-
- // Mark as blocked if there's any writer changing any of the keys in
- // the read set. Do not increment the reader counts yet as we'll need to
- // use the original reader counts when scanning through the write set.
- for key := range rset {
- if c.writerMap[key] > 0 {
- blocked = true
- break
- }
- }
-
- // Mark as blocked if there's any writer or reader for any of the keys
- // in the write set.
- for key := range wset {
- blocked = blocked || c.readerMap[key] > 0 || c.writerMap[key] > 0
-
- // Increment the writer count.
- c.writerMap[key] += 1
- }
-
- // Finally we can increment the reader counts for keys in the read set.
- for key := range rset {
- c.readerMap[key] += 1
- }
-
- if blocked {
- // Add the transaction to the queue if conflicts with an already
- // queued one.
- c.mx.Unlock()
-
- select {
- case c.queue <- commitLoop:
- case <-c.ctx.Done():
- }
- } else {
- // To make sure we don't add a new tx to the queue that depends
- // on this "unblocked" tx, grab the commitMutex before lifting
- // the mutex guarding the lock maps.
- c.commitMutex.RLock()
- c.mx.Unlock()
-
- // At this point we're safe to execute the "unblocked" tx, as
- // we cannot execute blocked tx that may have been read from the
- // queue until the commitMutex is held.
- commitLoop()
-
- c.commitMutex.RUnlock()
- }
-}
-
-// Done decreases lock counts of the keys in the read/write sets.
-func (c *commitQueue) Done(rset readSet, wset writeSet) {
- c.mx.Lock()
- defer c.mx.Unlock()
-
- for key := range rset {
- c.readerMap[key] -= 1
- if c.readerMap[key] == 0 {
- delete(c.readerMap, key)
- }
- }
-
- for key := range wset {
- c.writerMap[key] -= 1
- if c.writerMap[key] == 0 {
- delete(c.writerMap, key)
- }
- }
-}
-
-// mainLoop executes queued transaction commits for transactions that have
-// dependencies. The queue ensures that the top element doesn't conflict with
-// any other transactions and therefore can be executed freely.
-func (c *commitQueue) mainLoop() {
- defer c.wg.Done()
-
- for {
- select {
- case top := <-c.queue:
- // Execute the next blocked transaction. As it is
- // the top element in the queue it means that it doesn't
- // depend on any other transactions anymore.
- c.commitMutex.Lock()
- top()
- c.commitMutex.Unlock()
-
- case <-c.ctx.Done():
- return
- }
- }
-}
diff --git a/lnd/channeldb/kvdb/etcd/commit_queue_test.go b/lnd/channeldb/kvdb/etcd/commit_queue_test.go
deleted file mode 100644
index 16ff7100..00000000
--- a/lnd/channeldb/kvdb/etcd/commit_queue_test.go
+++ /dev/null
@@ -1,115 +0,0 @@
-// +build kvdb_etcd
-
-package etcd
-
-import (
- "context"
- "sync"
- "sync/atomic"
- "testing"
- "time"
-
- "github.com/stretchr/testify/require"
-)
-
-// TestCommitQueue tests that non-conflicting transactions commit concurrently,
-// while conflicting transactions are queued up.
-func TestCommitQueue(t *testing.T) {
- // The duration of each commit.
- const commitDuration = time.Millisecond * 500
- const numCommits = 4
-
- var wg sync.WaitGroup
- commits := make([]string, numCommits)
- idx := int32(-1)
-
- commit := func(tag string, sleep bool) func() {
- return func() {
- defer wg.Done()
-
- // Update our log of commit order. Avoid blocking
- // by preallocating the commit log and increasing
- // the log index atomically.
- i := atomic.AddInt32(&idx, 1)
- commits[i] = tag
-
- if sleep {
- time.Sleep(commitDuration)
- }
- }
- }
-
- // Helper function to create a read set from the passed keys.
- makeReadSet := func(keys []string) readSet {
- rs := make(map[string]stmGet)
-
- for _, key := range keys {
- rs[key] = stmGet{}
- }
-
- return rs
- }
-
- // Helper function to create a write set from the passed keys.
- makeWriteSet := func(keys []string) writeSet {
- ws := make(map[string]stmPut)
-
- for _, key := range keys {
- ws[key] = stmPut{}
- }
-
- return ws
- }
-
- ctx := context.Background()
- ctx, cancel := context.WithCancel(ctx)
- q := NewCommitQueue(ctx)
- defer q.Wait()
- defer cancel()
-
- wg.Add(numCommits)
- t1 := time.Now()
-
- // Tx1: reads: key1, key2, writes: key3, conflict: none
- q.Add(
- commit("free", true),
- makeReadSet([]string{"key1", "key2"}),
- makeWriteSet([]string{"key3"}),
- )
- // Tx2: reads: key1, key2, writes: key3, conflict: Tx1
- q.Add(
- commit("blocked1", false),
- makeReadSet([]string{"key1", "key2"}),
- makeWriteSet([]string{"key3"}),
- )
- // Tx3: reads: key1, writes: key4, conflict: none
- q.Add(
- commit("free", true),
- makeReadSet([]string{"key1", "key2"}),
- makeWriteSet([]string{"key4"}),
- )
- // Tx4: reads: key2, writes: key4 conflict: Tx3
- q.Add(
- commit("blocked2", false),
- makeReadSet([]string{"key2"}),
- makeWriteSet([]string{"key4"}),
- )
-
- // Wait for all commits.
- wg.Wait()
- t2 := time.Now()
-
- // Expected total execution time: delta.
- // 2 * commitDuration <= delta < 3 * commitDuration
- delta := t2.Sub(t1)
- require.LessOrEqual(t, int64(commitDuration*2), int64(delta))
- require.Greater(t, int64(commitDuration*3), int64(delta))
-
- // Expect that the non-conflicting "free" transactions are executed
- // before the blocking ones, and the blocking ones are executed in
- // the order of addition.
- require.Equal(t,
- []string{"free", "free", "blocked1", "blocked2"},
- commits,
- )
-}
diff --git a/lnd/channeldb/kvdb/etcd/db.go b/lnd/channeldb/kvdb/etcd/db.go
deleted file mode 100644
index 0c0b8d0c..00000000
--- a/lnd/channeldb/kvdb/etcd/db.go
+++ /dev/null
@@ -1,311 +0,0 @@
-// +build kvdb_etcd
-
-package etcd
-
-import (
- "context"
- "fmt"
- "io"
- "runtime"
- "sync"
- "time"
-
- "github.com/coreos/etcd/clientv3"
- "github.com/coreos/etcd/pkg/transport"
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/pktwallet/walletdb"
-)
-
-const (
- // etcdConnectionTimeout is the timeout until successful connection to
- // the etcd instance.
- etcdConnectionTimeout = 10 * time.Second
-
- // etcdLongTimeout is a timeout for longer taking etcd operatons.
- etcdLongTimeout = 30 * time.Second
-)
-
-// callerStats holds commit stats for a specific caller. Currently it only
-// holds the max stat, meaning that for a particular caller the largest
-// commit set is recorded.
-type callerStats struct {
- count int
- commitStats CommitStats
-}
-
-func (s callerStats) String() string {
- return fmt.Sprintf("count: %d, retries: %d, rset: %d, wset: %d",
- s.count, s.commitStats.Retries, s.commitStats.Rset,
- s.commitStats.Wset)
-}
-
-// commitStatsCollector collects commit stats for commits succeeding
-// and also for commits failing.
-type commitStatsCollector struct {
- sync.RWMutex
- succ map[string]*callerStats
- fail map[string]*callerStats
-}
-
-// newCommitStatsColletor creates a new commitStatsCollector instance.
-func newCommitStatsColletor() *commitStatsCollector {
- return &commitStatsCollector{
- succ: make(map[string]*callerStats),
- fail: make(map[string]*callerStats),
- }
-}
-
-// PrintStats returns collected stats pretty printed into a string.
-func (c *commitStatsCollector) PrintStats() string {
- c.RLock()
- defer c.RUnlock()
-
- s := "\nFailure:\n"
- for k, v := range c.fail {
- s += fmt.Sprintf("%s\t%s\n", k, v)
- }
-
- s += "\nSuccess:\n"
- for k, v := range c.succ {
- s += fmt.Sprintf("%s\t%s\n", k, v)
- }
-
- return s
-}
-
-// updateStatsMap updatess commit stats map for a caller.
-func updateStatMap(
- caller string, stats CommitStats, m map[string]*callerStats) {
-
- if _, ok := m[caller]; !ok {
- m[caller] = &callerStats{}
- }
-
- curr := m[caller]
- curr.count++
-
- // Update only if the total commit set is greater or equal.
- currTotal := curr.commitStats.Rset + curr.commitStats.Wset
- if currTotal <= (stats.Rset + stats.Wset) {
- curr.commitStats = stats
- }
-}
-
-// callback is an STM commit stats callback passed which can be passed
-// using a WithCommitStatsCallback to the STM upon construction.
-func (c *commitStatsCollector) callback(succ bool, stats CommitStats) {
- caller := "unknown"
-
- // Get the caller. As this callback is called from
- // the backend interface that means we need to ascend
- // 4 frames in the callstack.
- _, file, no, ok := runtime.Caller(4)
- if ok {
- caller = fmt.Sprintf("%s#%d", file, no)
- }
-
- c.Lock()
- defer c.Unlock()
-
- if succ {
- updateStatMap(caller, stats, c.succ)
- } else {
- updateStatMap(caller, stats, c.fail)
- }
-}
-
-// db holds a reference to the etcd client connection.
-type db struct {
- config BackendConfig
- cli *clientv3.Client
- commitStatsCollector *commitStatsCollector
- txQueue *commitQueue
-}
-
-// Enforce db implements the walletdb.DB interface.
-var _ walletdb.DB = (*db)(nil)
-
-// BackendConfig holds and etcd backend config and connection parameters.
-type BackendConfig struct {
- // Ctx is the context we use to cancel operations upon exit.
- Ctx context.Context
-
- // Host holds the peer url of the etcd instance.
- Host string
-
- // User is the username for the etcd peer.
- User string
-
- // Pass is the password for the etcd peer.
- Pass string
-
- // CertFile holds the path to the TLS certificate for etcd RPC.
- CertFile string
-
- // KeyFile holds the path to the TLS private key for etcd RPC.
- KeyFile string
-
- // InsecureSkipVerify should be set to true if we intend to
- // skip TLS verification.
- InsecureSkipVerify bool
-
- // Prefix the hash of the prefix will be used as the root
- // bucket id. This enables key space separation similar to
- // name spaces.
- Prefix string
-
- // CollectCommitStats indicates wheter to commit commit stats.
- CollectCommitStats bool
-}
-
-// newEtcdBackend returns a db object initialized with the passed backend
-// config. If etcd connection cannot be estabished, then returns error.
-func newEtcdBackend(config BackendConfig) (*db, er.R) {
- if config.Ctx == nil {
- config.Ctx = context.Background()
- }
-
- tlsInfo := transport.TLSInfo{
- CertFile: config.CertFile,
- KeyFile: config.KeyFile,
- InsecureSkipVerify: config.InsecureSkipVerify,
- }
-
- tlsConfig, err := tlsInfo.ClientConfig()
- if err != nil {
- return nil, err
- }
-
- cli, err := clientv3.New(clientv3.Config{
- Context: config.Ctx,
- Endpoints: []string{config.Host},
- DialTimeout: etcdConnectionTimeout,
- Username: config.User,
- Password: config.Pass,
- TLS: tlsConfig,
- MaxCallSendMsgSize: 16384*1024 - 1,
- })
-
- if err != nil {
- return nil, err
- }
-
- backend := &db{
- cli: cli,
- config: config,
- txQueue: NewCommitQueue(config.Ctx),
- }
-
- if config.CollectCommitStats {
- backend.commitStatsCollector = newCommitStatsColletor()
- }
-
- return backend, nil
-}
-
-// getSTMOptions creats all STM options based on the backend config.
-func (db *db) getSTMOptions() []STMOptionFunc {
- opts := []STMOptionFunc{
- WithAbortContext(db.config.Ctx),
- }
-
- if db.config.CollectCommitStats {
- opts = append(opts,
- WithCommitStatsCallback(db.commitStatsCollector.callback),
- )
- }
-
- return opts
-}
-
-// View opens a database read transaction and executes the function f with the
-// transaction passed as a parameter. After f exits, the transaction is rolled
-// back. If f errors, its error is returned, not a rollback error (if any
-// occur). The passed reset function is called before the start of the
-// transaction and can be used to reset intermediate state. As callers may
-// expect retries of the f closure (depending on the database backend used), the
-// reset function will be called before each retry respectively.
-func (db *db) View(f func(tx walletdb.ReadTx) error, reset func()) er.R {
- apply := func(stm STM) er.R {
- reset()
- return f(newReadWriteTx(stm, db.config.Prefix))
- }
-
- return RunSTM(db.cli, apply, db.txQueue, db.getSTMOptions()...)
-}
-
-// Update opens a database read/write transaction and executes the function f
-// with the transaction passed as a parameter. After f exits, if f did not
-// error, the transaction is committed. Otherwise, if f did error, the
-// transaction is rolled back. If the rollback fails, the original error
-// returned by f is still returned. If the commit fails, the commit error is
-// returned. As callers may expect retries of the f closure, the reset function
-// will be called before each retry respectively.
-func (db *db) Update(f func(tx walletdb.ReadWriteTx) error, reset func()) er.R {
- apply := func(stm STM) er.R {
- reset()
- return f(newReadWriteTx(stm, db.config.Prefix))
- }
-
- return RunSTM(db.cli, apply, db.txQueue, db.getSTMOptions()...)
-}
-
-// PrintStats returns all collected stats pretty printed into a string.
-func (db *db) PrintStats() string {
- if db.commitStatsCollector != nil {
- return db.commitStatsCollector.PrintStats()
- }
-
- return ""
-}
-
-// BeginReadWriteTx opens a database read+write transaction.
-func (db *db) BeginReadWriteTx() (walletdb.ReadWriteTx, er.R) {
- return newReadWriteTx(
- NewSTM(db.cli, db.txQueue, db.getSTMOptions()...),
- db.config.Prefix,
- ), nil
-}
-
-// BeginReadTx opens a database read transaction.
-func (db *db) BeginReadTx() (walletdb.ReadTx, er.R) {
- return newReadWriteTx(
- NewSTM(db.cli, db.txQueue, db.getSTMOptions()...),
- db.config.Prefix,
- ), nil
-}
-
-// Copy writes a copy of the database to the provided writer. This call will
-// start a read-only transaction to perform all operations.
-// This function is part of the walletdb.Db interface implementation.
-func (db *db) Copy(w io.Writer) er.R {
- ctx, cancel := context.WithTimeout(db.config.Ctx, etcdLongTimeout)
- defer cancel()
-
- readCloser, err := db.cli.Snapshot(ctx)
- if err != nil {
- return err
- }
-
- _, err = io.Copy(w, readCloser)
-
- return err
-}
-
-// Close cleanly shuts down the database and syncs all data.
-// This function is part of the walletdb.Db interface implementation.
-func (db *db) Close() er.R {
- return db.cli.Close()
-}
-
-// Batch opens a database read/write transaction and executes the function f
-// with the transaction passed as a parameter. After f exits, if f did not
-// error, the transaction is committed. Otherwise, if f did error, the
-// transaction is rolled back. If the rollback fails, the original error
-// returned by f is still returned. If the commit fails, the commit error is
-// returned.
-//
-// Batch is only useful when there are multiple goroutines calling it.
-func (db *db) Batch(apply func(tx walletdb.ReadWriteTx) er.R) er.R {
- return db.Update(apply, func() {})
-}
diff --git a/lnd/channeldb/kvdb/etcd/db_test.go b/lnd/channeldb/kvdb/etcd/db_test.go
deleted file mode 100644
index 90ac734b..00000000
--- a/lnd/channeldb/kvdb/etcd/db_test.go
+++ /dev/null
@@ -1,76 +0,0 @@
-// +build kvdb_etcd
-
-package etcd
-
-import (
- "bytes"
- "context"
- "testing"
-
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/pktwallet/walletdb"
- "github.com/stretchr/testify/require"
-)
-
-func TestCopy(t *testing.T) {
- t.Parallel()
-
- f := NewEtcdTestFixture(t)
- defer f.Cleanup()
-
- db, err := newEtcdBackend(f.BackendConfig())
- util.RequireNoErr(t, err)
-
- err = db.Update(func(tx walletdb.ReadWriteTx) er.R {
- // "apple"
- apple, err := tx.CreateTopLevelBucket([]byte("apple"))
- util.RequireNoErr(t, err)
- require.NotNil(t, apple)
-
- util.RequireNoErr(t, apple.Put([]byte("key"), []byte("val")))
- return nil
- }, func() {})
-
- // Expect non-zero copy.
- var buf bytes.Buffer
-
- util.RequireNoErr(t, db.Copy(&buf))
- require.Greater(t, buf.Len(), 0)
- require.Nil(t, err)
-
- expected := map[string]string{
- bkey("apple"): bval("apple"),
- vkey("key", "apple"): "val",
- }
- require.Equal(t, expected, f.Dump())
-}
-
-func TestAbortContext(t *testing.T) {
- t.Parallel()
-
- f := NewEtcdTestFixture(t)
- defer f.Cleanup()
-
- ctx, cancel := context.WithCancel(context.Background())
-
- config := f.BackendConfig()
- config.Ctx = ctx
-
- // Pass abort context and abort right away.
- db, err := newEtcdBackend(config)
- util.RequireNoErr(t, err)
- cancel()
-
- // Expect that the update will fail.
- err = db.Update(func(tx walletdb.ReadWriteTx) er.R {
- _, err := tx.CreateTopLevelBucket([]byte("bucket"))
- util.RequireErr(t, err, "context canceled")
-
- return nil
- }, func() {})
-
- util.RequireErr(t, err, "context canceled")
-
- // No changes in the DB.
- require.Equal(t, map[string]string{}, f.Dump())
-}
diff --git a/lnd/channeldb/kvdb/etcd/driver.go b/lnd/channeldb/kvdb/etcd/driver.go
deleted file mode 100644
index 4b4886f3..00000000
--- a/lnd/channeldb/kvdb/etcd/driver.go
+++ /dev/null
@@ -1,68 +0,0 @@
-// +build kvdb_etcd
-
-package etcd
-
-import (
- "fmt"
-
- "github.com/pkt-cash/pktd/pktwallet/walletdb"
-)
-
-const (
- dbType = "etcd"
-)
-
-// parseArgs parses the arguments from the walletdb Open/Create methods.
-func parseArgs(funcName string, args ...interface{}) (*BackendConfig, er.R) {
- if len(args) != 1 {
- return nil, er.Errorf("invalid number of arguments to %s.%s -- "+
- "expected: etcd.BackendConfig",
- dbType, funcName,
- )
- }
-
- config, ok := args[0].(BackendConfig)
- if !ok {
- return nil, er.Errorf("argument to %s.%s is invalid -- "+
- "expected: etcd.BackendConfig",
- dbType, funcName,
- )
- }
-
- return &config, nil
-}
-
-// createDBDriver is the callback provided during driver registration that
-// creates, initializes, and opens a database for use.
-func createDBDriver(args ...interface{}) (walletdb.DB, er.R) {
- config, err := parseArgs("Create", args...)
- if err != nil {
- return nil, err
- }
-
- return newEtcdBackend(*config)
-}
-
-// openDBDriver is the callback provided during driver registration that opens
-// an existing database for use.
-func openDBDriver(args ...interface{}) (walletdb.DB, er.R) {
- config, err := parseArgs("Open", args...)
- if err != nil {
- return nil, err
- }
-
- return newEtcdBackend(*config)
-}
-
-func init() {
- // Register the driver.
- driver := walletdb.Driver{
- DbType: dbType,
- Create: createDBDriver,
- Open: openDBDriver,
- }
- if err := walletdb.RegisterDriver(driver); err != nil {
- panic(fmt.Sprintf("Failed to regiser database driver '%s': %v",
- dbType, err))
- }
-}
diff --git a/lnd/channeldb/kvdb/etcd/driver_test.go b/lnd/channeldb/kvdb/etcd/driver_test.go
deleted file mode 100644
index 59983dc3..00000000
--- a/lnd/channeldb/kvdb/etcd/driver_test.go
+++ /dev/null
@@ -1,31 +0,0 @@
-// +build kvdb_etcd
-
-package etcd
-
-import (
- "testing"
-
- "github.com/pkt-cash/pktd/btcutil/util"
- "github.com/pkt-cash/pktd/pktwallet/walletdb"
- "github.com/stretchr/testify/require"
-)
-
-func TestOpenCreateFailure(t *testing.T) {
- t.Parallel()
-
- db, err := walletdb.Open(dbType)
- util.RequireErr(t, err)
- require.Nil(t, db)
-
- db, err = walletdb.Open(dbType, "wrong")
- util.RequireErr(t, err)
- require.Nil(t, db)
-
- db, err = walletdb.Create(dbType)
- util.RequireErr(t, err)
- require.Nil(t, db)
-
- db, err = walletdb.Create(dbType, "wrong")
- util.RequireErr(t, err)
- require.Nil(t, db)
-}
diff --git a/lnd/channeldb/kvdb/etcd/embed.go b/lnd/channeldb/kvdb/etcd/embed.go
deleted file mode 100644
index 195396d5..00000000
--- a/lnd/channeldb/kvdb/etcd/embed.go
+++ /dev/null
@@ -1,81 +0,0 @@
-// +build kvdb_etcd
-
-package etcd
-
-import (
- "context"
- "fmt"
- "net"
- "net/url"
- "time"
-
- "github.com/coreos/etcd/embed"
-)
-
-const (
- // readyTimeout is the time until the embedded etcd instance should start.
- readyTimeout = 10 * time.Second
-)
-
-// getFreePort returns a random open TCP port.
-func getFreePort() int {
- ln, err := net.Listen("tcp", "[::]:0")
- if err != nil {
- panic(err)
- }
-
- port := ln.Addr().(*net.TCPAddr).Port
-
- err = ln.Close()
- if err != nil {
- panic(err)
- }
-
- return port
-}
-
-// NewEmbeddedEtcdInstance creates an embedded etcd instance for testing,
-// listening on random open ports. Returns the backend config and a cleanup
-// func that will stop the etcd instance.
-func NewEmbeddedEtcdInstance(path string) (*BackendConfig, func(), er.R) {
- cfg := embed.NewConfig()
- cfg.Dir = path
-
- // To ensure that we can submit large transactions.
- cfg.MaxTxnOps = 8192
- cfg.MaxRequestBytes = 16384 * 1024
-
- // Listen on random free ports.
- clientURL := fmt.Sprintf("127.0.0.1:%d", getFreePort())
- peerURL := fmt.Sprintf("127.0.0.1:%d", getFreePort())
- cfg.LCUrls = []url.URL{{Host: clientURL}}
- cfg.LPUrls = []url.URL{{Host: peerURL}}
-
- etcd, err := embed.StartEtcd(cfg)
- if err != nil {
- return nil, nil, err
- }
-
- select {
- case <-etcd.Server.ReadyNotify():
- case <-time.After(readyTimeout):
- etcd.Close()
- return nil, nil,
- er.Errorf("etcd failed to start after: %v", readyTimeout)
- }
-
- ctx, cancel := context.WithCancel(context.Background())
-
- connConfig := &BackendConfig{
- Ctx: ctx,
- Host: "http://" + peerURL,
- User: "user",
- Pass: "pass",
- InsecureSkipVerify: true,
- }
-
- return connConfig, func() {
- cancel()
- etcd.Close()
- }, nil
-}
diff --git a/lnd/channeldb/kvdb/etcd/fixture_test.go b/lnd/channeldb/kvdb/etcd/fixture_test.go
deleted file mode 100644
index 56526906..00000000
--- a/lnd/channeldb/kvdb/etcd/fixture_test.go
+++ /dev/null
@@ -1,129 +0,0 @@
-// +build kvdb_etcd
-
-package etcd
-
-import (
- "context"
- "io/ioutil"
- "os"
- "testing"
- "time"
-
- "github.com/coreos/etcd/clientv3"
-)
-
-const (
- // testEtcdTimeout is used for all RPC calls initiated by the test fixture.
- testEtcdTimeout = 5 * time.Second
-)
-
-// EtcdTestFixture holds internal state of the etcd test fixture.
-type EtcdTestFixture struct {
- t *testing.T
- cli *clientv3.Client
- config *BackendConfig
- cleanup func()
-}
-
-// NewTestEtcdInstance creates an embedded etcd instance for testing, listening
-// on random open ports. Returns the connection config and a cleanup func that
-// will stop the etcd instance.
-func NewTestEtcdInstance(t *testing.T, path string) (*BackendConfig, func()) {
- t.Helper()
-
- config, cleanup, err := NewEmbeddedEtcdInstance(path)
- if err != nil {
- t.Fatalf("error while staring embedded etcd instance: %v", err)
- }
-
- return config, cleanup
-}
-
-// NewTestEtcdTestFixture creates a new etcd-test fixture. This is helper
-// object to facilitate etcd tests and ensure pre and post conditions.
-func NewEtcdTestFixture(t *testing.T) *EtcdTestFixture {
- tmpDir, err := ioutil.TempDir("", "etcd")
- if err != nil {
- t.Fatalf("unable to create temp dir: %v", err)
- }
-
- config, etcdCleanup := NewTestEtcdInstance(t, tmpDir)
-
- cli, err := clientv3.New(clientv3.Config{
- Endpoints: []string{config.Host},
- Username: config.User,
- Password: config.Pass,
- })
- if err != nil {
- os.RemoveAll(tmpDir)
- t.Fatalf("unable to create etcd test fixture: %v", err)
- }
-
- return &EtcdTestFixture{
- t: t,
- cli: cli,
- config: config,
- cleanup: func() {
- etcdCleanup()
- os.RemoveAll(tmpDir)
- },
- }
-}
-
-// Put puts a string key/value into the test etcd database.
-func (f *EtcdTestFixture) Put(key, value string) {
- ctx, cancel := context.WithTimeout(context.TODO(), testEtcdTimeout)
- defer cancel()
-
- _, err := f.cli.Put(ctx, key, value)
- if err != nil {
- f.t.Fatalf("etcd test fixture failed to put: %v", err)
- }
-}
-
-// Get queries a key and returns the stored value from the test etcd database.
-func (f *EtcdTestFixture) Get(key string) string {
- ctx, cancel := context.WithTimeout(context.TODO(), testEtcdTimeout)
- defer cancel()
-
- resp, err := f.cli.Get(ctx, key)
- if err != nil {
- f.t.Fatalf("etcd test fixture failed to put: %v", err)
- }
-
- if len(resp.Kvs) > 0 {
- return string(resp.Kvs[0].Value)
- }
-
- return ""
-}
-
-// Dump scans and returns all key/values from the test etcd database.
-func (f *EtcdTestFixture) Dump() map[string]string {
- ctx, cancel := context.WithTimeout(context.TODO(), testEtcdTimeout)
- defer cancel()
-
- resp, err := f.cli.Get(ctx, "", clientv3.WithPrefix())
- if err != nil {
- f.t.Fatalf("etcd test fixture failed to put: %v", err)
- }
-
- result := make(map[string]string)
- for _, kv := range resp.Kvs {
- result[string(kv.Key)] = string(kv.Value)
- }
-
- return result
-}
-
-// BackendConfig returns the backend config for connecting to theembedded
-// etcd instance.
-func (f *EtcdTestFixture) BackendConfig() BackendConfig {
- return *f.config
-}
-
-// Cleanup should be called at test fixture teardown to stop the embedded
-// etcd instance and remove all temp db files form the filesystem.
-func (f *EtcdTestFixture) Cleanup() {
- f.cleanup()
-}
diff --git a/lnd/channeldb/kvdb/etcd/readwrite_bucket.go b/lnd/channeldb/kvdb/etcd/readwrite_bucket.go
deleted file mode 100644
index 373f90b9..00000000
--- a/lnd/channeldb/kvdb/etcd/readwrite_bucket.go
+++ /dev/null
@@ -1,357 +0,0 @@
-// +build kvdb_etcd
-
-package etcd
-
-import (
- "strconv"
-
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/pktwallet/walletdb"
-)
-
-// readWriteBucket stores the bucket id and the buckets transaction.
-type readWriteBucket struct {
- // id is used to identify the bucket and is created by
- // hashing the parent id with the bucket key. For each key/value,
- // sub-bucket or the bucket sequence the bucket id is used with the
- // appropriate prefix to prefix the key.
- id []byte
-
- // tx holds the parent transaction.
- tx *readWriteTx
-}
-
-// newReadWriteBucket creates a new rw bucket with the passed transaction
-// and bucket id.
-func newReadWriteBucket(tx *readWriteTx, key, id []byte) *readWriteBucket {
- return &readWriteBucket{
- id: id,
- tx: tx,
- }
-}
-
-// NestedReadBucket retrieves a nested read bucket with the given key.
-// Returns nil if the bucket does not exist.
-func (b *readWriteBucket) NestedReadBucket(key []byte) walletdb.ReadBucket {
- return b.NestedReadWriteBucket(key)
-}
-
-// ForEach invokes the passed function with every key/value pair in
-// the bucket. This includes nested buckets, in which case the value
-// is nil, but it does not include the key/value pairs within those
-// nested buckets.
-func (b *readWriteBucket) ForEach(cb func(k, v []byte) er.R) er.R {
- prefix := string(b.id)
-
- // Get the first matching key that is in the bucket.
- kv, err := b.tx.stm.First(prefix)
- if err != nil {
- return err
- }
-
- for kv != nil {
- key, val := getKeyVal(kv)
-
- if err := cb(key, val); err != nil {
- return err
- }
-
- // Step to the next key.
- kv, err = b.tx.stm.Next(prefix, kv.key)
- if err != nil {
- return err
- }
- }
-
- return nil
-}
-
-// Get returns the value for the given key. Returns nil if the key does
-// not exist in this bucket.
-func (b *readWriteBucket) Get(key []byte) []byte {
- // Return nil if the key is empty.
- if len(key) == 0 {
- return nil
- }
-
- // Fetch the associated value.
- val, err := b.tx.stm.Get(string(makeValueKey(b.id, key)))
- if err != nil {
- // TODO: we should return the error once the
- // kvdb inteface is extended.
- return nil
- }
-
- if val == nil {
- return nil
- }
-
- return val
-}
-
-func (b *readWriteBucket) ReadCursor() walletdb.ReadCursor {
- return newReadWriteCursor(b)
-}
-
-// NestedReadWriteBucket retrieves a nested bucket with the given key.
-// Returns nil if the bucket does not exist.
-func (b *readWriteBucket) NestedReadWriteBucket(key []byte) walletdb.ReadWriteBucket {
- if len(key) == 0 {
- return nil
- }
-
- // Get the bucket id (and return nil if bucket doesn't exist).
- bucketKey := makeBucketKey(b.id, key)
- bucketVal, err := b.tx.stm.Get(string(bucketKey))
- if err != nil {
- // TODO: we should return the error once the
- // kvdb inteface is extended.
- return nil
- }
-
- if !isValidBucketID(bucketVal) {
- return nil
- }
-
- // Return the bucket with the fetched bucket id.
- return newReadWriteBucket(b.tx, bucketKey, bucketVal)
-}
-
-// assertNoValue checks if the value for the passed key exists.
-func (b *readWriteBucket) assertNoValue(key []byte) er.R {
- val, err := b.tx.stm.Get(string(makeValueKey(b.id, key)))
- if err != nil {
- return err
- }
-
- if val != nil {
- return walletdb.ErrIncompatibleValue.Default()
- }
-
- return nil
-}
-
-// CreateBucket creates and returns a new nested bucket with the given
-// key. Returns ErrBucketExists if the bucket already exists,
-// ErrBucketNameRequired if the key is empty, or ErrIncompatibleValue
-// if the key value is otherwise invalid for the particular database
-// implementation. Other errors are possible depending on the
-// implementation.
-func (b *readWriteBucket) CreateBucket(key []byte) (
- walletdb.ReadWriteBucket, er.R) {
-
- if len(key) == 0 {
- return nil, walletdb.ErrBucketNameRequired.Default()
- }
-
- // Check if the bucket already exists.
- bucketKey := makeBucketKey(b.id, key)
-
- bucketVal, err := b.tx.stm.Get(string(bucketKey))
- if err != nil {
- return nil, err
- }
-
- if isValidBucketID(bucketVal) {
- return nil, walletdb.ErrBucketExists.Default()
- }
-
- if err := b.assertNoValue(key); err != nil {
- return nil, err
- }
-
- // Create a deterministic bucket id from the bucket key.
- newID := makeBucketID(bucketKey)
-
- // Create the bucket.
- b.tx.stm.Put(string(bucketKey), string(newID[:]))
-
- return newReadWriteBucket(b.tx, bucketKey, newID[:]), nil
-}
-
-// CreateBucketIfNotExists creates and returns a new nested bucket with
-// the given key if it does not already exist. Returns
-// ErrBucketNameRequired if the key is empty or ErrIncompatibleValue
-// if the key value is otherwise invalid for the particular database
-// backend. Other errors are possible depending on the implementation.
-func (b *readWriteBucket) CreateBucketIfNotExists(key []byte) (
- walletdb.ReadWriteBucket, er.R) {
-
- if len(key) == 0 {
- return nil, walletdb.ErrBucketNameRequired.Default()
- }
-
- // Check for the bucket and create if it doesn't exist.
- bucketKey := makeBucketKey(b.id, key)
-
- bucketVal, err := b.tx.stm.Get(string(bucketKey))
- if err != nil {
- return nil, err
- }
-
- if !isValidBucketID(bucketVal) {
- if err := b.assertNoValue(key); err != nil {
- return nil, err
- }
-
- newID := makeBucketID(bucketKey)
- b.tx.stm.Put(string(bucketKey), string(newID[:]))
-
- return newReadWriteBucket(b.tx, bucketKey, newID[:]), nil
- }
-
- // Otherwise return the bucket with the fetched bucket id.
- return newReadWriteBucket(b.tx, bucketKey, bucketVal), nil
-}
-
-// DeleteNestedBucket deletes the nested bucket and its sub-buckets
-// pointed to by the passed key. All values in the bucket and sub-buckets
-// will be deleted as well.
-func (b *readWriteBucket) DeleteNestedBucket(key []byte) er.R {
- // TODO shouldn't empty key return ErrBucketNameRequired ?
- if len(key) == 0 {
- return walletdb.ErrIncompatibleValue.Default()
- }
-
- // Get the bucket first.
- bucketKey := string(makeBucketKey(b.id, key))
-
- bucketVal, err := b.tx.stm.Get(bucketKey)
- if err != nil {
- return err
- }
-
- if !isValidBucketID(bucketVal) {
- return walletdb.ErrBucketNotFound.Default()
- }
-
- // Enqueue the top level bucket id.
- queue := [][]byte{bucketVal}
-
- // Traverse the buckets breadth first.
- for len(queue) != 0 {
- if !isValidBucketID(queue[0]) {
- return walletdb.ErrBucketNotFound.Default()
- }
-
- id := queue[0]
- queue = queue[1:]
-
- kv, err := b.tx.stm.First(string(id))
- if err != nil {
- return err
- }
-
- for kv != nil {
- b.tx.stm.Del(kv.key)
-
- if isBucketKey(kv.key) {
- queue = append(queue, []byte(kv.val))
- }
-
- kv, err = b.tx.stm.Next(string(id), kv.key)
- if err != nil {
- return err
- }
- }
-
- // Finally delete the sequence key for the bucket.
- b.tx.stm.Del(string(makeSequenceKey(id)))
- }
-
- // Delete the top level bucket and sequence key.
- b.tx.stm.Del(bucketKey)
- b.tx.stm.Del(string(makeSequenceKey(bucketVal)))
-
- return nil
-}
-
-// Put updates the value for the passed key.
-// Returns ErrKeyRequred if te passed key is empty.
-func (b *readWriteBucket) Put(key, value []byte) er.R {
- if len(key) == 0 {
- return walletdb.ErrKeyRequired.Default()
- }
-
- val, err := b.tx.stm.Get(string(makeBucketKey(b.id, key)))
- if err != nil {
- return err
- }
-
- if val != nil {
- return walletdb.ErrIncompatibleValue.Default()
- }
-
- // Update the transaction with the new value.
- b.tx.stm.Put(string(makeValueKey(b.id, key)), string(value))
-
- return nil
-}
-
-// Delete deletes the key/value pointed to by the passed key.
-// Returns ErrKeyRequred if the passed key is empty.
-func (b *readWriteBucket) Delete(key []byte) er.R {
- if key == nil {
- return nil
- }
- if len(key) == 0 {
- return walletdb.ErrKeyRequired.Default()
- }
-
- // Update the transaction to delete the key/value.
- b.tx.stm.Del(string(makeValueKey(b.id, key)))
-
- return nil
-}
-
-// ReadWriteCursor returns a new read-write cursor for this bucket.
-func (b *readWriteBucket) ReadWriteCursor() walletdb.ReadWriteCursor {
- return newReadWriteCursor(b)
-}
-
-// Tx returns the buckets transaction.
-func (b *readWriteBucket) Tx() walletdb.ReadWriteTx {
- return b.tx
-}
-
-// NextSequence returns an autoincrementing sequence number for this bucket.
-// Note that this is not a thread safe function and as such it must not be used
-// for synchronization.
-func (b *readWriteBucket) NextSequence() (uint64, er.R) {
- seq := b.Sequence() + 1
-
- return seq, b.SetSequence(seq)
-}
-
-// SetSequence updates the sequence number for the bucket.
-func (b *readWriteBucket) SetSequence(v uint64) er.R {
- // Convert the number to string.
- val := strconv.FormatUint(v, 10)
-
- // Update the transaction with the new value for the sequence key.
- b.tx.stm.Put(string(makeSequenceKey(b.id)), val)
-
- return nil
-}
-
-// Sequence returns the current sequence number for this bucket without
-// incrementing it.
-func (b *readWriteBucket) Sequence() uint64 {
- val, err := b.tx.stm.Get(string(makeSequenceKey(b.id)))
- if err != nil {
- // TODO: This update kvdb interface such that error
- // may be returned here.
- return 0
- }
-
- if val == nil {
- // If the sequence number is not yet
- // stored, then take the default value.
- return 0
- }
-
- // Otherwise try to parse a 64 bit unsigned integer from the value.
- num, _ := strconv.ParseUint(string(val), 10, 64)
-
- return num
-}
diff --git a/lnd/channeldb/kvdb/etcd/readwrite_bucket_test.go b/lnd/channeldb/kvdb/etcd/readwrite_bucket_test.go
deleted file mode 100644
index dd846986..00000000
--- a/lnd/channeldb/kvdb/etcd/readwrite_bucket_test.go
+++ /dev/null
@@ -1,523 +0,0 @@
-// +build kvdb_etcd
-
-package etcd
-
-import (
- "math"
- "testing"
-
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/pktwallet/walletdb"
- "github.com/stretchr/testify/require"
-)
-
-func TestBucketCreation(t *testing.T) {
- t.Parallel()
-
- f := NewEtcdTestFixture(t)
- defer f.Cleanup()
-
- db, err := newEtcdBackend(f.BackendConfig())
- util.RequireNoErr(t, err)
-
- err = db.Update(func(tx walletdb.ReadWriteTx) er.R {
- // empty bucket name
- b, err := tx.CreateTopLevelBucket(nil)
- util.RequireErr(t, walletdb.ErrBucketNameRequired, err)
- require.Nil(t, b)
-
- // empty bucket name
- b, err = tx.CreateTopLevelBucket([]byte(""))
- util.RequireErr(t, walletdb.ErrBucketNameRequired, err)
- require.Nil(t, b)
-
- // "apple"
- apple, err := tx.CreateTopLevelBucket([]byte("apple"))
- util.RequireNoErr(t, err)
- require.NotNil(t, apple)
-
- // Check bucket tx.
- require.Equal(t, tx, apple.Tx())
-
- // "apple" already created
- b, err = tx.CreateTopLevelBucket([]byte("apple"))
- util.RequireNoErr(t, err)
- require.NotNil(t, b)
-
- // "apple/banana"
- banana, err := apple.CreateBucket([]byte("banana"))
- util.RequireNoErr(t, err)
- require.NotNil(t, banana)
-
- banana, err = apple.CreateBucketIfNotExists([]byte("banana"))
- util.RequireNoErr(t, err)
- require.NotNil(t, banana)
-
- // Try creating "apple/banana" again
- b, err = apple.CreateBucket([]byte("banana"))
- util.RequireErr(t, walletdb.ErrBucketExists, err)
- require.Nil(t, b)
-
- // "apple/mango"
- mango, err := apple.CreateBucket([]byte("mango"))
- require.Nil(t, err)
- require.NotNil(t, mango)
-
- // "apple/banana/pear"
- pear, err := banana.CreateBucket([]byte("pear"))
- require.Nil(t, err)
- require.NotNil(t, pear)
-
- // empty bucket
- require.Nil(t, apple.NestedReadWriteBucket(nil))
- require.Nil(t, apple.NestedReadWriteBucket([]byte("")))
-
- // "apple/pear" doesn't exist
- require.Nil(t, apple.NestedReadWriteBucket([]byte("pear")))
-
- // "apple/banana" exits
- require.NotNil(t, apple.NestedReadWriteBucket([]byte("banana")))
- require.NotNil(t, apple.NestedReadBucket([]byte("banana")))
- return nil
- }, func() {})
-
- require.Nil(t, err)
-
- expected := map[string]string{
- bkey("apple"): bval("apple"),
- bkey("apple", "banana"): bval("apple", "banana"),
- bkey("apple", "mango"): bval("apple", "mango"),
- bkey("apple", "banana", "pear"): bval("apple", "banana", "pear"),
- }
- require.Equal(t, expected, f.Dump())
-}
-
-func TestBucketDeletion(t *testing.T) {
- t.Parallel()
-
- f := NewEtcdTestFixture(t)
- defer f.Cleanup()
-
- db, err := newEtcdBackend(f.BackendConfig())
- util.RequireNoErr(t, err)
-
- err = db.Update(func(tx walletdb.ReadWriteTx) er.R {
- // "apple"
- apple, err := tx.CreateTopLevelBucket([]byte("apple"))
- require.Nil(t, err)
- require.NotNil(t, apple)
-
- // "apple/banana"
- banana, err := apple.CreateBucket([]byte("banana"))
- require.Nil(t, err)
- require.NotNil(t, banana)
-
- kvs := []KV{{"key1", "val1"}, {"key2", "val2"}, {"key3", "val3"}}
-
- for _, kv := range kvs {
- util.RequireNoErr(t, banana.Put([]byte(kv.key), []byte(kv.val)))
- require.Equal(t, []byte(kv.val), banana.Get([]byte(kv.key)))
- }
-
- // Delete a k/v from "apple/banana"
- util.RequireNoErr(t, banana.Delete([]byte("key2")))
- // Try getting/putting/deleting invalid k/v's.
- require.Nil(t, banana.Get(nil))
- util.RequireErr(t, walletdb.ErrKeyRequired, banana.Put(nil, []byte("val")))
- util.RequireErr(t, walletdb.ErrKeyRequired, banana.Delete(nil))
-
- // Try deleting a k/v that doesn't exist.
- util.RequireNoErr(t, banana.Delete([]byte("nokey")))
-
- // "apple/pear"
- pear, err := apple.CreateBucket([]byte("pear"))
- require.Nil(t, err)
- require.NotNil(t, pear)
-
- // Put some values into "apple/pear"
- for _, kv := range kvs {
- require.Nil(t, pear.Put([]byte(kv.key), []byte(kv.val)))
- require.Equal(t, []byte(kv.val), pear.Get([]byte(kv.key)))
- }
-
- // Create nested bucket "apple/pear/cherry"
- cherry, err := pear.CreateBucket([]byte("cherry"))
- require.Nil(t, err)
- require.NotNil(t, cherry)
-
- // Put some values into "apple/pear/cherry"
- for _, kv := range kvs {
- util.RequireNoErr(t, cherry.Put([]byte(kv.key), []byte(kv.val)))
- }
-
- // Read back values in "apple/pear/cherry" trough a read bucket.
- cherryReadBucket := pear.NestedReadBucket([]byte("cherry"))
- for _, kv := range kvs {
- require.Equal(
- t, []byte(kv.val),
- cherryReadBucket.Get([]byte(kv.key)),
- )
- }
-
- // Try deleting some invalid buckets.
- util.RequireErr(t,
- walletdb.ErrBucketNameRequired, apple.DeleteNestedBucket(nil),
- )
-
- // Try deleting a non existing bucket.
- util.RequireErr(
- t,
- walletdb.ErrBucketNotFound,
- apple.DeleteNestedBucket([]byte("missing")),
- )
-
- // Delete "apple/pear"
- require.Nil(t, apple.DeleteNestedBucket([]byte("pear")))
-
- // "apple/pear" deleted
- require.Nil(t, apple.NestedReadWriteBucket([]byte("pear")))
-
- // "apple/pear/cherry" deleted
- require.Nil(t, pear.NestedReadWriteBucket([]byte("cherry")))
-
- // Values deleted too.
- for _, kv := range kvs {
- require.Nil(t, pear.Get([]byte(kv.key)))
- require.Nil(t, cherry.Get([]byte(kv.key)))
- }
-
- // "aple/banana" exists
- require.NotNil(t, apple.NestedReadWriteBucket([]byte("banana")))
- return nil
- }, func() {})
-
- require.Nil(t, err)
-
- expected := map[string]string{
- bkey("apple"): bval("apple"),
- bkey("apple", "banana"): bval("apple", "banana"),
- vkey("key1", "apple", "banana"): "val1",
- vkey("key3", "apple", "banana"): "val3",
- }
- require.Equal(t, expected, f.Dump())
-}
-
-func TestBucketForEach(t *testing.T) {
- t.Parallel()
-
- f := NewEtcdTestFixture(t)
- defer f.Cleanup()
-
- db, err := newEtcdBackend(f.BackendConfig())
- util.RequireNoErr(t, err)
-
- err = db.Update(func(tx walletdb.ReadWriteTx) er.R {
- // "apple"
- apple, err := tx.CreateTopLevelBucket([]byte("apple"))
- require.Nil(t, err)
- require.NotNil(t, apple)
-
- // "apple/banana"
- banana, err := apple.CreateBucket([]byte("banana"))
- require.Nil(t, err)
- require.NotNil(t, banana)
-
- kvs := []KV{{"key1", "val1"}, {"key2", "val2"}, {"key3", "val3"}}
-
- // put some values into "apple" and "apple/banana" too
- for _, kv := range kvs {
- require.Nil(t, apple.Put([]byte(kv.key), []byte(kv.val)))
- require.Equal(t, []byte(kv.val), apple.Get([]byte(kv.key)))
-
- require.Nil(t, banana.Put([]byte(kv.key), []byte(kv.val)))
- require.Equal(t, []byte(kv.val), banana.Get([]byte(kv.key)))
- }
-
- got := make(map[string]string)
- err = apple.ForEach(func(key, val []byte) er.R {
- got[string(key)] = string(val)
- return nil
- })
-
- expected := map[string]string{
- "key1": "val1",
- "key2": "val2",
- "key3": "val3",
- "banana": "",
- }
-
- util.RequireNoErr(t, err)
- require.Equal(t, expected, got)
-
- got = make(map[string]string)
- err = banana.ForEach(func(key, val []byte) er.R {
- got[string(key)] = string(val)
- return nil
- })
-
- util.RequireNoErr(t, err)
- // remove the sub-bucket key
- delete(expected, "banana")
- require.Equal(t, expected, got)
-
- return nil
- }, func() {})
-
- require.Nil(t, err)
-
- expected := map[string]string{
- bkey("apple"): bval("apple"),
- bkey("apple", "banana"): bval("apple", "banana"),
- vkey("key1", "apple"): "val1",
- vkey("key2", "apple"): "val2",
- vkey("key3", "apple"): "val3",
- vkey("key1", "apple", "banana"): "val1",
- vkey("key2", "apple", "banana"): "val2",
- vkey("key3", "apple", "banana"): "val3",
- }
- require.Equal(t, expected, f.Dump())
-}
-
-func TestBucketForEachWithError(t *testing.T) {
- t.Parallel()
-
- f := NewEtcdTestFixture(t)
- defer f.Cleanup()
-
- db, err := newEtcdBackend(f.BackendConfig())
- util.RequireNoErr(t, err)
-
- err = db.Update(func(tx walletdb.ReadWriteTx) er.R {
- // "apple"
- apple, err := tx.CreateTopLevelBucket([]byte("apple"))
- require.Nil(t, err)
- require.NotNil(t, apple)
-
- // "apple/banana"
- banana, err := apple.CreateBucket([]byte("banana"))
- require.Nil(t, err)
- require.NotNil(t, banana)
-
- // "apple/pear"
- pear, err := apple.CreateBucket([]byte("pear"))
- require.Nil(t, err)
- require.NotNil(t, pear)
-
- kvs := []KV{{"key1", "val1"}, {"key2", "val2"}}
-
- // Put some values into "apple" and "apple/banana" too.
- for _, kv := range kvs {
- require.Nil(t, apple.Put([]byte(kv.key), []byte(kv.val)))
- require.Equal(t, []byte(kv.val), apple.Get([]byte(kv.key)))
- }
-
- got := make(map[string]string)
- i := 0
- // Error while iterating value keys.
- err = apple.ForEach(func(key, val []byte) er.R {
- if i == 2 {
- return er.Errorf("error")
- }
-
- got[string(key)] = string(val)
- i++
- return nil
- })
-
- expected := map[string]string{
- "banana": "",
- "key1": "val1",
- }
-
- require.Equal(t, expected, got)
- util.RequireErr(t, err)
-
- got = make(map[string]string)
- i = 0
- // Erro while iterating buckets.
- err = apple.ForEach(func(key, val []byte) er.R {
- if i == 3 {
- return er.Errorf("error")
- }
-
- got[string(key)] = string(val)
- i++
- return nil
- })
-
- expected = map[string]string{
- "banana": "",
- "key1": "val1",
- "key2": "val2",
- }
-
- require.Equal(t, expected, got)
- util.RequireErr(t, err)
- return nil
- }, func() {})
-
- require.Nil(t, err)
-
- expected := map[string]string{
- bkey("apple"): bval("apple"),
- bkey("apple", "banana"): bval("apple", "banana"),
- bkey("apple", "pear"): bval("apple", "pear"),
- vkey("key1", "apple"): "val1",
- vkey("key2", "apple"): "val2",
- }
- require.Equal(t, expected, f.Dump())
-}
-
-func TestBucketSequence(t *testing.T) {
- t.Parallel()
-
- f := NewEtcdTestFixture(t)
- defer f.Cleanup()
-
- db, err := newEtcdBackend(f.BackendConfig())
- util.RequireNoErr(t, err)
-
- err = db.Update(func(tx walletdb.ReadWriteTx) er.R {
- apple, err := tx.CreateTopLevelBucket([]byte("apple"))
- require.Nil(t, err)
- require.NotNil(t, apple)
-
- banana, err := apple.CreateBucket([]byte("banana"))
- require.Nil(t, err)
- require.NotNil(t, banana)
-
- require.Equal(t, uint64(0), apple.Sequence())
- require.Equal(t, uint64(0), banana.Sequence())
-
- require.Nil(t, apple.SetSequence(math.MaxUint64))
- require.Equal(t, uint64(math.MaxUint64), apple.Sequence())
-
- for i := uint64(0); i < uint64(5); i++ {
- s, err := apple.NextSequence()
- require.Nil(t, err)
- require.Equal(t, i, s)
- }
-
- return nil
- }, func() {})
-
- require.Nil(t, err)
-}
-
-// TestKeyClash tests that one cannot create a bucket if a value with the same
-// key exists and the same is true in reverse: that a value cannot be put if
-// a bucket with the same key exists.
-func TestKeyClash(t *testing.T) {
- t.Parallel()
-
- f := NewEtcdTestFixture(t)
- defer f.Cleanup()
-
- db, err := newEtcdBackend(f.BackendConfig())
- util.RequireNoErr(t, err)
-
- // First:
- // put: /apple/key -> val
- // create bucket: /apple/banana
- err = db.Update(func(tx walletdb.ReadWriteTx) er.R {
- apple, err := tx.CreateTopLevelBucket([]byte("apple"))
- require.Nil(t, err)
- require.NotNil(t, apple)
-
- util.RequireNoErr(t, apple.Put([]byte("key"), []byte("val")))
-
- banana, err := apple.CreateBucket([]byte("banana"))
- require.Nil(t, err)
- require.NotNil(t, banana)
-
- return nil
- }, func() {})
-
- require.Nil(t, err)
-
- // Next try to:
- // put: /apple/banana -> val => will fail (as /apple/banana is a bucket)
- // create bucket: /apple/key => will fail (as /apple/key is a value)
- err = db.Update(func(tx walletdb.ReadWriteTx) er.R {
- apple, err := tx.CreateTopLevelBucket([]byte("apple"))
- require.Nil(t, err)
- require.NotNil(t, apple)
-
- util.RequireErr(t,
- walletdb.ErrIncompatibleValue,
- apple.Put([]byte("banana"), []byte("val")),
- )
-
- b, err := apple.CreateBucket([]byte("key"))
- require.Nil(t, b)
- util.RequireErr(t, walletdb.ErrIncompatibleValue, b)
-
- b, err = apple.CreateBucketIfNotExists([]byte("key"))
- require.Nil(t, b)
- util.RequireErr(t, walletdb.ErrIncompatibleValue, b)
-
- return nil
- }, func() {})
-
- require.Nil(t, err)
-
- // Except that the only existing items in the db are:
- // bucket: /apple
- // bucket: /apple/banana
- // value: /apple/key -> val
- expected := map[string]string{
- bkey("apple"): bval("apple"),
- bkey("apple", "banana"): bval("apple", "banana"),
- vkey("key", "apple"): "val",
- }
- require.Equal(t, expected, f.Dump())
-
-}
-
-// TestBucketCreateDelete tests that creating then deleting then creating a
-// bucket suceeds.
-func TestBucketCreateDelete(t *testing.T) {
- t.Parallel()
- f := NewEtcdTestFixture(t)
- defer f.Cleanup()
-
- db, err := newEtcdBackend(f.BackendConfig())
- util.RequireNoErr(t, err)
-
- err = db.Update(func(tx walletdb.ReadWriteTx) er.R {
- apple, err := tx.CreateTopLevelBucket([]byte("apple"))
- util.RequireNoErr(t, err)
- require.NotNil(t, apple)
-
- banana, err := apple.CreateBucket([]byte("banana"))
- util.RequireNoErr(t, err)
- require.NotNil(t, banana)
-
- return nil
- }, func() {})
- util.RequireNoErr(t, err)
-
- err = db.Update(func(tx walletdb.ReadWriteTx) er.R {
- apple := tx.ReadWriteBucket([]byte("apple"))
- require.NotNil(t, apple)
- util.RequireNoErr(t, apple.DeleteNestedBucket([]byte("banana")))
-
- return nil
- }, func() {})
- util.RequireNoErr(t, err)
-
- err = db.Update(func(tx walletdb.ReadWriteTx) er.R {
- apple := tx.ReadWriteBucket([]byte("apple"))
- require.NotNil(t, apple)
- util.RequireNoErr(t, apple.Put([]byte("banana"), []byte("value")))
-
- return nil
- }, func() {})
- util.RequireNoErr(t, err)
-
- expected := map[string]string{
- vkey("banana", "apple"): "value",
- bkey("apple"): bval("apple"),
- }
- require.Equal(t, expected, f.Dump())
-}
diff --git a/lnd/channeldb/kvdb/etcd/readwrite_cursor.go b/lnd/channeldb/kvdb/etcd/readwrite_cursor.go
deleted file mode 100644
index 251b4c67..00000000
--- a/lnd/channeldb/kvdb/etcd/readwrite_cursor.go
+++ /dev/null
@@ -1,143 +0,0 @@
-// +build kvdb_etcd
-
-package etcd
-
-// readWriteCursor holds a reference to the cursors bucket, the value
-// prefix and the current key used while iterating.
-type readWriteCursor struct {
- // bucket holds the reference to the parent bucket.
- bucket *readWriteBucket
-
- // prefix holds the value prefix which is in front of each
- // value key in the bucket.
- prefix string
-
- // currKey holds the current key of the cursor.
- currKey string
-}
-
-func newReadWriteCursor(bucket *readWriteBucket) *readWriteCursor {
- return &readWriteCursor{
- bucket: bucket,
- prefix: string(bucket.id),
- }
-}
-
-// First positions the cursor at the first key/value pair and returns
-// the pair.
-func (c *readWriteCursor) First() (key, value []byte) {
- // Get the first key with the value prefix.
- kv, err := c.bucket.tx.stm.First(c.prefix)
- if err != nil {
- // TODO: revise this once kvdb interface supports errors
- return nil, nil
- }
-
- if kv != nil {
- c.currKey = kv.key
- return getKeyVal(kv)
- }
-
- return nil, nil
-}
-
-// Last positions the cursor at the last key/value pair and returns the
-// pair.
-func (c *readWriteCursor) Last() (key, value []byte) {
- kv, err := c.bucket.tx.stm.Last(c.prefix)
- if err != nil {
- // TODO: revise this once kvdb interface supports errors
- return nil, nil
- }
-
- if kv != nil {
- c.currKey = kv.key
- return getKeyVal(kv)
- }
-
- return nil, nil
-}
-
-// Next moves the cursor one key/value pair forward and returns the new
-// pair.
-func (c *readWriteCursor) Next() (key, value []byte) {
- kv, err := c.bucket.tx.stm.Next(c.prefix, c.currKey)
- if err != nil {
- // TODO: revise this once kvdb interface supports errors
- return nil, nil
- }
-
- if kv != nil {
- c.currKey = kv.key
- return getKeyVal(kv)
- }
-
- return nil, nil
-}
-
-// Prev moves the cursor one key/value pair backward and returns the new
-// pair.
-func (c *readWriteCursor) Prev() (key, value []byte) {
- kv, err := c.bucket.tx.stm.Prev(c.prefix, c.currKey)
- if err != nil {
- // TODO: revise this once kvdb interface supports errors
- return nil, nil
- }
-
- if kv != nil {
- c.currKey = kv.key
- return getKeyVal(kv)
- }
-
- return nil, nil
-}
-
-// Seek positions the cursor at the passed seek key. If the key does
-// not exist, the cursor is moved to the next key after seek. Returns
-// the new pair.
-func (c *readWriteCursor) Seek(seek []byte) (key, value []byte) {
- // Return nil if trying to seek to an empty key.
- if seek == nil {
- return nil, nil
- }
-
- // Seek to the first key with prefix + seek. If that key is not present
- // STM will seek to the next matching key with prefix.
- kv, err := c.bucket.tx.stm.Seek(c.prefix, c.prefix+string(seek))
- if err != nil {
- // TODO: revise this once kvdb interface supports errors
- return nil, nil
- }
-
- if kv != nil {
- c.currKey = kv.key
- return getKeyVal(kv)
- }
-
- return nil, nil
-}
-
-// Delete removes the current key/value pair the cursor is at without
-// invalidating the cursor. Returns ErrIncompatibleValue if attempted
-// when the cursor points to a nested bucket.
-func (c *readWriteCursor) Delete() er.R {
- // Get the next key after the current one. We could do this
- // after deletion too but it's one step more efficient here.
- nextKey, err := c.bucket.tx.stm.Next(c.prefix, c.currKey)
- if err != nil {
- return err
- }
-
- if isBucketKey(c.currKey) {
- c.bucket.DeleteNestedBucket(getKey(c.currKey))
- } else {
- c.bucket.Delete(getKey(c.currKey))
- }
-
- if nextKey != nil {
- // Set current key to the next one.
- c.currKey = nextKey.key
- }
-
- return nil
-}
diff --git a/lnd/channeldb/kvdb/etcd/readwrite_cursor_test.go b/lnd/channeldb/kvdb/etcd/readwrite_cursor_test.go
deleted file mode 100644
index 7fc3a3c2..00000000
--- a/lnd/channeldb/kvdb/etcd/readwrite_cursor_test.go
+++ /dev/null
@@ -1,369 +0,0 @@
-// +build kvdb_etcd
-
-package etcd
-
-import (
- "testing"
-
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/pktwallet/walletdb"
- "github.com/stretchr/testify/require"
-)
-
-func TestReadCursorEmptyInterval(t *testing.T) {
- t.Parallel()
-
- f := NewEtcdTestFixture(t)
- defer f.Cleanup()
-
- db, err := newEtcdBackend(f.BackendConfig())
- util.RequireNoErr(t, err)
-
- err = db.Update(func(tx walletdb.ReadWriteTx) er.R {
- b, err := tx.CreateTopLevelBucket([]byte("apple"))
- util.RequireNoErr(t, err)
- require.NotNil(t, b)
-
- return nil
- }, func() {})
- util.RequireNoErr(t, err)
-
- err = db.View(func(tx walletdb.ReadTx) er.R {
- b := tx.ReadBucket([]byte("apple"))
- require.NotNil(t, b)
-
- cursor := b.ReadCursor()
- k, v := cursor.First()
- require.Nil(t, k)
- require.Nil(t, v)
-
- k, v = cursor.Next()
- require.Nil(t, k)
- require.Nil(t, v)
-
- k, v = cursor.Last()
- require.Nil(t, k)
- require.Nil(t, v)
-
- k, v = cursor.Prev()
- require.Nil(t, k)
- require.Nil(t, v)
-
- return nil
- }, func() {})
- util.RequireNoErr(t, err)
-}
-
-func TestReadCursorNonEmptyInterval(t *testing.T) {
- t.Parallel()
-
- f := NewEtcdTestFixture(t)
- defer f.Cleanup()
-
- db, err := newEtcdBackend(f.BackendConfig())
- util.RequireNoErr(t, err)
-
- testKeyValues := []KV{
- {"b", "1"},
- {"c", "2"},
- {"da", "3"},
- {"e", "4"},
- }
-
- err = db.Update(func(tx walletdb.ReadWriteTx) er.R {
- b, err := tx.CreateTopLevelBucket([]byte("apple"))
- util.RequireNoErr(t, err)
- require.NotNil(t, b)
-
- for _, kv := range testKeyValues {
- util.RequireNoErr(t, b.Put([]byte(kv.key), []byte(kv.val)))
- }
- return nil
- }, func() {})
-
- util.RequireNoErr(t, err)
-
- err = db.View(func(tx walletdb.ReadTx) er.R {
- b := tx.ReadBucket([]byte("apple"))
- require.NotNil(t, b)
-
- // Iterate from the front.
- var kvs []KV
- cursor := b.ReadCursor()
- k, v := cursor.First()
-
- for k != nil && v != nil {
- kvs = append(kvs, KV{string(k), string(v)})
- k, v = cursor.Next()
- }
- require.Equal(t, testKeyValues, kvs)
-
- // Iterate from the back.
- kvs = []KV{}
- k, v = cursor.Last()
-
- for k != nil && v != nil {
- kvs = append(kvs, KV{string(k), string(v)})
- k, v = cursor.Prev()
- }
- require.Equal(t, reverseKVs(testKeyValues), kvs)
-
- // Random access
- perm := []int{3, 0, 2, 1}
- for _, i := range perm {
- k, v := cursor.Seek([]byte(testKeyValues[i].key))
- require.Equal(t, []byte(testKeyValues[i].key), k)
- require.Equal(t, []byte(testKeyValues[i].val), v)
- }
-
- // Seek to nonexisting key.
- k, v = cursor.Seek(nil)
- require.Nil(t, k)
- require.Nil(t, v)
-
- k, v = cursor.Seek([]byte("x"))
- require.Nil(t, k)
- require.Nil(t, v)
-
- return nil
- }, func() {})
-
- util.RequireNoErr(t, err)
-}
-
-func TestReadWriteCursor(t *testing.T) {
- t.Parallel()
-
- f := NewEtcdTestFixture(t)
- defer f.Cleanup()
-
- db, err := newEtcdBackend(f.BackendConfig())
- util.RequireNoErr(t, err)
-
- testKeyValues := []KV{
- {"b", "1"},
- {"c", "2"},
- {"da", "3"},
- {"e", "4"},
- }
-
- count := len(testKeyValues)
-
- // Pre-store the first half of the interval.
- util.RequireNoErr(t, db.Update(func(tx walletdb.ReadWriteTx) er.R {
- b, err := tx.CreateTopLevelBucket([]byte("apple"))
- util.RequireNoErr(t, err)
- require.NotNil(t, b)
-
- for i := 0; i < count/2; i++ {
- err = b.Put(
- []byte(testKeyValues[i].key),
- []byte(testKeyValues[i].val),
- )
- util.RequireNoErr(t, err)
- }
- return nil
- }, func() {}))
-
- err = db.Update(func(tx walletdb.ReadWriteTx) er.R {
- b := tx.ReadWriteBucket([]byte("apple"))
- require.NotNil(t, b)
-
- // Store the second half of the interval.
- for i := count / 2; i < count; i++ {
- err = b.Put(
- []byte(testKeyValues[i].key),
- []byte(testKeyValues[i].val),
- )
- util.RequireNoErr(t, err)
- }
-
- cursor := b.ReadWriteCursor()
-
- // First on valid interval.
- fk, fv := cursor.First()
- require.Equal(t, []byte("b"), fk)
- require.Equal(t, []byte("1"), fv)
-
- // Prev(First()) = nil
- k, v := cursor.Prev()
- require.Nil(t, k)
- require.Nil(t, v)
-
- // Last on valid interval.
- lk, lv := cursor.Last()
- require.Equal(t, []byte("e"), lk)
- require.Equal(t, []byte("4"), lv)
-
- // Next(Last()) = nil
- k, v = cursor.Next()
- require.Nil(t, k)
- require.Nil(t, v)
-
- // Delete first item, then add an item before the
- // deleted one. Check that First/Next will "jump"
- // over the deleted item and return the new first.
- _, _ = cursor.First()
- util.RequireNoErr(t, cursor.Delete())
- util.RequireNoErr(t, b.Put([]byte("a"), []byte("0")))
- fk, fv = cursor.First()
-
- require.Equal(t, []byte("a"), fk)
- require.Equal(t, []byte("0"), fv)
-
- k, v = cursor.Next()
- require.Equal(t, []byte("c"), k)
- require.Equal(t, []byte("2"), v)
-
- // Similarly test that a new end is returned if
- // the old end is deleted first.
- _, _ = cursor.Last()
- util.RequireNoErr(t, cursor.Delete())
- util.RequireNoErr(t, b.Put([]byte("f"), []byte("5")))
-
- lk, lv = cursor.Last()
- require.Equal(t, []byte("f"), lk)
- require.Equal(t, []byte("5"), lv)
-
- k, v = cursor.Prev()
- require.Equal(t, []byte("da"), k)
- require.Equal(t, []byte("3"), v)
-
- // Overwrite k/v in the middle of the interval.
- util.RequireNoErr(t, b.Put([]byte("c"), []byte("3")))
- k, v = cursor.Prev()
- require.Equal(t, []byte("c"), k)
- require.Equal(t, []byte("3"), v)
-
- // Insert new key/values.
- util.RequireNoErr(t, b.Put([]byte("cx"), []byte("x")))
- util.RequireNoErr(t, b.Put([]byte("cy"), []byte("y")))
-
- k, v = cursor.Next()
- require.Equal(t, []byte("cx"), k)
- require.Equal(t, []byte("x"), v)
-
- k, v = cursor.Next()
- require.Equal(t, []byte("cy"), k)
- require.Equal(t, []byte("y"), v)
-
- expected := []KV{
- {"a", "0"},
- {"c", "3"},
- {"cx", "x"},
- {"cy", "y"},
- {"da", "3"},
- {"f", "5"},
- }
-
- // Iterate from the front.
- var kvs []KV
- k, v = cursor.First()
-
- for k != nil && v != nil {
- kvs = append(kvs, KV{string(k), string(v)})
- k, v = cursor.Next()
- }
- require.Equal(t, expected, kvs)
-
- // Iterate from the back.
- kvs = []KV{}
- k, v = cursor.Last()
-
- for k != nil && v != nil {
- kvs = append(kvs, KV{string(k), string(v)})
- k, v = cursor.Prev()
- }
- require.Equal(t, reverseKVs(expected), kvs)
-
- return nil
- }, func() {})
-
- util.RequireNoErr(t, err)
-
- expected := map[string]string{
- bkey("apple"): bval("apple"),
- vkey("a", "apple"): "0",
- vkey("c", "apple"): "3",
- vkey("cx", "apple"): "x",
- vkey("cy", "apple"): "y",
- vkey("da", "apple"): "3",
- vkey("f", "apple"): "5",
- }
- require.Equal(t, expected, f.Dump())
-}
-
-// TestReadWriteCursorWithBucketAndValue tests that cursors are able to iterate
-// over both bucket and value keys if both are present in the iterated bucket.
-func TestReadWriteCursorWithBucketAndValue(t *testing.T) {
- t.Parallel()
-
- f := NewEtcdTestFixture(t)
- defer f.Cleanup()
-
- db, err := newEtcdBackend(f.BackendConfig())
- util.RequireNoErr(t, err)
-
- // Pre-store the first half of the interval.
- util.RequireNoErr(t, db.Update(func(tx walletdb.ReadWriteTx) er.R {
- b, err := tx.CreateTopLevelBucket([]byte("apple"))
- util.RequireNoErr(t, err)
- require.NotNil(t, b)
-
- util.RequireNoErr(t, b.Put([]byte("key"), []byte("val")))
-
- b1, err := b.CreateBucket([]byte("banana"))
- util.RequireNoErr(t, err)
- require.NotNil(t, b1)
-
- b2, err := b.CreateBucket([]byte("pear"))
- util.RequireNoErr(t, err)
- require.NotNil(t, b2)
-
- return nil
- }, func() {}))
-
- err = db.View(func(tx walletdb.ReadTx) er.R {
- b := tx.ReadBucket([]byte("apple"))
- require.NotNil(t, b)
-
- cursor := b.ReadCursor()
-
- // First on valid interval.
- k, v := cursor.First()
- require.Equal(t, []byte("banana"), k)
- require.Nil(t, v)
-
- k, v = cursor.Next()
- require.Equal(t, []byte("key"), k)
- require.Equal(t, []byte("val"), v)
-
- k, v = cursor.Last()
- require.Equal(t, []byte("pear"), k)
- require.Nil(t, v)
-
- k, v = cursor.Seek([]byte("k"))
- require.Equal(t, []byte("key"), k)
- require.Equal(t, []byte("val"), v)
-
- k, v = cursor.Seek([]byte("banana"))
- require.Equal(t, []byte("banana"), k)
- require.Nil(t, v)
-
- k, v = cursor.Next()
- require.Equal(t, []byte("key"), k)
- require.Equal(t, []byte("val"), v)
-
- return nil
- }, func() {})
-
- util.RequireNoErr(t, err)
-
- expected := map[string]string{
- bkey("apple"): bval("apple"),
- bkey("apple", "banana"): bval("apple", "banana"),
- bkey("apple", "pear"): bval("apple", "pear"),
- vkey("key", "apple"): "val",
- }
- require.Equal(t, expected, f.Dump())
-}
diff --git a/lnd/channeldb/kvdb/etcd/readwrite_tx.go b/lnd/channeldb/kvdb/etcd/readwrite_tx.go
deleted file mode 100644
index 5d10c463..00000000
--- a/lnd/channeldb/kvdb/etcd/readwrite_tx.go
+++ /dev/null
@@ -1,100 +0,0 @@
-// +build kvdb_etcd
-
-package etcd
-
-import (
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/pktwallet/walletdb"
-)
-
-// readWriteTx holds a reference to the STM transaction.
-type readWriteTx struct {
- // stm is the reference to the parent STM.
- stm STM
-
- // rootBucketID holds the sha256 hash of the root bucket id, which is used
- // for key space spearation.
- rootBucketID [bucketIDLength]byte
-
- // active is true if the transaction hasn't been committed yet.
- active bool
-}
-
-// newReadWriteTx creates an rw transaction with the passed STM.
-func newReadWriteTx(stm STM, prefix string) *readWriteTx {
- return &readWriteTx{
- stm: stm,
- active: true,
- rootBucketID: makeBucketID([]byte(prefix)),
- }
-}
-
-// rooBucket is a helper function to return the always present
-// pseudo root bucket.
-func rootBucket(tx *readWriteTx) *readWriteBucket {
- return newReadWriteBucket(tx, tx.rootBucketID[:], tx.rootBucketID[:])
-}
-
-// ReadBucket opens the root bucket for read only access. If the bucket
-// described by the key does not exist, nil is returned.
-func (tx *readWriteTx) ReadBucket(key []byte) walletdb.ReadBucket {
- return rootBucket(tx).NestedReadWriteBucket(key)
-}
-
-// Rollback closes the transaction, discarding changes (if any) if the
-// database was modified by a write transaction.
-func (tx *readWriteTx) Rollback() er.R {
- // If the transaction has been closed roolback will fail.
- if !tx.active {
- return walletdb.ErrTxClosed.Default()
- }
-
- // Rollback the STM and set the tx to inactive.
- tx.stm.Rollback()
- tx.active = false
-
- return nil
-}
-
-// ReadWriteBucket opens the root bucket for read/write access. If the
-// bucket described by the key does not exist, nil is returned.
-func (tx *readWriteTx) ReadWriteBucket(key []byte) walletdb.ReadWriteBucket {
- return rootBucket(tx).NestedReadWriteBucket(key)
-}
-
-// CreateTopLevelBucket creates the top level bucket for a key if it
-// does not exist. The newly-created bucket it returned.
-func (tx *readWriteTx) CreateTopLevelBucket(key []byte) (walletdb.ReadWriteBucket, er.R) {
- return rootBucket(tx).CreateBucketIfNotExists(key)
-}
-
-// DeleteTopLevelBucket deletes the top level bucket for a key. This
-// errors if the bucket can not be found or the key keys a single value
-// instead of a bucket.
-func (tx *readWriteTx) DeleteTopLevelBucket(key []byte) er.R {
- return rootBucket(tx).DeleteNestedBucket(key)
-}
-
-// Commit commits the transaction if not already committed. Will return
-// error if the underlying STM fails.
-func (tx *readWriteTx) Commit() er.R {
- // Commit will fail if the transaction is already committed.
- if !tx.active {
- return walletdb.ErrTxClosed.Default()
- }
-
- // Try committing the transaction.
- if err := tx.stm.Commit(); err != nil {
- return err
- }
-
- // Mark the transaction as not active after commit.
- tx.active = false
-
- return nil
-}
-
-// OnCommit sets the commit callback (overriding if already set).
-func (tx *readWriteTx) OnCommit(cb func()) {
- tx.stm.OnCommit(cb)
-}
diff --git a/lnd/channeldb/kvdb/etcd/readwrite_tx_test.go b/lnd/channeldb/kvdb/etcd/readwrite_tx_test.go
deleted file mode 100644
index 40a2bfc9..00000000
--- a/lnd/channeldb/kvdb/etcd/readwrite_tx_test.go
+++ /dev/null
@@ -1,158 +0,0 @@
-// +build kvdb_etcd
-
-package etcd
-
-import (
- "testing"
-
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/btcutil/util"
- "github.com/pkt-cash/pktd/pktwallet/walletdb"
- "github.com/stretchr/testify/require"
-)
-
-func TestTxManualCommit(t *testing.T) {
- t.Parallel()
-
- f := NewEtcdTestFixture(t)
- defer f.Cleanup()
-
- db, err := newEtcdBackend(f.BackendConfig())
- util.RequireNoErr(t, err)
-
- tx, err := db.BeginReadWriteTx()
- util.RequireNoErr(t, err)
- require.NotNil(t, tx)
-
- committed := false
-
- tx.OnCommit(func() {
- committed = true
- })
-
- apple, err := tx.CreateTopLevelBucket([]byte("apple"))
- util.RequireNoErr(t, err)
- require.NotNil(t, apple)
- util.RequireNoErr(t, apple.Put([]byte("testKey"), []byte("testVal")))
-
- banana, err := tx.CreateTopLevelBucket([]byte("banana"))
- util.RequireNoErr(t, err)
- require.NotNil(t, banana)
- util.RequireNoErr(t, banana.Put([]byte("testKey"), []byte("testVal")))
- util.RequireNoErr(t, tx.DeleteTopLevelBucket([]byte("banana")))
-
- util.RequireNoErr(t, tx.Commit())
- require.True(t, committed)
-
- expected := map[string]string{
- bkey("apple"): bval("apple"),
- vkey("testKey", "apple"): "testVal",
- }
- require.Equal(t, expected, f.Dump())
-}
-
-func TestTxRollback(t *testing.T) {
- t.Parallel()
-
- f := NewEtcdTestFixture(t)
- defer f.Cleanup()
-
- db, err := newEtcdBackend(f.BackendConfig())
- util.RequireNoErr(t, err)
-
- tx, err := db.BeginReadWriteTx()
- require.Nil(t, err)
- require.NotNil(t, tx)
-
- apple, err := tx.CreateTopLevelBucket([]byte("apple"))
- require.Nil(t, err)
- require.NotNil(t, apple)
-
- util.RequireNoErr(t, apple.Put([]byte("testKey"), []byte("testVal")))
-
- util.RequireNoErr(t, tx.Rollback())
- util.RequireErr(t, walletdb.ErrTxClosed, tx.Commit())
- require.Equal(t, map[string]string{}, f.Dump())
-}
-
-func TestChangeDuringManualTx(t *testing.T) {
- t.Parallel()
-
- f := NewEtcdTestFixture(t)
- defer f.Cleanup()
-
- db, err := newEtcdBackend(f.BackendConfig())
- util.RequireNoErr(t, err)
-
- tx, err := db.BeginReadWriteTx()
- require.Nil(t, err)
- require.NotNil(t, tx)
-
- apple, err := tx.CreateTopLevelBucket([]byte("apple"))
- require.Nil(t, err)
- require.NotNil(t, apple)
-
- util.RequireNoErr(t, apple.Put([]byte("testKey"), []byte("testVal")))
-
- // Try overwriting the bucket key.
- f.Put(bkey("apple"), "banana")
-
- // TODO: translate error
- require.NotNil(t, tx.Commit())
- require.Equal(t, map[string]string{
- bkey("apple"): "banana",
- }, f.Dump())
-}
-
-func TestChangeDuringUpdate(t *testing.T) {
- t.Parallel()
-
- f := NewEtcdTestFixture(t)
- defer f.Cleanup()
-
- db, err := newEtcdBackend(f.BackendConfig())
- util.RequireNoErr(t, err)
-
- count := 0
-
- err = db.Update(func(tx walletdb.ReadWriteTx) er.R {
- apple, err := tx.CreateTopLevelBucket([]byte("apple"))
- util.RequireNoErr(t, err)
- require.NotNil(t, apple)
-
- util.RequireNoErr(t, apple.Put([]byte("key"), []byte("value")))
-
- if count == 0 {
- f.Put(vkey("key", "apple"), "new_value")
- f.Put(vkey("key2", "apple"), "value2")
- }
-
- cursor := apple.ReadCursor()
- k, v := cursor.First()
- require.Equal(t, []byte("key"), k)
- require.Equal(t, []byte("value"), v)
- require.Equal(t, v, apple.Get([]byte("key")))
-
- k, v = cursor.Next()
- if count == 0 {
- require.Nil(t, k)
- require.Nil(t, v)
- } else {
- require.Equal(t, []byte("key2"), k)
- require.Equal(t, []byte("value2"), v)
- }
-
- count++
- return nil
- }, func() {})
-
- require.Nil(t, err)
- require.Equal(t, count, 2)
-
- expected := map[string]string{
- bkey("apple"): bval("apple"),
- vkey("key", "apple"): "value",
- vkey("key2", "apple"): "value2",
- }
- require.Equal(t, expected, f.Dump())
-}
diff --git a/lnd/channeldb/kvdb/etcd/stm.go b/lnd/channeldb/kvdb/etcd/stm.go
deleted file mode 100644
index de111297..00000000
--- a/lnd/channeldb/kvdb/etcd/stm.go
+++ /dev/null
@@ -1,806 +0,0 @@
-// +build kvdb_etcd
-
-package etcd
-
-import (
- "context"
- "fmt"
- "math"
- "strings"
-
- v3 "github.com/coreos/etcd/clientv3"
- "github.com/pkt-cash/pktd/btcutil/er"
-)
-
-type CommitStats struct {
- Rset int
- Wset int
- Retries int
-}
-
-// KV stores a key/value pair.
-type KV struct {
- key string
- val string
-}
-
-// STM is an interface for software transactional memory.
-// All calls that return error will do so only if STM is manually handled and
-// abort the apply closure otherwise. In both case the returned error is a
-// DatabaseError.
-type STM interface {
- // Get returns the value for a key and inserts the key in the txn's read
- // set. Returns nil if there's no matching key, or the key is empty.
- Get(key string) ([]byte, er.R)
-
- // Put adds a value for a key to the txn's write set.
- Put(key, val string)
-
- // Del adds a delete operation for the key to the txn's write set.
- Del(key string)
-
- // First returns the first k/v that begins with prefix or nil if there's
- // no such k/v pair. If the key is found it is inserted to the txn's
- // read set. Returns nil if there's no match.
- First(prefix string) (*KV, er.R)
-
- // Last returns the last k/v that begins with prefix or nil if there's
- // no such k/v pair. If the key is found it is inserted to the txn's
- // read set. Returns nil if there's no match.
- Last(prefix string) (*KV, er.R)
-
- // Prev returns the previous k/v before key that begins with prefix or
- // nil if there's no such k/v. If the key is found it is inserted to the
- // read set. Returns nil if there's no match.
- Prev(prefix, key string) (*KV, er.R)
-
- // Next returns the next k/v after key that begins with prefix or nil
- // if there's no such k/v. If the key is found it is inserted to the
- // txn's read set. Returns nil if there's no match.
- Next(prefix, key string) (*KV, er.R)
-
- // Seek will return k/v at key beginning with prefix. If the key doesn't
- // exists Seek will return the next k/v after key beginning with prefix.
- // If a matching k/v is found it is inserted to the txn's read set. Returns
- // nil if there's no match.
- Seek(prefix, key string) (*KV, er.R)
-
- // OnCommit calls the passed callback func upon commit.
- OnCommit(func())
-
- // Commit attempts to apply the txn's changes to the server.
- // Commit may return CommitError if transaction is outdated and needs retry.
- Commit() er.R
-
- // Rollback emties the read and write sets such that a subsequent commit
- // won't alter the database.
- Rollback()
-}
-
-// CommitError is used to check if there was an error
-// due to stale data in the transaction.
-type CommitError struct{}
-
-// Error returns a static string for CommitError for
-// debugging/logging purposes.
-func (e CommitError) Error() string {
- return "commit failed"
-}
-
-// DatabaseError is used to wrap errors that are not
-// related to stale data in the transaction.
-type DatabaseError struct {
- msg string
- err error
-}
-
-// Unwrap returns the wrapped error in a DatabaseError.
-func (e *DatabaseError) Unwrap() er.R {
- return e.err
-}
-
-// Error simply converts DatabaseError to a string that
-// includes both the message and the wrapped error.
-func (e DatabaseError) Error() string {
- return fmt.Sprintf("etcd error: %v - %v", e.msg, e.err)
-}
-
-// stmGet is the result of a read operation,
-// a value and the mod revision of the key/value.
-type stmGet struct {
- val string
- rev int64
-}
-
-// readSet stores all reads done in an STM.
-type readSet map[string]stmGet
-
-// stmPut stores a value and an operation (put/delete).
-type stmPut struct {
- val string
- op v3.Op
-}
-
-// writeSet stroes all writes done in an STM.
-type writeSet map[string]stmPut
-
-// stm implements repeatable-read software transactional memory
-// over etcd.
-type stm struct {
- // client is an etcd client handling all RPC communications
- // to the etcd instance/cluster.
- client *v3.Client
-
- // manual is set to true for manual transactions which don't
- // execute in the STM run loop.
- manual bool
-
- // txQueue is lightweight contention manager, which is used to detect
- // transaction conflicts and reduce retries.
- txQueue *commitQueue
-
- // options stores optional settings passed by the user.
- options *STMOptions
-
- // prefetch hold prefetched key values and revisions.
- prefetch readSet
-
- // rset holds read key values and revisions.
- rset readSet
-
- // wset holds overwritten keys and their values.
- wset writeSet
-
- // getOpts are the opts used for gets.
- getOpts []v3.OpOption
-
- // revision stores the snapshot revision after first read.
- revision int64
-
- // onCommit gets called upon commit.
- onCommit func()
-}
-
-// STMOptions can be used to pass optional settings
-// when an STM is created.
-type STMOptions struct {
- // ctx holds an externally provided abort context.
- ctx context.Context
- commitStatsCallback func(bool, CommitStats)
-}
-
-// STMOptionFunc is a function that updates the passed STMOptions.
-type STMOptionFunc func(*STMOptions)
-
-// WithAbortContext specifies the context for permanently
-// aborting the transaction.
-func WithAbortContext(ctx context.Context) STMOptionFunc {
- return func(so *STMOptions) {
- so.ctx = ctx
- }
-}
-
-func WithCommitStatsCallback(cb func(bool, CommitStats)) STMOptionFunc {
- return func(so *STMOptions) {
- so.commitStatsCallback = cb
- }
-}
-
-// RunSTM runs the apply function by creating an STM using serializable snapshot
-// isolation, passing it to the apply and handling commit errors and retries.
-func RunSTM(cli *v3.Client, apply func(STM) error, txQueue *commitQueue,
- so ...STMOptionFunc) er.R {
-
- return runSTM(makeSTM(cli, false, txQueue, so...), apply)
-}
-
-// NewSTM creates a new STM instance, using serializable snapshot isolation.
-func NewSTM(cli *v3.Client, txQueue *commitQueue, so ...STMOptionFunc) STM {
- return makeSTM(cli, true, txQueue, so...)
-}
-
-// makeSTM is the actual constructor of the stm. It first apply all passed
-// options then creates the stm object and resets it before returning.
-func makeSTM(cli *v3.Client, manual bool, txQueue *commitQueue,
- so ...STMOptionFunc) *stm {
-
- opts := &STMOptions{
- ctx: cli.Ctx(),
- }
-
- // Apply all functional options.
- for _, fo := range so {
- fo(opts)
- }
-
- s := &stm{
- client: cli,
- manual: manual,
- txQueue: txQueue,
- options: opts,
- prefetch: make(map[string]stmGet),
- }
-
- // Reset read and write set.
- s.Rollback()
-
- return s
-}
-
-// runSTM implements the run loop of the STM, running the apply func, catching
-// errors and handling commit. The loop will quit on every error except
-// CommitError which is used to indicate a necessary retry.
-func runSTM(s *stm, apply func(STM) error) er.R {
- var (
- retries int
- stats CommitStats
- executeErr error
- )
-
- done := make(chan struct{})
-
- execute := func() {
- defer close(done)
-
- for {
- select {
- // Check if the STM is aborted and break the retry loop
- // if it is.
- case <-s.options.ctx.Done():
- executeErr = er.Errorf("aborted")
- return
-
- default:
- }
-
- stats, executeErr = s.commit()
-
- // Re-apply only upon commit error (meaning the
- // keys were changed).
- if _, ok := executeErr.(CommitError); !ok {
- // Anything that's not a CommitError
- // aborts the transaction.
- return
- }
-
- // Rollback before trying to re-apply.
- s.Rollback()
- retries++
-
- // Re-apply the transaction closure.
- if executeErr = apply(s); executeErr != nil {
- return
- }
- }
- }
-
- // Run the tx closure to construct the read and write sets.
- // Also we expect that if there are no conflicting transactions
- // in the queue, then we only run apply once.
- if preApplyErr := apply(s); preApplyErr != nil {
- return preApplyErr
- }
-
- // Queue up the transaction for execution.
- s.txQueue.Add(execute, s.rset, s.wset)
-
- // Wait for the transaction to execute, or break if aborted.
- select {
- case <-done:
- case <-s.options.ctx.Done():
- }
-
- s.txQueue.Done(s.rset, s.wset)
-
- if s.options.commitStatsCallback != nil {
- stats.Retries = retries
- s.options.commitStatsCallback(executeErr == nil, stats)
- }
-
- return executeErr
-}
-
-// add inserts a txn response to the read set. This is useful when the txn
-// fails due to conflict where the txn response can be used to prefetch
-// key/values.
-func (rs readSet) add(txnResp *v3.TxnResponse) {
- for _, resp := range txnResp.Responses {
- getResp := (*v3.GetResponse)(resp.GetResponseRange())
- for _, kv := range getResp.Kvs {
- rs[string(kv.Key)] = stmGet{
- val: string(kv.Value),
- rev: kv.ModRevision,
- }
- }
- }
-}
-
-// gets is a helper to create an op slice for transaction
-// construction.
-func (rs readSet) gets() []v3.Op {
- ops := make([]v3.Op, 0, len(rs))
-
- for k := range rs {
- ops = append(ops, v3.OpGet(k))
- }
-
- return ops
-}
-
-// cmps returns a compare list which will serve as a precondition testing that
-// the values in the read set didn't change.
-func (rs readSet) cmps() []v3.Cmp {
- cmps := make([]v3.Cmp, 0, len(rs))
- for key, getValue := range rs {
- cmps = append(cmps, v3.Compare(
- v3.ModRevision(key), "=", getValue.rev,
- ))
- }
-
- return cmps
-}
-
-// cmps returns a cmp list testing no writes have happened past rev.
-func (ws writeSet) cmps(rev int64) []v3.Cmp {
- cmps := make([]v3.Cmp, 0, len(ws))
- for key := range ws {
- cmps = append(cmps, v3.Compare(v3.ModRevision(key), "<", rev))
- }
-
- return cmps
-}
-
-// puts is the list of ops for all pending writes.
-func (ws writeSet) puts() []v3.Op {
- puts := make([]v3.Op, 0, len(ws))
- for _, v := range ws {
- puts = append(puts, v.op)
- }
-
- return puts
-}
-
-// fetch is a helper to fetch key/value given options. If a value is returned
-// then fetch will try to fix the STM's snapshot revision (if not already set).
-// We'll also cache the returned key/value in the read set.
-func (s *stm) fetch(key string, opts ...v3.OpOption) ([]KV, er.R) {
- resp, err := s.client.Get(
- s.options.ctx, key, append(opts, s.getOpts...)...,
- )
- if err != nil {
- return nil, DatabaseError{
- msg: "stm.fetch() failed",
- err: err,
- }
- }
-
- // Set revison and serializable options upon first fetch
- // for any subsequent fetches.
- if s.getOpts == nil {
- s.revision = resp.Header.Revision
- s.getOpts = []v3.OpOption{
- v3.WithRev(s.revision),
- v3.WithSerializable(),
- }
- }
-
- if len(resp.Kvs) == 0 {
- // Add assertion to the read set which will extend our commit
- // constraint such that the commit will fail if the key is
- // present in the database.
- s.rset[key] = stmGet{
- rev: 0,
- }
- }
-
- var result []KV
-
- // Fill the read set with key/values returned.
- for _, kv := range resp.Kvs {
- // Remove from prefetch.
- key := string(kv.Key)
- val := string(kv.Value)
-
- delete(s.prefetch, key)
-
- // Add to read set.
- s.rset[key] = stmGet{
- val: val,
- rev: kv.ModRevision,
- }
-
- result = append(result, KV{key, val})
- }
-
- return result, nil
-}
-
-// Get returns the value for key. If there's no such
-// key/value in the database or the passed key is empty
-// Get will return nil.
-func (s *stm) Get(key string) ([]byte, er.R) {
- if key == "" {
- return nil, nil
- }
-
- // Return freshly written value if present.
- if put, ok := s.wset[key]; ok {
- if put.op.IsDelete() {
- return nil, nil
- }
-
- return []byte(put.val), nil
- }
-
- // Populate read set if key is present in
- // the prefetch set.
- if getValue, ok := s.prefetch[key]; ok {
- delete(s.prefetch, key)
-
- // Use the prefetched value only if it is for
- // an existing key.
- if getValue.rev != 0 {
- s.rset[key] = getValue
- }
- }
-
- // Return value if alread in read set.
- if getValue, ok := s.rset[key]; ok {
- // Return the value if the rset contains an existing key.
- if getValue.rev != 0 {
- return []byte(getValue.val), nil
- } else {
- return nil, nil
- }
- }
-
- // Fetch and return value.
- kvs, err := s.fetch(key)
- if err != nil {
- return nil, err
- }
-
- if len(kvs) > 0 {
- return []byte(kvs[0].val), nil
- }
-
- // Return empty result if key not in DB.
- return nil, nil
-}
-
-// First returns the first key/value matching prefix. If there's no key starting
-// with prefix, Last will return nil.
-func (s *stm) First(prefix string) (*KV, er.R) {
- return s.next(prefix, prefix, true)
-}
-
-// Last returns the last key/value with prefix. If there's no key starting with
-// prefix, Last will return nil.
-func (s *stm) Last(prefix string) (*KV, er.R) {
- // As we don't know the full range, fetch the last
- // key/value with this prefix first.
- resp, err := s.fetch(prefix, v3.WithLastKey()...)
- if err != nil {
- return nil, err
- }
-
- var (
- kv KV
- found bool
- )
-
- if len(resp) > 0 {
- kv = resp[0]
- found = true
- }
-
- // Now make sure there's nothing in the write set
- // that is a better match, meaning it has the same
- // prefix but is greater or equal than the current
- // best candidate. Note that this is not efficient
- // when the write set is large!
- for k, put := range s.wset {
- if put.op.IsDelete() {
- continue
- }
-
- if strings.HasPrefix(k, prefix) && k >= kv.key {
- kv.key = k
- kv.val = put.val
- found = true
- }
- }
-
- if found {
- return &kv, nil
- }
-
- return nil, nil
-}
-
-// Prev returns the prior key/value before key (with prefix). If there's no such
-// key Next will return nil.
-func (s *stm) Prev(prefix, startKey string) (*KV, er.R) {
- var result KV
-
- fetchKey := startKey
- matchFound := false
-
- for {
- // Ask etcd to retrieve one key that is a
- // match in descending order from the passed key.
- opts := []v3.OpOption{
- v3.WithRange(fetchKey),
- v3.WithSort(v3.SortByKey, v3.SortDescend),
- v3.WithLimit(1),
- }
-
- kvs, err := s.fetch(prefix, opts...)
- if err != nil {
- return nil, err
- }
-
- if len(kvs) == 0 {
- break
- }
-
- kv := &kvs[0]
-
- // WithRange and WithPrefix can't be used
- // together, so check prefix here. If the
- // returned key no longer has the prefix,
- // then break out.
- if !strings.HasPrefix(kv.key, prefix) {
- break
- }
-
- // Fetch the prior key if this is deleted.
- if put, ok := s.wset[kv.key]; ok && put.op.IsDelete() {
- fetchKey = kv.key
- continue
- }
-
- result = *kv
- matchFound = true
-
- break
- }
-
- // Closre holding all checks to find a possibly
- // better match.
- matches := func(key string) bool {
- if !strings.HasPrefix(key, prefix) {
- return false
- }
-
- if !matchFound {
- return key < startKey
- }
-
- // matchFound == true
- return result.key <= key && key < startKey
- }
-
- // Now go trough the write set and check
- // if there's an even better match.
- for k, put := range s.wset {
- if !put.op.IsDelete() && matches(k) {
- result.key = k
- result.val = put.val
- matchFound = true
- }
- }
-
- if !matchFound {
- return nil, nil
- }
-
- return &result, nil
-}
-
-// Next returns the next key/value after key (with prefix). If there's no such
-// key Next will return nil.
-func (s *stm) Next(prefix string, key string) (*KV, er.R) {
- return s.next(prefix, key, false)
-}
-
-// Seek "seeks" to the key (with prefix). If the key doesn't exists it'll get
-// the next key with the same prefix. If no key fills this criteria, Seek will
-// return nil.
-func (s *stm) Seek(prefix, key string) (*KV, er.R) {
- return s.next(prefix, key, true)
-}
-
-// next will try to retrieve the next match that has prefix and starts with the
-// passed startKey. If includeStartKey is set to true, it'll return the value
-// of startKey (essentially implementing seek).
-func (s *stm) next(prefix, startKey string, includeStartKey bool) (*KV, er.R) {
- var result KV
-
- fetchKey := startKey
- firstFetch := true
- matchFound := false
-
- for {
- // Ask etcd to retrieve one key that is a
- // match in ascending order from the passed key.
- opts := []v3.OpOption{
- v3.WithFromKey(),
- v3.WithSort(v3.SortByKey, v3.SortAscend),
- v3.WithLimit(1),
- }
-
- // By default we include the start key too
- // if it is a full match.
- if includeStartKey && firstFetch {
- firstFetch = false
- } else {
- // If we'd like to retrieve the first key
- // after the start key.
- fetchKey += "\x00"
- }
-
- kvs, err := s.fetch(fetchKey, opts...)
- if err != nil {
- return nil, err
- }
-
- if len(kvs) == 0 {
- break
- }
-
- kv := &kvs[0]
- // WithRange and WithPrefix can't be used
- // together, so check prefix here. If the
- // returned key no longer has the prefix,
- // then break the fetch loop.
- if !strings.HasPrefix(kv.key, prefix) {
- break
- }
-
- // Move on to fetch starting with the next
- // key if this one is marked deleted.
- if put, ok := s.wset[kv.key]; ok && put.op.IsDelete() {
- fetchKey = kv.key
- continue
- }
-
- result = *kv
- matchFound = true
-
- break
- }
-
- // Closure holding all checks to find a possibly
- // better match.
- matches := func(k string) bool {
- if !strings.HasPrefix(k, prefix) {
- return false
- }
-
- if includeStartKey && !matchFound {
- return startKey <= k
- }
-
- if !includeStartKey && !matchFound {
- return startKey < k
- }
-
- if includeStartKey && matchFound {
- return startKey <= k && k <= result.key
- }
-
- // !includeStartKey && matchFound.
- return startKey < k && k <= result.key
- }
-
- // Now go trough the write set and check
- // if there's an even better match.
- for k, put := range s.wset {
- if !put.op.IsDelete() && matches(k) {
- result.key = k
- result.val = put.val
- matchFound = true
- }
- }
-
- if !matchFound {
- return nil, nil
- }
-
- return &result, nil
-}
-
-// Put sets the value of the passed key. The actual put will happen upon commit.
-func (s *stm) Put(key, val string) {
- s.wset[key] = stmPut{
- val: val,
- op: v3.OpPut(key, val),
- }
-}
-
-// Del marks a key as deleted. The actual delete will happen upon commit.
-func (s *stm) Del(key string) {
- s.wset[key] = stmPut{
- val: "",
- op: v3.OpDelete(key),
- }
-}
-
-// OnCommit sets the callback that is called upon committing the STM
-// transaction.
-func (s *stm) OnCommit(cb func()) {
- s.onCommit = cb
-}
-
-// commit builds the final transaction and tries to execute it. If commit fails
-// because the keys have changed return a CommitError, otherwise return a
-// DatabaseError.
-func (s *stm) commit() (CommitStats, er.R) {
- rset := s.rset.cmps()
- wset := s.wset.cmps(s.revision + 1)
-
- stats := CommitStats{
- Rset: len(rset),
- Wset: len(wset),
- }
-
- // Create the compare set.
- cmps := append(rset, wset...)
- // Create a transaction with the optional abort context.
- txn := s.client.Txn(s.options.ctx)
-
- // If the compare set holds, try executing the puts.
- txn = txn.If(cmps...)
- txn = txn.Then(s.wset.puts()...)
-
- // Prefetch keys in case of conflict to save
- // a round trip to etcd.
- txn = txn.Else(s.rset.gets()...)
-
- txnresp, err := txn.Commit()
- if err != nil {
- return stats, DatabaseError{
- msg: "stm.Commit() failed",
- err: err,
- }
- }
-
- // Call the commit callback if the transaction
- // was successful.
- if txnresp.Succeeded {
- if s.onCommit != nil {
- s.onCommit()
- }
-
- return stats, nil
- }
-
- // Load prefetch before if commit failed.
- s.rset.add(txnresp)
- s.prefetch = s.rset
-
- // Return CommitError indicating that the transaction
- // can be retried.
- return stats, CommitError{}
-}
-
-// Commit simply calls commit and the commit stats callback if set.
-func (s *stm) Commit() er.R {
- stats, err := s.commit()
-
- if s.options.commitStatsCallback != nil {
- s.options.commitStatsCallback(err == nil, stats)
- }
-
- return err
-}
-
-// Rollback resets the STM. This is useful for uncommitted transaction rollback
-// and also used in the STM main loop to reset state if commit fails.
-func (s *stm) Rollback() {
- s.rset = make(map[string]stmGet)
- s.wset = make(map[string]stmPut)
- s.getOpts = nil
- s.revision = math.MaxInt64 - 1
-}
diff --git a/lnd/channeldb/kvdb/etcd/stm_test.go b/lnd/channeldb/kvdb/etcd/stm_test.go
deleted file mode 100644
index 1d1bd63a..00000000
--- a/lnd/channeldb/kvdb/etcd/stm_test.go
+++ /dev/null
@@ -1,366 +0,0 @@
-// +build kvdb_etcd
-
-package etcd
-
-import (
- "errors"
- "testing"
-
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/btcutil/util"
- "github.com/stretchr/testify/require"
-)
-
-func reverseKVs(a []KV) []KV {
- for i, j := 0, len(a)-1; i < j; i, j = i+1, j-1 {
- a[i], a[j] = a[j], a[i]
- }
-
- return a
-}
-
-func TestPutToEmpty(t *testing.T) {
- t.Parallel()
-
- f := NewEtcdTestFixture(t)
- txQueue := NewCommitQueue(f.config.Ctx)
- defer func() {
- f.Cleanup()
- txQueue.Wait()
- }()
-
- db, err := newEtcdBackend(f.BackendConfig())
- util.RequireNoErr(t, err)
-
- apply := func(stm STM) er.R {
- stm.Put("123", "abc")
- return nil
- }
-
- err = RunSTM(db.cli, apply, txQueue)
- util.RequireNoErr(t, err)
-
- require.Equal(t, "abc", f.Get("123"))
-}
-
-func TestGetPutDel(t *testing.T) {
- t.Parallel()
-
- f := NewEtcdTestFixture(t)
- txQueue := NewCommitQueue(f.config.Ctx)
- defer func() {
- f.Cleanup()
- txQueue.Wait()
- }()
-
- testKeyValues := []KV{
- {"a", "1"},
- {"b", "2"},
- {"c", "3"},
- {"d", "4"},
- {"e", "5"},
- }
-
- for _, kv := range testKeyValues {
- f.Put(kv.key, kv.val)
- }
-
- db, err := newEtcdBackend(f.BackendConfig())
- util.RequireNoErr(t, err)
-
- apply := func(stm STM) er.R {
- // Get some non existing keys.
- v, err := stm.Get("")
- util.RequireNoErr(t, err)
- require.Nil(t, v)
-
- v, err = stm.Get("x")
- util.RequireNoErr(t, err)
- require.Nil(t, v)
-
- // Get all existing keys.
- for _, kv := range testKeyValues {
- v, err = stm.Get(kv.key)
- util.RequireNoErr(t, err)
- require.Equal(t, []byte(kv.val), v)
- }
-
- // Overwrite, then delete an existing key.
- stm.Put("c", "6")
-
- v, err = stm.Get("c")
- util.RequireNoErr(t, err)
- require.Equal(t, []byte("6"), v)
-
- stm.Del("c")
-
- v, err = stm.Get("c")
- util.RequireNoErr(t, err)
- require.Nil(t, v)
-
- // Re-add the deleted key.
- stm.Put("c", "7")
-
- v, err = stm.Get("c")
- util.RequireNoErr(t, err)
- require.Equal(t, []byte("7"), v)
-
- // Add a new key.
- stm.Put("x", "x")
-
- v, err = stm.Get("x")
- util.RequireNoErr(t, err)
- require.Equal(t, []byte("x"), v)
-
- return nil
- }
-
- err = RunSTM(db.cli, apply, txQueue)
- util.RequireNoErr(t, err)
-
- require.Equal(t, "1", f.Get("a"))
- require.Equal(t, "2", f.Get("b"))
- require.Equal(t, "7", f.Get("c"))
- require.Equal(t, "4", f.Get("d"))
- require.Equal(t, "5", f.Get("e"))
- require.Equal(t, "x", f.Get("x"))
-}
-
-func TestFirstLastNextPrev(t *testing.T) {
- t.Parallel()
-
- f := NewEtcdTestFixture(t)
- txQueue := NewCommitQueue(f.config.Ctx)
- defer func() {
- f.Cleanup()
- txQueue.Wait()
- }()
-
- testKeyValues := []KV{
- {"kb", "1"},
- {"kc", "2"},
- {"kda", "3"},
- {"ke", "4"},
- {"w", "w"},
- }
- for _, kv := range testKeyValues {
- f.Put(kv.key, kv.val)
- }
-
- db, err := newEtcdBackend(f.BackendConfig())
- util.RequireNoErr(t, err)
-
- apply := func(stm STM) er.R {
- // First/Last on valid multi item interval.
- kv, err := stm.First("k")
- util.RequireNoErr(t, err)
- require.Equal(t, &KV{"kb", "1"}, kv)
-
- kv, err = stm.Last("k")
- util.RequireNoErr(t, err)
- require.Equal(t, &KV{"ke", "4"}, kv)
-
- // First/Last on single item interval.
- kv, err = stm.First("w")
- util.RequireNoErr(t, err)
- require.Equal(t, &KV{"w", "w"}, kv)
-
- kv, err = stm.Last("w")
- util.RequireNoErr(t, err)
- require.Equal(t, &KV{"w", "w"}, kv)
-
- // Next/Prev on start/end.
- kv, err = stm.Next("k", "ke")
- util.RequireNoErr(t, err)
- require.Nil(t, kv)
-
- kv, err = stm.Prev("k", "kb")
- util.RequireNoErr(t, err)
- require.Nil(t, kv)
-
- // Next/Prev in the middle.
- kv, err = stm.Next("k", "kc")
- util.RequireNoErr(t, err)
- require.Equal(t, &KV{"kda", "3"}, kv)
-
- kv, err = stm.Prev("k", "ke")
- util.RequireNoErr(t, err)
- require.Equal(t, &KV{"kda", "3"}, kv)
-
- // Delete first item, then add an item before the
- // deleted one. Check that First/Next will "jump"
- // over the deleted item and return the new first.
- stm.Del("kb")
- stm.Put("ka", "0")
-
- kv, err = stm.First("k")
- util.RequireNoErr(t, err)
- require.Equal(t, &KV{"ka", "0"}, kv)
-
- kv, err = stm.Prev("k", "kc")
- util.RequireNoErr(t, err)
- require.Equal(t, &KV{"ka", "0"}, kv)
-
- // Similarly test that a new end is returned if
- // the old end is deleted first.
- stm.Del("ke")
- stm.Put("kf", "5")
-
- kv, err = stm.Last("k")
- util.RequireNoErr(t, err)
- require.Equal(t, &KV{"kf", "5"}, kv)
-
- kv, err = stm.Next("k", "kda")
- util.RequireNoErr(t, err)
- require.Equal(t, &KV{"kf", "5"}, kv)
-
- // Overwrite one in the middle.
- stm.Put("kda", "6")
-
- kv, err = stm.Next("k", "kc")
- util.RequireNoErr(t, err)
- require.Equal(t, &KV{"kda", "6"}, kv)
-
- // Add three in the middle, then delete one.
- stm.Put("kdb", "7")
- stm.Put("kdc", "8")
- stm.Put("kdd", "9")
- stm.Del("kdc")
-
- // Check that stepping from first to last returns
- // the expected sequence.
- var kvs []KV
-
- curr, err := stm.First("k")
- util.RequireNoErr(t, err)
-
- for curr != nil {
- kvs = append(kvs, *curr)
- curr, err = stm.Next("k", curr.key)
- util.RequireNoErr(t, err)
- }
-
- expected := []KV{
- {"ka", "0"},
- {"kc", "2"},
- {"kda", "6"},
- {"kdb", "7"},
- {"kdd", "9"},
- {"kf", "5"},
- }
- require.Equal(t, expected, kvs)
-
- // Similarly check that stepping from last to first
- // returns the expected sequence.
- kvs = []KV{}
-
- curr, err = stm.Last("k")
- util.RequireNoErr(t, err)
-
- for curr != nil {
- kvs = append(kvs, *curr)
- curr, err = stm.Prev("k", curr.key)
- util.RequireNoErr(t, err)
- }
-
- expected = reverseKVs(expected)
- require.Equal(t, expected, kvs)
-
- return nil
- }
-
- err = RunSTM(db.cli, apply, txQueue)
- util.RequireNoErr(t, err)
-
- require.Equal(t, "0", f.Get("ka"))
- require.Equal(t, "2", f.Get("kc"))
- require.Equal(t, "6", f.Get("kda"))
- require.Equal(t, "7", f.Get("kdb"))
- require.Equal(t, "9", f.Get("kdd"))
- require.Equal(t, "5", f.Get("kf"))
- require.Equal(t, "w", f.Get("w"))
-}
-
-func TestCommitError(t *testing.T) {
- t.Parallel()
-
- f := NewEtcdTestFixture(t)
- txQueue := NewCommitQueue(f.config.Ctx)
- defer func() {
- f.Cleanup()
- txQueue.Wait()
- }()
-
- db, err := newEtcdBackend(f.BackendConfig())
- util.RequireNoErr(t, err)
-
- // Preset DB state.
- f.Put("123", "xyz")
-
- // Count the number of applies.
- cnt := 0
-
- apply := func(stm STM) er.R {
- // STM must have the key/value.
- val, err := stm.Get("123")
- util.RequireNoErr(t, err)
-
- if cnt == 0 {
- require.Equal(t, []byte("xyz"), val)
-
- // Put a conflicting key/value during the first apply.
- f.Put("123", "def")
- }
-
- // We'd expect to
- stm.Put("123", "abc")
-
- cnt++
- return nil
- }
-
- err = RunSTM(db.cli, apply, txQueue)
- util.RequireNoErr(t, err)
- require.Equal(t, 2, cnt)
-
- require.Equal(t, "abc", f.Get("123"))
-}
-
-func TestManualTxError(t *testing.T) {
- t.Parallel()
-
- f := NewEtcdTestFixture(t)
- txQueue := NewCommitQueue(f.config.Ctx)
- defer func() {
- f.Cleanup()
- txQueue.Wait()
- }()
-
- db, err := newEtcdBackend(f.BackendConfig())
- util.RequireNoErr(t, err)
-
- // Preset DB state.
- f.Put("123", "xyz")
-
- stm := NewSTM(db.cli, txQueue)
-
- val, err := stm.Get("123")
- util.RequireNoErr(t, err)
- require.Equal(t, []byte("xyz"), val)
-
- // Put a conflicting key/value.
- f.Put("123", "def")
-
- // Should still get the original version.
- val, err = stm.Get("123")
- util.RequireNoErr(t, err)
- require.Equal(t, []byte("xyz"), val)
-
- // Commit will fail with CommitError.
- err = stm.Commit()
- var e CommitError
- require.True(t, errors.As(err, &e))
-
- // We expect that the transacton indeed did not commit.
- require.Equal(t, "def", f.Get("123"))
-}
diff --git a/lnd/channeldb/kvdb/etcd/walletdb_interface_test.go b/lnd/channeldb/kvdb/etcd/walletdb_interface_test.go
deleted file mode 100644
index aeb06d72..00000000
--- a/lnd/channeldb/kvdb/etcd/walletdb_interface_test.go
+++ /dev/null
@@ -1,17 +0,0 @@
-// +build kvdb_etcd
-
-package etcd
-
-import (
- "testing"
-
- "github.com/pkt-cash/pktd/pktwallet/walletdb/walletdbtest"
-)
-
-// TestWalletDBInterface performs the WalletDB interface test suite for the
-// etcd database driver.
-func TestWalletDBInterface(t *testing.T) {
- f := NewEtcdTestFixture(t)
- defer f.Cleanup()
- walletdbtest.TestInterface(t, dbType, f.BackendConfig())
-}
diff --git a/lnd/channeldb/kvdb/interface.go b/lnd/channeldb/kvdb/interface.go
deleted file mode 100644
index ffb855c0..00000000
--- a/lnd/channeldb/kvdb/interface.go
+++ /dev/null
@@ -1,142 +0,0 @@
-package kvdb
-
-import (
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/pktwallet/walletdb"
- _ "github.com/pkt-cash/pktd/pktwallet/walletdb/bdb" // Import to register backend.
-)
-
-// Update opens a database read/write transaction and executes the function f
-// with the transaction passed as a parameter. After f exits, if f did not
-// error, the transaction is committed. Otherwise, if f did error, the
-// transaction is rolled back. If the rollback fails, the original error
-// returned by f is still returned. If the commit fails, the commit error is
-// returned. As callers may expect retries of the f closure (depending on the
-// database backend used), the reset function will be called before each retry
-// respectively.
-func Update(db Backend, f func(tx RwTx) er.R, reset func()) er.R {
- if extendedDB, ok := db.(ExtendedBackend); ok {
- return extendedDB.Update(f, reset)
- }
-
- reset()
- return walletdb.Update(db, f)
-}
-
-// View opens a database read transaction and executes the function f with the
-// transaction passed as a parameter. After f exits, the transaction is rolled
-// back. If f errors, its error is returned, not a rollback error (if any
-// occur). The passed reset function is called before the start of the
-// transaction and can be used to reset intermediate state. As callers may
-// expect retries of the f closure (depending on the database backend used), the
-// reset function will be called before each retry respectively.
-func View(db Backend, f func(tx RTx) er.R, reset func()) er.R {
- if extendedDB, ok := db.(ExtendedBackend); ok {
- return extendedDB.View(f, reset)
- }
-
- // Since we know that walletdb simply calls into bbolt which never
- // retries transactions, we'll call the reset function here before View.
- reset()
-
- return walletdb.View(db, f)
-}
-
-// Batch is identical to the Update call, but it attempts to combine several
-// individual Update transactions into a single write database transaction on
-// an optimistic basis. This only has benefits if multiple goroutines call
-// Batch.
-var Batch = walletdb.Batch
-
-// Create initializes and opens a database for the specified type. The
-// arguments are specific to the database type driver. See the documentation
-// for the database driver for further details.
-//
-// ErrDbUnknownType will be returned if the database type is not registered.
-var Create = walletdb.Create
-
-// Backend represents an ACID database. All database access is performed
-// through read or read+write transactions.
-type Backend = walletdb.DB
-
-// ExtendedBackend is and interface that supports View and Update and also able
-// to collect database access patterns.
-type ExtendedBackend interface {
- Backend
-
- // PrintStats returns all collected stats pretty printed into a string.
- PrintStats() string
-
- // View opens a database read transaction and executes the function f
- // with the transaction passed as a parameter. After f exits, the
- // transaction is rolled back. If f errors, its error is returned, not a
- // rollback error (if any occur). The passed reset function is called
- // before the start of the transaction and can be used to reset
- // intermediate state. As callers may expect retries of the f closure
- // (depending on the database backend used), the reset function will be
- //called before each retry respectively.
- View(f func(tx walletdb.ReadTx) er.R, reset func()) er.R
-
- // Update opens a database read/write transaction and executes the
- // function f with the transaction passed as a parameter. After f exits,
- // if f did not error, the transaction is committed. Otherwise, if f did
- // error, the transaction is rolled back. If the rollback fails, the
- // original error returned by f is still returned. If the commit fails,
- // the commit error is returned. As callers may expect retries of the f
- // closure (depending on the database backend used), the reset function
- // will be called before each retry respectively.
- Update(f func(tx walletdb.ReadWriteTx) er.R, reset func()) er.R
-}
-
-// Open opens an existing database for the specified type. The arguments are
-// specific to the database type driver. See the documentation for the database
-// driver for further details.
-//
-// ErrDbUnknownType will be returned if the database type is not registered.
-var Open = walletdb.Open
-
-// Driver defines a structure for backend drivers to use when they registered
-// themselves as a backend which implements the Backend interface.
-type Driver = walletdb.Driver
-
-// RBucket represents a bucket (a hierarchical structure within the
-// database) that is only allowed to perform read operations.
-type RBucket = walletdb.ReadBucket
-
-// RCursor represents a bucket cursor that can be positioned at the start or
-// end of the bucket's key/value pairs and iterate over pairs in the bucket.
-// This type is only allowed to perform database read operations.
-type RCursor = walletdb.ReadCursor
-
-// RTx represents a database transaction that can only be used for reads. If
-// a database update must occur, use a RwTx.
-type RTx = walletdb.ReadTx
-
-// RwBucket represents a bucket (a hierarchical structure within the database)
-// that is allowed to perform both read and write operations.
-type RwBucket = walletdb.ReadWriteBucket
-
-// RwCursor represents a bucket cursor that can be positioned at the start or
-// end of the bucket's key/value pairs and iterate over pairs in the bucket.
-// This abstraction is allowed to perform both database read and write
-// operations.
-type RwCursor = walletdb.ReadWriteCursor
-
-// ReadWriteTx represents a database transaction that can be used for both
-// reads and writes. When only reads are necessary, consider using a RTx
-// instead.
-type RwTx = walletdb.ReadWriteTx
-
-var (
- // ErrBucketNotFound is returned when trying to access a bucket that
- // has not been created yet.
- ErrBucketNotFound = walletdb.ErrBucketNotFound
-
- // ErrBucketExists is returned when creating a bucket that already
- // exists.
- ErrBucketExists = walletdb.ErrBucketExists
-
- // ErrDatabaseNotOpen is returned when a database instance is accessed
- // before it is opened or after it is closed.
- ErrDatabaseNotOpen = walletdb.ErrDbNotOpen
-)
diff --git a/lnd/channeldb/kvdb/kvdb_etcd.go b/lnd/channeldb/kvdb/kvdb_etcd.go
deleted file mode 100644
index d923d414..00000000
--- a/lnd/channeldb/kvdb/kvdb_etcd.go
+++ /dev/null
@@ -1,54 +0,0 @@
-// +build kvdb_etcd
-
-package kvdb
-
-import (
- "context"
-
- "github.com/pkt-cash/pktd/lnd/channeldb/kvdb/etcd"
-)
-
-// TestBackend is conditionally set to etcd when the kvdb_etcd build tag is
-// defined, allowing testing our database code with etcd backend.
-const TestBackend = EtcdBackendName
-
-// GetEtcdBackend returns an etcd backend configured according to the
-// passed etcdConfig.
-func GetEtcdBackend(ctx context.Context, prefix string,
- etcdConfig *EtcdConfig) (Backend, er.R) {
-
- // Config translation is needed here in order to keep the
- // etcd package fully independent from the rest of the source tree.
- backendConfig := etcd.BackendConfig{
- Ctx: ctx,
- Host: etcdConfig.Host,
- User: etcdConfig.User,
- Pass: etcdConfig.Pass,
- CertFile: etcdConfig.CertFile,
- KeyFile: etcdConfig.KeyFile,
- InsecureSkipVerify: etcdConfig.InsecureSkipVerify,
- Prefix: prefix,
- CollectCommitStats: etcdConfig.CollectStats,
- }
-
- return Open(EtcdBackendName, backendConfig)
-}
-
-// GetEtcdTestBackend creates an embedded etcd backend for testing
-// storig the database at the passed path.
-func GetEtcdTestBackend(path, name string) (Backend, func(), er.R) {
- empty := func() {}
-
- config, cleanup, err := etcd.NewEmbeddedEtcdInstance(path)
- if err != nil {
- return nil, empty, err
- }
-
- backend, err := Open(EtcdBackendName, *config)
- if err != nil {
- cleanup()
- return nil, empty, err
- }
-
- return backend, cleanup, nil
-}
diff --git a/lnd/channeldb/kvdb/kvdb_no_etcd.go b/lnd/channeldb/kvdb/kvdb_no_etcd.go
deleted file mode 100644
index edba337b..00000000
--- a/lnd/channeldb/kvdb/kvdb_no_etcd.go
+++ /dev/null
@@ -1,30 +0,0 @@
-// +build !kvdb_etcd
-
-package kvdb
-
-import (
- "context"
-
- "github.com/pkt-cash/pktd/btcutil/er"
-)
-
-// TestBackend is conditionally set to bdb when the kvdb_etcd build tag is
-// not defined, allowing testing our database code with bolt backend.
-const TestBackend = BoltBackendName
-
-var errEtcdNotAvailable = er.GenericErrorType.CodeWithDetail(
- "errEtcdNotAvailable",
- "etcd backend not available")
-
-// GetEtcdBackend is a stub returning nil and errEtcdNotAvailable error.
-func GetEtcdBackend(ctx context.Context, prefix string,
- etcdConfig *EtcdConfig) (Backend, er.R) {
-
- return nil, errEtcdNotAvailable.Default()
-}
-
-// GetTestEtcdBackend is a stub returning nil, an empty closure and an
-// errEtcdNotAvailable error.
-func GetEtcdTestBackend(path, name string) (Backend, func(), er.R) {
- return nil, func() {}, errEtcdNotAvailable.Default()
-}
diff --git a/lnd/channeldb/legacy_serialization.go b/lnd/channeldb/legacy_serialization.go
deleted file mode 100644
index 6ff54b32..00000000
--- a/lnd/channeldb/legacy_serialization.go
+++ /dev/null
@@ -1,57 +0,0 @@
-package channeldb
-
-import (
- "io"
-
- "github.com/pkt-cash/pktd/btcutil/er"
-)
-
-// deserializeCloseChannelSummaryV6 reads the v6 database format for
-// ChannelCloseSummary.
-//
-// NOTE: deprecated, only for migration.
-func deserializeCloseChannelSummaryV6(r io.Reader) (*ChannelCloseSummary, er.R) {
- c := &ChannelCloseSummary{}
-
- err := ReadElements(r,
- &c.ChanPoint, &c.ShortChanID, &c.ChainHash, &c.ClosingTXID,
- &c.CloseHeight, &c.RemotePub, &c.Capacity, &c.SettledBalance,
- &c.TimeLockedBalance, &c.CloseType, &c.IsPending,
- )
- if err != nil {
- return nil, err
- }
-
- // We'll now check to see if the channel close summary was encoded with
- // any of the additional optional fields.
- err = ReadElements(r, &c.RemoteCurrentRevocation)
- switch {
- case er.EOF.Is(err):
- return c, nil
-
- // If we got a non-eof error, then we know there's an actually issue.
- // Otherwise, it may have been the case that this summary didn't have
- // the set of optional fields.
- case err != nil:
- return nil, err
- }
-
- if err := readChanConfig(r, &c.LocalChanConfig); err != nil {
- return nil, err
- }
-
- // Finally, we'll attempt to read the next unrevoked commitment point
- // for the remote party. If we closed the channel before receiving a
- // funding locked message, then this can be nil. As a result, we'll use
- // the same technique to read the field, only if there's still data
- // left in the buffer.
- err = ReadElements(r, &c.RemoteNextRevocation)
- if err != nil && !er.EOF.Is(err) {
- // If we got a non-eof error, then we know there's an actually
- // issue. Otherwise, it may have been the case that this
- // summary didn't have the set of optional fields.
- return nil, err
- }
-
- return c, nil
-}
diff --git a/lnd/channeldb/meta.go b/lnd/channeldb/meta.go
deleted file mode 100644
index 78adf908..00000000
--- a/lnd/channeldb/meta.go
+++ /dev/null
@@ -1,83 +0,0 @@
-package channeldb
-
-import (
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/lnd/channeldb/kvdb"
-)
-
-var (
- // metaBucket stores all the meta information concerning the state of
- // the database.
- metaBucket = []byte("metadata")
-
- // dbVersionKey is a boltdb key and it's used for storing/retrieving
- // current database version.
- dbVersionKey = []byte("dbp")
-)
-
-// Meta structure holds the database meta information.
-type Meta struct {
- // DbVersionNumber is the current schema version of the database.
- DbVersionNumber uint32
-}
-
-// FetchMeta fetches the meta data from boltdb and returns filled meta
-// structure.
-func (d *DB) FetchMeta(tx kvdb.RTx) (*Meta, er.R) {
- var meta *Meta
-
- err := kvdb.View(d, func(tx kvdb.RTx) er.R {
- return fetchMeta(meta, tx)
- }, func() {
- meta = &Meta{}
- })
- if err != nil {
- return nil, err
- }
-
- return meta, nil
-}
-
-// fetchMeta is an internal helper function used in order to allow callers to
-// re-use a database transaction. See the publicly exported FetchMeta method
-// for more information.
-func fetchMeta(meta *Meta, tx kvdb.RTx) er.R {
- metaBucket := tx.ReadBucket(metaBucket)
- if metaBucket == nil {
- return ErrMetaNotFound.Default()
- }
-
- data := metaBucket.Get(dbVersionKey)
- if data == nil {
- meta.DbVersionNumber = getLatestDBVersion(dbVersions)
- } else {
- meta.DbVersionNumber = byteOrder.Uint32(data)
- }
-
- return nil
-}
-
-// PutMeta writes the passed instance of the database met-data struct to disk.
-func (d *DB) PutMeta(meta *Meta) er.R {
- return kvdb.Update(d, func(tx kvdb.RwTx) er.R {
- return putMeta(meta, tx)
- }, func() {})
-}
-
-// putMeta is an internal helper function used in order to allow callers to
-// re-use a database transaction. See the publicly exported PutMeta method for
-// more information.
-func putMeta(meta *Meta, tx kvdb.RwTx) er.R {
- metaBucket, err := tx.CreateTopLevelBucket(metaBucket)
- if err != nil {
- return err
- }
-
- return putDbVersion(metaBucket, meta)
-}
-
-func putDbVersion(metaBucket kvdb.RwBucket, meta *Meta) er.R {
- scratch := make([]byte, 4)
- byteOrder.PutUint32(scratch, meta.DbVersionNumber)
- return metaBucket.Put(dbVersionKey, scratch)
-}
diff --git a/lnd/channeldb/meta_test.go b/lnd/channeldb/meta_test.go
deleted file mode 100644
index 095cb624..00000000
--- a/lnd/channeldb/meta_test.go
+++ /dev/null
@@ -1,508 +0,0 @@
-package channeldb
-
-import (
- "bytes"
- "io/ioutil"
- "os"
- "testing"
-
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/lnd/channeldb/kvdb"
- "github.com/pkt-cash/pktd/pktlog/log"
-)
-
-// applyMigration is a helper test function that encapsulates the general steps
-// which are needed to properly check the result of applying migration function.
-func applyMigration(t *testing.T, beforeMigration, afterMigration func(d *DB),
- migrationFunc migration, shouldFail bool, dryRun bool) {
-
- cdb, cleanUp, err := MakeTestDB()
- defer cleanUp()
- if err != nil {
- t.Fatal(err)
- }
- cdb.dryRun = dryRun
-
- // Create a test node that will be our source node.
- testNode, err := createTestVertex(cdb)
- if err != nil {
- t.Fatal(err)
- }
- graph := cdb.ChannelGraph()
- if err := graph.SetSourceNode(testNode); err != nil {
- t.Fatal(err)
- }
-
- // beforeMigration usually used for populating the database
- // with test data.
- beforeMigration(cdb)
-
- // Create test meta info with zero database version and put it on disk.
- // Than creating the version list pretending that new version was added.
- meta := &Meta{DbVersionNumber: 0}
- if err := cdb.PutMeta(meta); err != nil {
- t.Fatalf("unable to store meta data: %v", err)
- }
-
- versions := []version{
- {
- number: 0,
- migration: nil,
- },
- {
- number: 1,
- migration: migrationFunc,
- },
- }
-
- defer func() {
- if r := recover(); r != nil {
- if dryRun && !ErrDryRunMigrationOK.Is(err) {
- t.Fatalf("expected dry run migration OK")
- }
- err = er.Errorf("%v", r)
- }
-
- if err == nil && shouldFail {
- t.Fatal("error wasn't received on migration stage")
- } else if err != nil && !shouldFail {
- t.Fatalf("error was received on migration stage: %v", err)
- }
-
- // afterMigration usually used for checking the database state and
- // throwing the error if something went wrong.
- afterMigration(cdb)
- }()
-
- // Sync with the latest version - applying migration function.
- err = cdb.syncVersions(versions)
- if err != nil {
- log.Error(err)
- }
-}
-
-// TestVersionFetchPut checks the propernces of fetch/put methods
-// and also initialization of meta data in case if don't have any in
-// database.
-func TestVersionFetchPut(t *testing.T) {
- t.Parallel()
-
- db, cleanUp, err := MakeTestDB()
- defer cleanUp()
- if err != nil {
- t.Fatal(err)
- }
-
- meta, err := db.FetchMeta(nil)
- if err != nil {
- t.Fatal(err)
- }
-
- if meta.DbVersionNumber != getLatestDBVersion(dbVersions) {
- t.Fatal("initialization of meta information wasn't performed")
- }
-
- newVersion := getLatestDBVersion(dbVersions) + 1
- meta.DbVersionNumber = newVersion
-
- if err := db.PutMeta(meta); err != nil {
- t.Fatalf("update of meta failed %v", err)
- }
-
- meta, err = db.FetchMeta(nil)
- if err != nil {
- t.Fatal(err)
- }
-
- if meta.DbVersionNumber != newVersion {
- t.Fatal("update of meta information wasn't performed")
- }
-}
-
-// TestOrderOfMigrations checks that migrations are applied in proper order.
-func TestOrderOfMigrations(t *testing.T) {
- t.Parallel()
-
- appliedMigration := -1
- versions := []version{
- {0, nil},
- {1, nil},
- {2, func(tx kvdb.RwTx) er.R {
- appliedMigration = 2
- return nil
- }},
- {3, func(tx kvdb.RwTx) er.R {
- appliedMigration = 3
- return nil
- }},
- }
-
- // Retrieve the migration that should be applied to db, as far as
- // current version is 1, we skip zero and first versions.
- migrations, _ := getMigrationsToApply(versions, 1)
-
- if len(migrations) != 2 {
- t.Fatal("incorrect number of migrations to apply")
- }
-
- // Apply first migration.
- migrations[0](nil)
-
- // Check that first migration corresponds to the second version.
- if appliedMigration != 2 {
- t.Fatal("incorrect order of applying migrations")
- }
-
- // Apply second migration.
- migrations[1](nil)
-
- // Check that second migration corresponds to the third version.
- if appliedMigration != 3 {
- t.Fatal("incorrect order of applying migrations")
- }
-}
-
-// TestGlobalVersionList checks that there is no mistake in global version list
-// in terms of version ordering.
-func TestGlobalVersionList(t *testing.T) {
- t.Parallel()
-
- if dbVersions == nil {
- t.Fatal("can't find versions list")
- }
-
- if len(dbVersions) == 0 {
- t.Fatal("db versions list is empty")
- }
-
- prev := dbVersions[0].number
- for i := 1; i < len(dbVersions); i++ {
- version := dbVersions[i].number
-
- if version == prev {
- t.Fatal("duplicates db versions")
- }
- if version < prev {
- t.Fatal("order of db versions is wrong")
- }
-
- prev = version
- }
-}
-
-// TestMigrationWithPanic asserts that if migration logic panics, we will return
-// to the original state unaltered.
-func TestMigrationWithPanic(t *testing.T) {
- t.Parallel()
-
- bucketPrefix := []byte("somebucket")
- keyPrefix := []byte("someprefix")
- beforeMigration := []byte("beforemigration")
- afterMigration := []byte("aftermigration")
-
- beforeMigrationFunc := func(d *DB) {
- // Insert data in database and in order then make sure that the
- // key isn't changes in case of panic or fail.
- err := kvdb.Update(d, func(tx kvdb.RwTx) er.R {
- bucket, err := tx.CreateTopLevelBucket(bucketPrefix)
- if err != nil {
- return err
- }
-
- return bucket.Put(keyPrefix, beforeMigration)
- }, func() {})
- if err != nil {
- t.Fatalf("unable to insert: %v", err)
- }
- }
-
- // Create migration function which changes the initially created data and
- // throw the panic, in this case we pretending that something goes.
- migrationWithPanic := func(tx kvdb.RwTx) er.R {
- bucket, err := tx.CreateTopLevelBucket(bucketPrefix)
- if err != nil {
- return err
- }
-
- bucket.Put(keyPrefix, afterMigration)
- panic("panic!")
- }
-
- // Check that version of database and data wasn't changed.
- afterMigrationFunc := func(d *DB) {
- meta, err := d.FetchMeta(nil)
- if err != nil {
- t.Fatal(err)
- }
-
- if meta.DbVersionNumber != 0 {
- t.Fatal("migration panicked but version is changed")
- }
-
- err = kvdb.Update(d, func(tx kvdb.RwTx) er.R {
- bucket, err := tx.CreateTopLevelBucket(bucketPrefix)
- if err != nil {
- return err
- }
-
- value := bucket.Get(keyPrefix)
- if !bytes.Equal(value, beforeMigration) {
- return er.New("migration failed but data is " +
- "changed")
- }
-
- return nil
- }, func() {})
- if err != nil {
- t.Fatal(err)
- }
- }
-
- applyMigration(t,
- beforeMigrationFunc,
- afterMigrationFunc,
- migrationWithPanic,
- true,
- false)
-}
-
-// TestMigrationWithFatal asserts that migrations which fail do not modify the
-// database.
-func TestMigrationWithFatal(t *testing.T) {
- t.Parallel()
-
- bucketPrefix := []byte("somebucket")
- keyPrefix := []byte("someprefix")
- beforeMigration := []byte("beforemigration")
- afterMigration := []byte("aftermigration")
-
- beforeMigrationFunc := func(d *DB) {
- err := kvdb.Update(d, func(tx kvdb.RwTx) er.R {
- bucket, err := tx.CreateTopLevelBucket(bucketPrefix)
- if err != nil {
- return err
- }
-
- return bucket.Put(keyPrefix, beforeMigration)
- }, func() {})
- if err != nil {
- t.Fatalf("unable to insert pre migration key: %v", err)
- }
- }
-
- // Create migration function which changes the initially created data and
- // return the error, in this case we pretending that something goes
- // wrong.
- migrationWithFatal := func(tx kvdb.RwTx) er.R {
- bucket, err := tx.CreateTopLevelBucket(bucketPrefix)
- if err != nil {
- return err
- }
-
- bucket.Put(keyPrefix, afterMigration)
- return er.New("some error")
- }
-
- // Check that version of database and initial data wasn't changed.
- afterMigrationFunc := func(d *DB) {
- meta, err := d.FetchMeta(nil)
- if err != nil {
- t.Fatal(err)
- }
-
- if meta.DbVersionNumber != 0 {
- t.Fatal("migration failed but version is changed")
- }
-
- err = kvdb.Update(d, func(tx kvdb.RwTx) er.R {
- bucket, err := tx.CreateTopLevelBucket(bucketPrefix)
- if err != nil {
- return err
- }
-
- value := bucket.Get(keyPrefix)
- if !bytes.Equal(value, beforeMigration) {
- return er.New("migration failed but data is " +
- "changed")
- }
-
- return nil
- }, func() {})
- if err != nil {
- t.Fatal(err)
- }
- }
-
- applyMigration(t,
- beforeMigrationFunc,
- afterMigrationFunc,
- migrationWithFatal,
- true,
- false)
-}
-
-// TestMigrationWithoutErrors asserts that a successful migration has its
-// changes applied to the database.
-func TestMigrationWithoutErrors(t *testing.T) {
- t.Parallel()
-
- bucketPrefix := []byte("somebucket")
- keyPrefix := []byte("someprefix")
- beforeMigration := []byte("beforemigration")
- afterMigration := []byte("aftermigration")
-
- // Populate database with initial data.
- beforeMigrationFunc := func(d *DB) {
- err := kvdb.Update(d, func(tx kvdb.RwTx) er.R {
- bucket, err := tx.CreateTopLevelBucket(bucketPrefix)
- if err != nil {
- return err
- }
-
- return bucket.Put(keyPrefix, beforeMigration)
- }, func() {})
- if err != nil {
- t.Fatalf("unable to update db pre migration: %v", err)
- }
- }
-
- // Create migration function which changes the initially created data.
- migrationWithoutErrors := func(tx kvdb.RwTx) er.R {
- bucket, err := tx.CreateTopLevelBucket(bucketPrefix)
- if err != nil {
- return err
- }
-
- bucket.Put(keyPrefix, afterMigration)
- return nil
- }
-
- // Check that version of database and data was properly changed.
- afterMigrationFunc := func(d *DB) {
- meta, err := d.FetchMeta(nil)
- if err != nil {
- t.Fatal(err)
- }
-
- if meta.DbVersionNumber != 1 {
- t.Fatal("version number isn't changed after " +
- "successfully applied migration")
- }
-
- err = kvdb.Update(d, func(tx kvdb.RwTx) er.R {
- bucket, err := tx.CreateTopLevelBucket(bucketPrefix)
- if err != nil {
- return err
- }
-
- value := bucket.Get(keyPrefix)
- if !bytes.Equal(value, afterMigration) {
- return er.New("migration wasn't applied " +
- "properly")
- }
-
- return nil
- }, func() {})
- if err != nil {
- t.Fatal(err)
- }
- }
-
- applyMigration(t,
- beforeMigrationFunc,
- afterMigrationFunc,
- migrationWithoutErrors,
- false,
- false)
-}
-
-// TestMigrationReversion tests after performing a migration to a higher
-// database version, opening the database with a lower latest db version returns
-// ErrDBReversion.
-func TestMigrationReversion(t *testing.T) {
- t.Parallel()
-
- tempDirName, errr := ioutil.TempDir("", "channeldb")
- defer func() {
- os.RemoveAll(tempDirName)
- }()
- if errr != nil {
- t.Fatalf("unable to create temp dir: %v", errr)
- }
-
- backend, cleanup, err := kvdb.GetTestBackend(tempDirName, "cdb")
- if err != nil {
- t.Fatalf("unable to get test db backend: %v", err)
- }
-
- cdb, err := CreateWithBackend(backend)
- if err != nil {
- cleanup()
- t.Fatalf("unable to open channeldb: %v", err)
- }
-
- // Update the database metadata to point to one more than the highest
- // known version.
- err = kvdb.Update(cdb, func(tx kvdb.RwTx) er.R {
- newMeta := &Meta{
- DbVersionNumber: getLatestDBVersion(dbVersions) + 1,
- }
-
- return putMeta(newMeta, tx)
- }, func() {})
-
- // Close the database. Even if we succeeded, our next step is to reopen.
- cdb.Close()
- cleanup()
-
- if err != nil {
- t.Fatalf("unable to increase db version: %v", err)
- }
-
- backend, cleanup, err = kvdb.GetTestBackend(tempDirName, "cdb")
- if err != nil {
- t.Fatalf("unable to get test db backend: %v", err)
- }
- defer cleanup()
-
- _, err = CreateWithBackend(backend)
- if !ErrDBReversion.Is(err) {
- t.Fatalf("unexpected error when opening channeldb, "+
- "want: %v, got: %v", ErrDBReversion, err)
- }
-}
-
-// TestMigrationDryRun ensures that opening the database in dry run migration
-// mode will fail and not commit the migration.
-func TestMigrationDryRun(t *testing.T) {
- t.Parallel()
-
- // Nothing to do, will inspect version number.
- beforeMigrationFunc := func(d *DB) {}
-
- // Check that version of database version is not modified.
- afterMigrationFunc := func(d *DB) {
- err := kvdb.View(d, func(tx kvdb.RTx) er.R {
- meta, err := d.FetchMeta(nil)
- if err != nil {
- t.Fatal(err)
- }
-
- if meta.DbVersionNumber != 0 {
- t.Fatal("dry run migration was not aborted")
- }
-
- return nil
- }, func() {})
- if err != nil {
- t.Fatalf("unable to apply after func: %v", err)
- }
- }
-
- applyMigration(t,
- beforeMigrationFunc,
- afterMigrationFunc,
- func(kvdb.RwTx) er.R { return nil },
- true,
- true)
-}
diff --git a/lnd/channeldb/migration/create_tlb.go b/lnd/channeldb/migration/create_tlb.go
deleted file mode 100644
index aad0a210..00000000
--- a/lnd/channeldb/migration/create_tlb.go
+++ /dev/null
@@ -1,27 +0,0 @@
-package migration
-
-import (
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/lnd/channeldb/kvdb"
- "github.com/pkt-cash/pktd/pktlog/log"
-)
-
-// CreateTLB creates a new top-level bucket with the passed bucket identifier.
-func CreateTLB(bucket []byte) func(kvdb.RwTx) er.R {
- return func(tx kvdb.RwTx) er.R {
- log.Infof("Creating top-level bucket: \"%s\" ...", bucket)
-
- if tx.ReadBucket(bucket) != nil {
- return er.Errorf("top-level bucket \"%s\" "+
- "already exists", bucket)
- }
-
- _, err := tx.CreateTopLevelBucket(bucket)
- if err != nil {
- return err
- }
-
- log.Infof("Created top-level bucket: \"%s\"", bucket)
- return nil
- }
-}
diff --git a/lnd/channeldb/migration/create_tlb_test.go b/lnd/channeldb/migration/create_tlb_test.go
deleted file mode 100644
index dc32c011..00000000
--- a/lnd/channeldb/migration/create_tlb_test.go
+++ /dev/null
@@ -1,57 +0,0 @@
-package migration_test
-
-import (
- "testing"
-
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/lnd/channeldb/kvdb"
- "github.com/pkt-cash/pktd/lnd/channeldb/migration"
- "github.com/pkt-cash/pktd/lnd/channeldb/migtest"
-)
-
-// TestCreateTLB asserts that a CreateTLB properly initializes a new top-level
-// bucket, and that it succeeds even if the bucket already exists. It would
-// probably be better if the latter failed, but the kvdb abstraction doesn't
-// support this.
-func TestCreateTLB(t *testing.T) {
- newBucket := []byte("hello")
-
- tests := []struct {
- name string
- beforeMigration func(kvdb.RwTx) er.R
- shouldFail bool
- }{
- {
- name: "already exists",
- beforeMigration: func(tx kvdb.RwTx) er.R {
- _, err := tx.CreateTopLevelBucket(newBucket)
- return err
- },
- shouldFail: true,
- },
- {
- name: "does not exist",
- beforeMigration: func(_ kvdb.RwTx) er.R { return nil },
- shouldFail: false,
- },
- }
-
- for _, test := range tests {
- test := test
- t.Run(test.name, func(t *testing.T) {
- migtest.ApplyMigration(
- t,
- test.beforeMigration,
- func(tx kvdb.RwTx) er.R {
- if tx.ReadBucket(newBucket) != nil {
- return nil
- }
- return er.Errorf("bucket \"%s\" not "+
- "created", newBucket)
- },
- migration.CreateTLB(newBucket),
- test.shouldFail,
- )
- })
- }
-}
diff --git a/lnd/channeldb/migration12/invoices.go b/lnd/channeldb/migration12/invoices.go
deleted file mode 100644
index 3474494e..00000000
--- a/lnd/channeldb/migration12/invoices.go
+++ /dev/null
@@ -1,320 +0,0 @@
-package migration12
-
-import (
- "bytes"
- "encoding/binary"
- "io"
- "time"
-
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/btcutil/util"
- "github.com/pkt-cash/pktd/lnd/lntypes"
- "github.com/pkt-cash/pktd/lnd/lnwire"
- "github.com/pkt-cash/pktd/lnd/tlv"
- "github.com/pkt-cash/pktd/wire"
-)
-
-const (
- // MaxMemoSize is maximum size of the memo field within invoices stored
- // in the database.
- MaxMemoSize = 1024
-
- // maxReceiptSize is the maximum size of the payment receipt stored
- // within the database along side incoming/outgoing invoices.
- maxReceiptSize = 1024
-
- // MaxPaymentRequestSize is the max size of a payment request for
- // this invoice.
- // TODO(halseth): determine the max length payment request when field
- // lengths are final.
- MaxPaymentRequestSize = 4096
-
- memoType tlv.Type = 0
- payReqType tlv.Type = 1
- createTimeType tlv.Type = 2
- settleTimeType tlv.Type = 3
- addIndexType tlv.Type = 4
- settleIndexType tlv.Type = 5
- preimageType tlv.Type = 6
- valueType tlv.Type = 7
- cltvDeltaType tlv.Type = 8
- expiryType tlv.Type = 9
- paymentAddrType tlv.Type = 10
- featuresType tlv.Type = 11
- invStateType tlv.Type = 12
- amtPaidType tlv.Type = 13
-)
-
-var (
- // invoiceBucket is the name of the bucket within the database that
- // stores all data related to invoices no matter their final state.
- // Within the invoice bucket, each invoice is keyed by its invoice ID
- // which is a monotonically increasing uint32.
- invoiceBucket = []byte("invoices")
-
- // Big endian is the preferred byte order, due to cursor scans over
- // integer keys iterating in order.
- byteOrder = binary.BigEndian
-)
-
-// ContractState describes the state the invoice is in.
-type ContractState uint8
-
-// ContractTerm is a companion struct to the Invoice struct. This struct houses
-// the necessary conditions required before the invoice can be considered fully
-// settled by the payee.
-type ContractTerm struct {
- // PaymentPreimage is the preimage which is to be revealed in the
- // occasion that an HTLC paying to the hash of this preimage is
- // extended.
- PaymentPreimage lntypes.Preimage
-
- // Value is the expected amount of milli-satoshis to be paid to an HTLC
- // which can be satisfied by the above preimage.
- Value lnwire.MilliSatoshi
-
- // State describes the state the invoice is in.
- State ContractState
-
- // PaymentAddr is a randomly generated value include in the MPP record
- // by the sender to prevent probing of the receiver.
- PaymentAddr [32]byte
-
- // Features is the feature vectors advertised on the payment request.
- Features *lnwire.FeatureVector
-}
-
-// Invoice is a payment invoice generated by a payee in order to request
-// payment for some good or service. The inclusion of invoices within Lightning
-// creates a payment work flow for merchants very similar to that of the
-// existing financial system within PayPal, etc. Invoices are added to the
-// database when a payment is requested, then can be settled manually once the
-// payment is received at the upper layer. For record keeping purposes,
-// invoices are never deleted from the database, instead a bit is toggled
-// denoting the invoice has been fully settled. Within the database, all
-// invoices must have a unique payment hash which is generated by taking the
-// sha256 of the payment preimage.
-type Invoice struct {
- // Memo is an optional memo to be stored along side an invoice. The
- // memo may contain further details pertaining to the invoice itself,
- // or any other message which fits within the size constraints.
- Memo []byte
-
- // PaymentRequest is an optional field where a payment request created
- // for this invoice can be stored.
- PaymentRequest []byte
-
- // FinalCltvDelta is the minimum required number of blocks before htlc
- // expiry when the invoice is accepted.
- FinalCltvDelta int32
-
- // Expiry defines how long after creation this invoice should expire.
- Expiry time.Duration
-
- // CreationDate is the exact time the invoice was created.
- CreationDate time.Time
-
- // SettleDate is the exact time the invoice was settled.
- SettleDate time.Time
-
- // Terms are the contractual payment terms of the invoice. Once all the
- // terms have been satisfied by the payer, then the invoice can be
- // considered fully fulfilled.
- //
- // TODO(roasbeef): later allow for multiple terms to fulfill the final
- // invoice: payment fragmentation, etc.
- Terms ContractTerm
-
- // AddIndex is an auto-incrementing integer that acts as a
- // monotonically increasing sequence number for all invoices created.
- // Clients can then use this field as a "checkpoint" of sorts when
- // implementing a streaming RPC to notify consumers of instances where
- // an invoice has been added before they re-connected.
- //
- // NOTE: This index starts at 1.
- AddIndex uint64
-
- // SettleIndex is an auto-incrementing integer that acts as a
- // monotonically increasing sequence number for all settled invoices.
- // Clients can then use this field as a "checkpoint" of sorts when
- // implementing a streaming RPC to notify consumers of instances where
- // an invoice has been settled before they re-connected.
- //
- // NOTE: This index starts at 1.
- SettleIndex uint64
-
- // AmtPaid is the final amount that we ultimately accepted for pay for
- // this invoice. We specify this value independently as it's possible
- // that the invoice originally didn't specify an amount, or the sender
- // overpaid.
- AmtPaid lnwire.MilliSatoshi
-
- // Htlcs records all htlcs that paid to this invoice. Some of these
- // htlcs may have been marked as canceled.
- Htlcs []byte
-}
-
-// LegacyDeserializeInvoice decodes an invoice from the passed io.Reader using
-// the pre-TLV serialization.
-func LegacyDeserializeInvoice(r io.Reader) (Invoice, er.R) {
- var err er.R
- invoice := Invoice{}
-
- // TODO(roasbeef): use read full everywhere
- invoice.Memo, err = wire.ReadVarBytes(r, 0, MaxMemoSize, "")
- if err != nil {
- return invoice, err
- }
- _, err = wire.ReadVarBytes(r, 0, maxReceiptSize, "")
- if err != nil {
- return invoice, err
- }
-
- invoice.PaymentRequest, err = wire.ReadVarBytes(r, 0, MaxPaymentRequestSize, "")
- if err != nil {
- return invoice, err
- }
-
- if err := util.ReadBin(r, byteOrder, &invoice.FinalCltvDelta); err != nil {
- return invoice, err
- }
-
- var expiry int64
- if err := util.ReadBin(r, byteOrder, &expiry); err != nil {
- return invoice, err
- }
- invoice.Expiry = time.Duration(expiry)
-
- birthBytes, err := wire.ReadVarBytes(r, 0, 300, "birth")
- if err != nil {
- return invoice, err
- }
- if err := invoice.CreationDate.UnmarshalBinary(birthBytes); err != nil {
- return invoice, er.E(err)
- }
-
- settledBytes, err := wire.ReadVarBytes(r, 0, 300, "settled")
- if err != nil {
- return invoice, err
- }
- if err := invoice.SettleDate.UnmarshalBinary(settledBytes); err != nil {
- return invoice, er.E(err)
- }
-
- if _, err := util.ReadFull(r, invoice.Terms.PaymentPreimage[:]); err != nil {
- return invoice, err
- }
- var scratch [8]byte
- if _, err := util.ReadFull(r, scratch[:]); err != nil {
- return invoice, err
- }
- invoice.Terms.Value = lnwire.MilliSatoshi(byteOrder.Uint64(scratch[:]))
-
- if err := util.ReadBin(r, byteOrder, &invoice.Terms.State); err != nil {
- return invoice, err
- }
-
- if err := util.ReadBin(r, byteOrder, &invoice.AddIndex); err != nil {
- return invoice, err
- }
- if err := util.ReadBin(r, byteOrder, &invoice.SettleIndex); err != nil {
- return invoice, err
- }
- if err := util.ReadBin(r, byteOrder, &invoice.AmtPaid); err != nil {
- return invoice, err
- }
-
- invoice.Htlcs, err = deserializeHtlcs(r)
- if err != nil {
- return Invoice{}, err
- }
-
- return invoice, nil
-}
-
-// deserializeHtlcs reads a list of invoice htlcs from a reader and returns it
-// as a flattened byte slice.
-func deserializeHtlcs(r io.Reader) ([]byte, er.R) {
- var b bytes.Buffer
- _, err := io.Copy(&b, r)
- return b.Bytes(), er.E(err)
-}
-
-// SerializeInvoice serializes an invoice to a writer.
-//
-// nolint: dupl
-func SerializeInvoice(w io.Writer, i *Invoice) er.R {
- creationDateBytes, errr := i.CreationDate.MarshalBinary()
- if errr != nil {
- return er.E(errr)
- }
-
- settleDateBytes, errr := i.SettleDate.MarshalBinary()
- if errr != nil {
- return er.E(errr)
- }
-
- var fb bytes.Buffer
- err := i.Terms.Features.EncodeBase256(&fb)
- if err != nil {
- return err
- }
- featureBytes := fb.Bytes()
-
- preimage := [32]byte(i.Terms.PaymentPreimage)
- value := uint64(i.Terms.Value)
- cltvDelta := uint32(i.FinalCltvDelta)
- expiry := uint64(i.Expiry)
-
- amtPaid := uint64(i.AmtPaid)
- state := uint8(i.Terms.State)
-
- tlvStream, err := tlv.NewStream(
- // Memo and payreq.
- tlv.MakePrimitiveRecord(memoType, &i.Memo),
- tlv.MakePrimitiveRecord(payReqType, &i.PaymentRequest),
-
- // Add/settle metadata.
- tlv.MakePrimitiveRecord(createTimeType, &creationDateBytes),
- tlv.MakePrimitiveRecord(settleTimeType, &settleDateBytes),
- tlv.MakePrimitiveRecord(addIndexType, &i.AddIndex),
- tlv.MakePrimitiveRecord(settleIndexType, &i.SettleIndex),
-
- // Terms.
- tlv.MakePrimitiveRecord(preimageType, &preimage),
- tlv.MakePrimitiveRecord(valueType, &value),
- tlv.MakePrimitiveRecord(cltvDeltaType, &cltvDelta),
- tlv.MakePrimitiveRecord(expiryType, &expiry),
- tlv.MakePrimitiveRecord(paymentAddrType, &i.Terms.PaymentAddr),
- tlv.MakePrimitiveRecord(featuresType, &featureBytes),
-
- // Invoice state.
- tlv.MakePrimitiveRecord(invStateType, &state),
- tlv.MakePrimitiveRecord(amtPaidType, &amtPaid),
- )
- if err != nil {
- return err
- }
-
- var b bytes.Buffer
- if err = tlvStream.Encode(&b); err != nil {
- return err
- }
-
- err = util.WriteBin(w, byteOrder, uint64(b.Len()))
- if err != nil {
- return err
- }
-
- if _, err = util.Write(w, b.Bytes()); err != nil {
- return err
- }
-
- return serializeHtlcs(w, i.Htlcs)
-}
-
-// serializeHtlcs writes a serialized list of invoice htlcs into a writer.
-func serializeHtlcs(w io.Writer, htlcs []byte) er.R {
- _, err := util.Write(w, htlcs)
- return err
-}
diff --git a/lnd/channeldb/migration12/migration.go b/lnd/channeldb/migration12/migration.go
deleted file mode 100644
index 28d9fec3..00000000
--- a/lnd/channeldb/migration12/migration.go
+++ /dev/null
@@ -1,76 +0,0 @@
-package migration12
-
-import (
- "bytes"
-
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/lnd/channeldb/kvdb"
- "github.com/pkt-cash/pktd/lnd/lnwire"
- "github.com/pkt-cash/pktd/pktlog/log"
-)
-
-var emptyFeatures = lnwire.NewFeatureVector(nil, nil)
-
-// MigrateInvoiceTLV migrates all existing invoice bodies over to be serialized
-// in a single TLV stream. In the process, we drop the Receipt field and add
-// PaymentAddr and Features to the invoice Terms.
-func MigrateInvoiceTLV(tx kvdb.RwTx) er.R {
- log.Infof("Migrating invoice bodies to TLV, " +
- "adding payment addresses and feature vectors.")
-
- invoiceB := tx.ReadWriteBucket(invoiceBucket)
- if invoiceB == nil {
- return nil
- }
-
- type keyedInvoice struct {
- key []byte
- invoice Invoice
- }
-
- // Read in all existing invoices using the old format.
- var invoices []keyedInvoice
- err := invoiceB.ForEach(func(k, v []byte) er.R {
- if v == nil {
- return nil
- }
-
- invoiceReader := bytes.NewReader(v)
- invoice, err := LegacyDeserializeInvoice(invoiceReader)
- if err != nil {
- return err
- }
-
- // Insert an empty feature vector on all old payments.
- invoice.Terms.Features = emptyFeatures
-
- invoices = append(invoices, keyedInvoice{
- key: k,
- invoice: invoice,
- })
-
- return nil
- })
- if err != nil {
- return err
- }
-
- // Write out each one under its original key using TLV.
- for _, ki := range invoices {
- var b bytes.Buffer
- errr := SerializeInvoice(&b, &ki.invoice)
- if errr != nil {
- return errr
- }
-
- err = invoiceB.Put(ki.key, b.Bytes())
- if err != nil {
- return err
- }
- }
-
- log.Infof("Migration to TLV invoice bodies, " +
- "payment address, and features complete!")
-
- return nil
-}
diff --git a/lnd/channeldb/migration12/migration_test.go b/lnd/channeldb/migration12/migration_test.go
deleted file mode 100644
index 6e9a20d7..00000000
--- a/lnd/channeldb/migration12/migration_test.go
+++ /dev/null
@@ -1,206 +0,0 @@
-package migration12_test
-
-import (
- "bytes"
- "testing"
-
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/lnd/channeldb/kvdb"
- "github.com/pkt-cash/pktd/lnd/channeldb/migration12"
- "github.com/pkt-cash/pktd/lnd/channeldb/migtest"
- "github.com/pkt-cash/pktd/lnd/lntypes"
-)
-
-var (
- // invoiceBucket is the name of the bucket within the database that
- // stores all data related to invoices no matter their final state.
- // Within the invoice bucket, each invoice is keyed by its invoice ID
- // which is a monotonically increasing uint32.
- invoiceBucket = []byte("invoices")
-
- preimage = lntypes.Preimage{
- 0x42, 0x42, 0x42, 0x42, 0x42, 0x42, 0x42, 0x42,
- 0x42, 0x42, 0x42, 0x42, 0x42, 0x42, 0x42, 0x42,
- 0x42, 0x42, 0x42, 0x42, 0x42, 0x42, 0x42, 0x42,
- 0x42, 0x42, 0x42, 0x42, 0x42, 0x42, 0x42, 0x42,
- }
-
- hash = preimage.Hash()
-
- beforeInvoice0Htlcs = []byte{
- 0x0b, 0x68, 0x65, 0x6c, 0x6c, 0x6f, 0x20, 0x77, 0x6f, 0x72,
- 0x6c, 0x64, 0x09, 0x62, 0x79, 0x65, 0x20, 0x77, 0x6f, 0x72,
- 0x6c, 0x64, 0x06, 0x70, 0x61, 0x79, 0x72, 0x65, 0x71, 0x00,
- 0x00, 0x00, 0x20, 0x00, 0x00, 0x4e, 0x94, 0x91, 0x4f, 0x00,
- 0x00, 0x0f, 0x01, 0x00, 0x00, 0x00, 0x0e, 0x77, 0xc4, 0xd3,
- 0xd5, 0x00, 0x00, 0x00, 0x00, 0xfe, 0x20, 0x0f, 0x01, 0x00,
- 0x00, 0x00, 0x0e, 0x77, 0xd5, 0xc8, 0x1c, 0x00, 0x00, 0x00,
- 0x00, 0xfe, 0x20, 0x42, 0x42, 0x42, 0x42, 0x42, 0x42, 0x42,
- 0x42, 0x42, 0x42, 0x42, 0x42, 0x42, 0x42, 0x42, 0x42, 0x42,
- 0x42, 0x42, 0x42, 0x42, 0x42, 0x42, 0x42, 0x42, 0x42, 0x42,
- 0x42, 0x42, 0x42, 0x42, 0x42, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x03, 0xe8, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x05, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0xa4,
- }
-
- afterInvoice0Htlcs = []byte{
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xb8, 0x00, 0x0b,
- 0x68, 0x65, 0x6c, 0x6c, 0x6f, 0x20, 0x77, 0x6f, 0x72, 0x6c,
- 0x64, 0x01, 0x06, 0x70, 0x61, 0x79, 0x72, 0x65, 0x71, 0x02,
- 0x0f, 0x01, 0x00, 0x00, 0x00, 0x0e, 0x77, 0xc4, 0xd3, 0xd5,
- 0x00, 0x00, 0x00, 0x00, 0xfe, 0x20, 0x03, 0x0f, 0x01, 0x00,
- 0x00, 0x00, 0x0e, 0x77, 0xd5, 0xc8, 0x1c, 0x00, 0x00, 0x00,
- 0x00, 0xfe, 0x20, 0x04, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x05, 0x05, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x06, 0x06, 0x20, 0x42, 0x42, 0x42, 0x42, 0x42,
- 0x42, 0x42, 0x42, 0x42, 0x42, 0x42, 0x42, 0x42, 0x42, 0x42,
- 0x42, 0x42, 0x42, 0x42, 0x42, 0x42, 0x42, 0x42, 0x42, 0x42,
- 0x42, 0x42, 0x42, 0x42, 0x42, 0x42, 0x42, 0x07, 0x08, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0xe8, 0x08, 0x04, 0x00,
- 0x00, 0x00, 0x20, 0x09, 0x08, 0x00, 0x00, 0x4e, 0x94, 0x91,
- 0x4f, 0x00, 0x00, 0x0a, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0b, 0x00, 0x0c,
- 0x01, 0x03, 0x0d, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x01, 0xa4,
- }
-
- testHtlc = []byte{
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x41,
- 0x01, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08,
- 0x03, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x09,
- 0x05, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x64,
- 0x07, 0x04, 0x00, 0x00, 0x00, 0x58, 0x09, 0x08, 0x00, 0x13,
- 0xbc, 0xbf, 0x72, 0x4e, 0x1e, 0x00, 0x0b, 0x08, 0x00, 0x17,
- 0xaf, 0x4c, 0x22, 0xc4, 0x24, 0x00, 0x0d, 0x04, 0x00, 0x00,
- 0x23, 0x1d, 0x0f, 0x01, 0x02,
- }
-
- beforeInvoice1Htlc = append([]byte{
- 0x0b, 0x68, 0x65, 0x6c, 0x6c, 0x6f, 0x20, 0x77, 0x6f, 0x72,
- 0x6c, 0x64, 0x09, 0x62, 0x79, 0x65, 0x20, 0x77, 0x6f, 0x72,
- 0x6c, 0x64, 0x06, 0x70, 0x61, 0x79, 0x72, 0x65, 0x71, 0x00,
- 0x00, 0x00, 0x20, 0x00, 0x00, 0x4e, 0x94, 0x91, 0x4f, 0x00,
- 0x00, 0x0f, 0x01, 0x00, 0x00, 0x00, 0x0e, 0x77, 0xc4, 0xd3,
- 0xd5, 0x00, 0x00, 0x00, 0x00, 0xfe, 0x20, 0x0f, 0x01, 0x00,
- 0x00, 0x00, 0x0e, 0x77, 0xd5, 0xc8, 0x1c, 0x00, 0x00, 0x00,
- 0x00, 0xfe, 0x20, 0x42, 0x42, 0x42, 0x42, 0x42, 0x42, 0x42,
- 0x42, 0x42, 0x42, 0x42, 0x42, 0x42, 0x42, 0x42, 0x42, 0x42,
- 0x42, 0x42, 0x42, 0x42, 0x42, 0x42, 0x42, 0x42, 0x42, 0x42,
- 0x42, 0x42, 0x42, 0x42, 0x42, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x03, 0xe8, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x05, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0xa4,
- }, testHtlc...)
-
- afterInvoice1Htlc = append([]byte{
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xb8, 0x00, 0x0b,
- 0x68, 0x65, 0x6c, 0x6c, 0x6f, 0x20, 0x77, 0x6f, 0x72, 0x6c,
- 0x64, 0x01, 0x06, 0x70, 0x61, 0x79, 0x72, 0x65, 0x71, 0x02,
- 0x0f, 0x01, 0x00, 0x00, 0x00, 0x0e, 0x77, 0xc4, 0xd3, 0xd5,
- 0x00, 0x00, 0x00, 0x00, 0xfe, 0x20, 0x03, 0x0f, 0x01, 0x00,
- 0x00, 0x00, 0x0e, 0x77, 0xd5, 0xc8, 0x1c, 0x00, 0x00, 0x00,
- 0x00, 0xfe, 0x20, 0x04, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x05, 0x05, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x06, 0x06, 0x20, 0x42, 0x42, 0x42, 0x42, 0x42,
- 0x42, 0x42, 0x42, 0x42, 0x42, 0x42, 0x42, 0x42, 0x42, 0x42,
- 0x42, 0x42, 0x42, 0x42, 0x42, 0x42, 0x42, 0x42, 0x42, 0x42,
- 0x42, 0x42, 0x42, 0x42, 0x42, 0x42, 0x42, 0x07, 0x08, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0xe8, 0x08, 0x04, 0x00,
- 0x00, 0x00, 0x20, 0x09, 0x08, 0x00, 0x00, 0x4e, 0x94, 0x91,
- 0x4f, 0x00, 0x00, 0x0a, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0b, 0x00, 0x0c,
- 0x01, 0x03, 0x0d, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x01, 0xa4,
- }, testHtlc...)
-)
-
-type migrationTest struct {
- name string
- beforeMigration func(kvdb.RwTx) er.R
- afterMigration func(kvdb.RwTx) er.R
-}
-
-var migrationTests = []migrationTest{
- {
- name: "no invoices",
- beforeMigration: func(kvdb.RwTx) er.R { return nil },
- afterMigration: func(kvdb.RwTx) er.R { return nil },
- },
- {
- name: "zero htlcs",
- beforeMigration: genBeforeMigration(beforeInvoice0Htlcs),
- afterMigration: genAfterMigration(afterInvoice0Htlcs),
- },
- {
- name: "one htlc",
- beforeMigration: genBeforeMigration(beforeInvoice1Htlc),
- afterMigration: genAfterMigration(afterInvoice1Htlc),
- },
-}
-
-// genBeforeMigration creates a closure that inserts an invoice serialized under
-// the old format under the test payment hash.
-func genBeforeMigration(beforeBytes []byte) func(kvdb.RwTx) er.R {
- return func(tx kvdb.RwTx) er.R {
- invoices, err := tx.CreateTopLevelBucket(
- invoiceBucket,
- )
- if err != nil {
- return err
- }
-
- return invoices.Put(hash[:], beforeBytes)
- }
-}
-
-// genAfterMigration creates a closure that verifies the tlv invoice migration
-// succeeded, but comparing the resulting encoding of the invoice to the
-// expected serialization. In addition, the decoded invoice is compared against
-// the expected invoice for equality.
-func genAfterMigration(afterBytes []byte) func(kvdb.RwTx) er.R {
- return func(tx kvdb.RwTx) er.R {
- invoices := tx.ReadWriteBucket(invoiceBucket)
- if invoices == nil {
- return er.Errorf("invoice bucket not found")
- }
-
- // Fetch the new invoice bytes and check that they match our
- // expected serialization.
- invoiceBytes := invoices.Get(hash[:])
- if !bytes.Equal(invoiceBytes, afterBytes) {
- return er.Errorf("invoice bytes mismatch, "+
- "want: %x, got: %x",
- invoiceBytes, afterBytes)
- }
-
- return nil
- }
-}
-
-// TestTLVInvoiceMigration executes a suite of migration tests for moving
-// invoices to use TLV for their bodies. In the process, feature bits and
-// payment addresses are added to the invoice while the receipt field is
-// dropped. We test a few different invoices with a varying number of HTLCs, as
-// well as the case where there are no invoices present.
-//
-// NOTE: The test vectors each include a receipt that is not present on the
-// final struct, but verifies that the field is properly removed.
-func TestTLVInvoiceMigration(t *testing.T) {
- for _, test := range migrationTests {
- test := test
- t.Run(test.name, func(t *testing.T) {
- migtest.ApplyMigration(
- t,
- test.beforeMigration,
- test.afterMigration,
- migration12.MigrateInvoiceTLV,
- false,
- )
- })
- }
-}
diff --git a/lnd/channeldb/migration13/migration.go b/lnd/channeldb/migration13/migration.go
deleted file mode 100644
index 916d95bb..00000000
--- a/lnd/channeldb/migration13/migration.go
+++ /dev/null
@@ -1,203 +0,0 @@
-package migration13
-
-import (
- "encoding/binary"
-
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/lnd/channeldb/kvdb"
- "github.com/pkt-cash/pktd/pktlog/log"
-)
-
-var (
- paymentsRootBucket = []byte("payments-root-bucket")
-
- // paymentCreationInfoKey is a key used in the payment's sub-bucket to
- // store the creation info of the payment.
- paymentCreationInfoKey = []byte("payment-creation-info")
-
- // paymentFailInfoKey is a key used in the payment's sub-bucket to
- // store information about the reason a payment failed.
- paymentFailInfoKey = []byte("payment-fail-info")
-
- // paymentAttemptInfoKey is a key used in the payment's sub-bucket to
- // store the info about the latest attempt that was done for the
- // payment in question.
- paymentAttemptInfoKey = []byte("payment-attempt-info")
-
- // paymentSettleInfoKey is a key used in the payment's sub-bucket to
- // store the settle info of the payment.
- paymentSettleInfoKey = []byte("payment-settle-info")
-
- // paymentHtlcsBucket is a bucket where we'll store the information
- // about the HTLCs that were attempted for a payment.
- paymentHtlcsBucket = []byte("payment-htlcs-bucket")
-
- // htlcAttemptInfoKey is a key used in a HTLC's sub-bucket to store the
- // info about the attempt that was done for the HTLC in question.
- htlcAttemptInfoKey = []byte("htlc-attempt-info")
-
- // htlcSettleInfoKey is a key used in a HTLC's sub-bucket to store the
- // settle info, if any.
- htlcSettleInfoKey = []byte("htlc-settle-info")
-
- // htlcFailInfoKey is a key used in a HTLC's sub-bucket to store
- // failure information, if any.
- htlcFailInfoKey = []byte("htlc-fail-info")
-
- byteOrder = binary.BigEndian
-)
-
-// MigrateMPP migrates the payments to a new structure that accommodates for mpp
-// payments.
-func MigrateMPP(tx kvdb.RwTx) er.R {
- log.Infof("Migrating payments to mpp structure")
-
- // Iterate over all payments and store their indexing keys. This is
- // needed, because no modifications are allowed inside a Bucket.ForEach
- // loop.
- paymentsBucket := tx.ReadWriteBucket(paymentsRootBucket)
- if paymentsBucket == nil {
- return nil
- }
-
- var paymentKeys [][]byte
- err := paymentsBucket.ForEach(func(k, v []byte) er.R {
- paymentKeys = append(paymentKeys, k)
- return nil
- })
- if err != nil {
- return err
- }
-
- // With all keys retrieved, start the migration.
- for _, k := range paymentKeys {
- bucket := paymentsBucket.NestedReadWriteBucket(k)
-
- // We only expect sub-buckets to be found in
- // this top-level bucket.
- if bucket == nil {
- return er.Errorf("non bucket element in " +
- "payments bucket")
- }
-
- // Fetch old format creation info.
- creationInfo := bucket.Get(paymentCreationInfoKey)
- if creationInfo == nil {
- return er.Errorf("creation info not found")
- }
-
- // Make a copy because bbolt doesn't allow this value to be
- // changed in-place.
- newCreationInfo := make([]byte, len(creationInfo))
- copy(newCreationInfo, creationInfo)
-
- // Convert to nano seconds.
- timeBytes := newCreationInfo[32+8 : 32+8+8]
- time := byteOrder.Uint64(timeBytes)
- timeNs := time * 1000000000
- byteOrder.PutUint64(timeBytes, timeNs)
-
- // Write back new format creation info.
- err := bucket.Put(paymentCreationInfoKey, newCreationInfo)
- if err != nil {
- return err
- }
-
- // No migration needed if there is no attempt stored.
- attemptInfo := bucket.Get(paymentAttemptInfoKey)
- if attemptInfo == nil {
- continue
- }
-
- // Delete attempt info on the payment level.
- if err := bucket.Delete(paymentAttemptInfoKey); err != nil {
- return err
- }
-
- // Save attempt id for later use.
- attemptID := attemptInfo[:8]
-
- // Discard attempt id. It will become a bucket key in the new
- // structure.
- attemptInfo = attemptInfo[8:]
-
- // Append unknown (zero) attempt time.
- var zero [8]byte
- attemptInfo = append(attemptInfo, zero[:]...)
-
- // Create bucket that contains all htlcs.
- htlcsBucket, err := bucket.CreateBucket(paymentHtlcsBucket)
- if err != nil {
- return err
- }
-
- // Create an htlc for this attempt.
- htlcBucket, err := htlcsBucket.CreateBucket(attemptID)
- if err != nil {
- return err
- }
-
- // Save migrated attempt info.
- err = htlcBucket.Put(htlcAttemptInfoKey, attemptInfo)
- if err != nil {
- return err
- }
-
- // Migrate settle info.
- settleInfo := bucket.Get(paymentSettleInfoKey)
- if settleInfo != nil {
- // Payment-level settle info can be deleted.
- err := bucket.Delete(paymentSettleInfoKey)
- if err != nil {
- return err
- }
-
- // Append unknown (zero) settle time.
- settleInfo = append(settleInfo, zero[:]...)
-
- // Save settle info.
- err = htlcBucket.Put(htlcSettleInfoKey, settleInfo)
- if err != nil {
- return err
- }
-
- // Migration for settled htlc completed.
- continue
- }
-
- // If there is no payment-level failure reason, the payment is
- // still in flight and nothing else needs to be migrated.
- // Otherwise the payment-level failure reason can remain
- // unchanged.
- inFlight := bucket.Get(paymentFailInfoKey) == nil
- if inFlight {
- continue
- }
-
- // The htlc failed. Add htlc fail info with reason unknown. We
- // don't have access to the original failure reason anymore.
- failInfo := []byte{
- // Fail time unknown.
- 0, 0, 0, 0, 0, 0, 0, 0,
-
- // Zero length wire message.
- 0,
-
- // Failure reason unknown.
- 0,
-
- // Failure source index zero.
- 0, 0, 0, 0,
- }
-
- // Save fail info.
- err = htlcBucket.Put(htlcFailInfoKey, failInfo)
- if err != nil {
- return err
- }
- }
-
- log.Infof("Migration of payments to mpp structure complete!")
-
- return nil
-}
diff --git a/lnd/channeldb/migration13/migration_test.go b/lnd/channeldb/migration13/migration_test.go
deleted file mode 100644
index 101c008c..00000000
--- a/lnd/channeldb/migration13/migration_test.go
+++ /dev/null
@@ -1,124 +0,0 @@
-package migration13
-
-import (
- "testing"
-
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/lnd/channeldb/kvdb"
- "github.com/pkt-cash/pktd/lnd/channeldb/migtest"
-)
-
-var (
- hex = migtest.Hex
-
- zeroTime = hex("0000000000000000")
- noFailureMessage = hex("00")
- failureReasonUnknown = hex("00")
- zeroFailureSourceIdx = hex("00000000")
-
- hash1 = hex("02acee76ebd53d00824410cf6adecad4f50334dac702bd5a2d3ba01b91709f0e")
- creationInfoAmt1 = hex("00000000004c4b40")
- creationInfoTime1 = hex("000000005e4fb7ab") // 1582282667 (decimal)
- creationInfoTimeNano1 = hex("15f565b3cccaee00") // 1582282667000000000 (decimal)
- creationInfoPayReq1 = hex("00000000")
- attemptInfo1 = hex("2997a72e129fc9d638ef2fa4e233567d808d4f18a4f087637582427962eb3bf800005ce600000000004c4b402102ec12e83eafe27ce6d03bbe0c0de4b79fe2b9934615c8aa7693f73d2e41b089700000000121028c2dd128c7a6c1a0fceb3e3eb5ed55e0a0ae1a939eb786b097322d830d47db75005ca4000001000000005ce600000000004c4b400000000000")
- attemptID1 = hex("0000000000000001")
- paymentID1 = hex("0000000000000001")
-
- hash2 = hex("62eb3f0a48f954e495d0c14ac63df04a67cefa59dafdbcd3d5046d1f5647840c")
- preimage2 = hex("479593b7d3cbb45beb22d448451a2f3619b2095adfb38f4d92e9886e96534368")
- attemptID2 = hex("00000000000003e8")
- paymentID2 = hex("0000000000000002")
- attemptInfo2 = hex("8de663f9bb4b8d1ebdb496d22dc1cb657a346215607308549f41b01e2adf2ce900005ce600000000005b8d802102ec12e83eafe27ce6d03bbe0c0de4b79fe2b9934615c8aa7693f73d2e41b089700000000121028c2dd128c7a6c1a0fceb3e3eb5ed55e0a0ae1a939eb786b097322d830d47db75005ca4000001000000005ce600000000005b8d8000000000010000000000000008233d281e2cbe01f0b82dd6750967c9233426b98ae6549c696365f57f86f942a3795b8d80")
- creationInfoAmt2 = hex("00000000005b8d80")
- creationInfoTime2 = hex("000000005e4fb97f") // 1582283135 (decimal)
- creationInfoTimeNano2 = hex("15F56620C3C43600") // 1582283135000000000 (decimal)
- creationInfoPayReq2 = hex("000000fc6c6e62637274363075317030796c7774367070357674346e377a6a676c39327766397773633939767630307366666e7561376a656d74376d6535373471336b3337346a387373787164717163717a70677370353835357075743937713863747374776b7735796b306a667278736e746e7a6878326a77786a636d3937346c636437327a3564757339717939717371653872336b3578733379367868667366366d6a6e706d717172306661797a677a63336a6b663571787a6c376866787a6666763578667a7679647564327275767974706571787072376868796830726a747574373033333274737774686661616e303773766b6667716b7174667275")
-
- hash3 = hex("62eb3f0a48f954e495d0c14ac63df04a67cefa59dafdbcd3d5046d1f5647840d")
- attemptInfo3 = hex("53ce0a4c1507cc5ea00ec88b76bd43a3978ac13605497030b821af6ce9c110f300005ce600000000006acfc02102ec12e83eafe27ce6d03bbe0c0de4b79fe2b9934615c8aa7693f73d2e41b089700000000121028c2dd128c7a6c1a0fceb3e3eb5ed55e0a0ae1a939eb786b097322d830d47db75005ca4000001000000005ce600000000006acfc000000000010000000000000008233044f235354472318b381fad3e21eb5a58f5099918868b0610e7b7bcb7a4adc96acfc0")
- attemptID3 = hex("00000000000003e9")
- paymentID3 = hex("0000000000000003")
- creationInfoAmt3 = hex("00000000006acfc0")
- creationInfoTime3 = hex("000000005e4fb98d") // 1582283149
- creationInfoTimeNano3 = hex("15F56624063B4200") // 1582283149000000000 (decimal)
- creationInfoPayReq3 = hex("000000fc6c6e62637274373075317030796c7776327070357674346e377a6a676c39327766397773633939767630307366666e7561376a656d74376d6535373471336b3337346a387373787364717163717a706773703578707a307964663467336572727a656372376b6e7567307474667630327a7665727a72676b70737375376d6d6564617934687973397179397173717774656479336e666c323534787a36787a75763974746767757a647473356e617a7461616a6735667772686438396b336d70753971726d7a6c3779637a306e30666e6e763077753032726632706e64636c393761646c667636376a7a6e7063677477356434366771323571326e32")
-
- // pre is the data in the payments root bucket in database version 12 format.
- pre = map[string]interface{}{
- // A failed payment.
- hash1: map[string]interface{}{
- "payment-attempt-info": attemptID1 + attemptInfo1,
- "payment-creation-info": hash1 + creationInfoAmt1 + creationInfoTime1 + creationInfoPayReq1,
- "payment-fail-info": hex("03"),
- "payment-sequence-key": paymentID1,
- },
-
- // A settled payment.
- hash2: map[string]interface{}{
- "payment-attempt-info": attemptID2 + attemptInfo2,
- "payment-creation-info": hash2 + creationInfoAmt2 + creationInfoTime2 + creationInfoPayReq2,
- "payment-sequence-key": paymentID2,
- "payment-settle-info": preimage2,
- },
-
- // An in-flight payment.
- hash3: map[string]interface{}{
- "payment-attempt-info": attemptID3 + attemptInfo3,
- "payment-creation-info": hash3 + creationInfoAmt3 + creationInfoTime3 + creationInfoPayReq3,
- "payment-sequence-key": paymentID3,
- },
- }
-
- // post is the expected data after migration.
- post = map[string]interface{}{
- hash1: map[string]interface{}{
- "payment-creation-info": hash1 + creationInfoAmt1 + creationInfoTimeNano1 + creationInfoPayReq1,
- "payment-fail-info": hex("03"),
- "payment-htlcs-bucket": map[string]interface{}{
- attemptID1: map[string]interface{}{
- "htlc-attempt-info": attemptInfo1 + zeroTime,
- "htlc-fail-info": zeroTime + noFailureMessage + failureReasonUnknown + zeroFailureSourceIdx,
- },
- },
- "payment-sequence-key": paymentID1,
- },
- hash2: map[string]interface{}{
- "payment-creation-info": hash2 + creationInfoAmt2 + creationInfoTimeNano2 + creationInfoPayReq2,
- "payment-htlcs-bucket": map[string]interface{}{
- attemptID2: map[string]interface{}{
- "htlc-attempt-info": attemptInfo2 + zeroTime,
- "htlc-settle-info": preimage2 + zeroTime,
- },
- },
- "payment-sequence-key": paymentID2,
- },
- hash3: map[string]interface{}{
- "payment-creation-info": hash3 + creationInfoAmt3 + creationInfoTimeNano3 + creationInfoPayReq3,
- "payment-htlcs-bucket": map[string]interface{}{
- attemptID3: map[string]interface{}{
- "htlc-attempt-info": attemptInfo3 + zeroTime,
- },
- },
- "payment-sequence-key": paymentID3,
- },
- }
-)
-
-// TestMigrateMpp asserts that the database is properly migrated to the mpp
-// payment structure.
-func TestMigrateMpp(t *testing.T) {
- var paymentsRootBucket = []byte("payments-root-bucket")
-
- migtest.ApplyMigration(
- t,
- func(tx kvdb.RwTx) er.R {
- return migtest.RestoreDB(tx, paymentsRootBucket, pre)
- },
- func(tx kvdb.RwTx) er.R {
- return migtest.VerifyDB(tx, paymentsRootBucket, post)
- },
- MigrateMPP,
- false,
- )
-}
diff --git a/lnd/channeldb/migration16/migration.go b/lnd/channeldb/migration16/migration.go
deleted file mode 100644
index 22c4868b..00000000
--- a/lnd/channeldb/migration16/migration.go
+++ /dev/null
@@ -1,192 +0,0 @@
-package migration16
-
-import (
- "bytes"
- "encoding/binary"
-
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/btcutil/util"
- "github.com/pkt-cash/pktd/lnd/channeldb/kvdb"
- "github.com/pkt-cash/pktd/pktlog/log"
- "github.com/pkt-cash/pktd/wire"
-)
-
-var (
- paymentsRootBucket = []byte("payments-root-bucket")
-
- paymentSequenceKey = []byte("payment-sequence-key")
-
- duplicatePaymentsBucket = []byte("payment-duplicate-bucket")
-
- paymentsIndexBucket = []byte("payments-index-bucket")
-
- byteOrder = binary.BigEndian
-)
-
-// paymentIndexType indicates the type of index we have recorded in the payment
-// indexes bucket.
-type paymentIndexType uint8
-
-// paymentIndexTypeHash is a payment index type which indicates that we have
-// created an index of payment sequence number to payment hash.
-const paymentIndexTypeHash paymentIndexType = 0
-
-// paymentIndex stores all the information we require to create an index by
-// sequence number for a payment.
-type paymentIndex struct {
- // paymentHash is the hash of the payment, which is its key in the
- // payment root bucket.
- paymentHash []byte
-
- // sequenceNumbers is the set of sequence numbers associated with this
- // payment hash. There will be more than one sequence number in the
- // case where duplicate payments are present.
- sequenceNumbers [][]byte
-}
-
-// MigrateSequenceIndex migrates the payments db to contain a new bucket which
-// provides an index from sequence number to payment hash. This is required
-// for more efficient sequential lookup of payments, which are keyed by payment
-// hash before this migration.
-func MigrateSequenceIndex(tx kvdb.RwTx) er.R {
- log.Infof("Migrating payments to add sequence number index")
-
- // Get a list of indices we need to write.
- indexList, err := getPaymentIndexList(tx)
- if err != nil {
- return err
- }
-
- // Create the top level bucket that we will use to index payments in.
- bucket, err := tx.CreateTopLevelBucket(paymentsIndexBucket)
- if err != nil {
- return err
- }
-
- // Write an index for each of our payments.
- for _, index := range indexList {
- // Write indexes for each of our sequence numbers.
- for _, seqNr := range index.sequenceNumbers {
- err := putIndex(bucket, seqNr, index.paymentHash)
- if err != nil {
- return err
- }
- }
- }
-
- return nil
-}
-
-// putIndex performs a sanity check that ensures we are not writing duplicate
-// indexes to disk then creates the index provided.
-func putIndex(bucket kvdb.RwBucket, sequenceNr, paymentHash []byte) er.R {
- // Add a sanity check that we do not already have an entry with
- // this sequence number.
- existingEntry := bucket.Get(sequenceNr)
- if existingEntry != nil {
- return er.Errorf("sequence number: %x duplicated",
- sequenceNr)
- }
-
- bytes, err := serializePaymentIndexEntry(paymentHash)
- if err != nil {
- return err
- }
-
- return bucket.Put(sequenceNr, bytes)
-}
-
-// serializePaymentIndexEntry serializes a payment hash typed index. The value
-// produced contains a payment index type (which can be used in future to
-// signal different payment index types) and the payment hash.
-func serializePaymentIndexEntry(hash []byte) ([]byte, er.R) {
- var b bytes.Buffer
-
- err := util.WriteBin(&b, byteOrder, paymentIndexTypeHash)
- if err != nil {
- return nil, err
- }
-
- if err := wire.WriteVarBytes(&b, 0, hash); err != nil {
- return nil, err
- }
-
- return b.Bytes(), nil
-}
-
-// getPaymentIndexList gets a list of indices we need to write for our current
-// set of payments.
-func getPaymentIndexList(tx kvdb.RTx) ([]paymentIndex, er.R) {
- // Iterate over all payments and store their indexing keys. This is
- // needed, because no modifications are allowed inside a Bucket.ForEach
- // loop.
- paymentsBucket := tx.ReadBucket(paymentsRootBucket)
- if paymentsBucket == nil {
- return nil, nil
- }
-
- var indexList []paymentIndex
- err := paymentsBucket.ForEach(func(k, v []byte) er.R {
- // Get the bucket which contains the payment, fail if the key
- // does not have a bucket.
- bucket := paymentsBucket.NestedReadBucket(k)
- if bucket == nil {
- return er.Errorf("non bucket element in " +
- "payments bucket")
- }
- seqBytes := bucket.Get(paymentSequenceKey)
- if seqBytes == nil {
- return er.Errorf("nil sequence number bytes")
- }
-
- seqNrs, err := fetchSequenceNumbers(bucket)
- if err != nil {
- return err
- }
-
- // Create an index object with our payment hash and sequence
- // numbers and append it to our set of indexes.
- index := paymentIndex{
- paymentHash: k,
- sequenceNumbers: seqNrs,
- }
-
- indexList = append(indexList, index)
- return nil
- })
- if err != nil {
- return nil, err
- }
-
- return indexList, nil
-}
-
-// fetchSequenceNumbers fetches all the sequence numbers associated with a
-// payment, including those belonging to any duplicate payments.
-func fetchSequenceNumbers(paymentBucket kvdb.RBucket) ([][]byte, er.R) {
- seqNum := paymentBucket.Get(paymentSequenceKey)
- if seqNum == nil {
- return nil, er.New("expected sequence number")
- }
-
- sequenceNumbers := [][]byte{seqNum}
-
- // Get the duplicate payments bucket, if it has no duplicates, just
- // return early with the payment sequence number.
- duplicates := paymentBucket.NestedReadBucket(duplicatePaymentsBucket)
- if duplicates == nil {
- return sequenceNumbers, nil
- }
-
- // If we do have duplicated, they are keyed by sequence number, so we
- // iterate through the duplicates bucket and add them to our set of
- // sequence numbers.
- if err := duplicates.ForEach(func(k, v []byte) er.R {
- sequenceNumbers = append(sequenceNumbers, k)
- return nil
- }); err != nil {
- return nil, err
- }
-
- return sequenceNumbers, nil
-}
diff --git a/lnd/channeldb/migration16/migration_test.go b/lnd/channeldb/migration16/migration_test.go
deleted file mode 100644
index d20e80ac..00000000
--- a/lnd/channeldb/migration16/migration_test.go
+++ /dev/null
@@ -1,145 +0,0 @@
-package migration16
-
-import (
- "testing"
-
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/btcutil/util"
- "github.com/pkt-cash/pktd/lnd/channeldb/kvdb"
- "github.com/pkt-cash/pktd/lnd/channeldb/migtest"
-)
-
-var (
- hexStr = migtest.Hex
-
- hash1Str = "02acee76ebd53d00824410cf6adecad4f50334dac702bd5a2d3ba01b91709f0e"
- hash1 = hexStr(hash1Str)
- paymentID1 = hexStr("0000000000000001")
-
- hash2Str = "62eb3f0a48f954e495d0c14ac63df04a67cefa59dafdbcd3d5046d1f5647840c"
- hash2 = hexStr(hash2Str)
- paymentID2 = hexStr("0000000000000002")
-
- paymentID3 = hexStr("0000000000000003")
-
- // pre is the data in the payments root bucket in database version 13 format.
- pre = map[string]interface{}{
- // A payment without duplicates.
- hash1: map[string]interface{}{
- "payment-sequence-key": paymentID1,
- },
-
- // A payment with a duplicate.
- hash2: map[string]interface{}{
- "payment-sequence-key": paymentID2,
- "payment-duplicate-bucket": map[string]interface{}{
- paymentID3: map[string]interface{}{
- "payment-sequence-key": paymentID3,
- },
- },
- },
- }
-
- preFails = map[string]interface{}{
- // A payment without duplicates.
- hash1: map[string]interface{}{
- "payment-sequence-key": paymentID1,
- "payment-duplicate-bucket": map[string]interface{}{
- paymentID1: map[string]interface{}{
- "payment-sequence-key": paymentID1,
- },
- },
- },
- }
-
- // post is the expected data after migration.
- post = map[string]interface{}{
- paymentID1: paymentHashIndex(hash1Str),
- paymentID2: paymentHashIndex(hash2Str),
- paymentID3: paymentHashIndex(hash2Str),
- }
-)
-
-// paymentHashIndex produces a string that represents the value we expect for
-// our payment indexes from a hex encoded payment hash string.
-func paymentHashIndex(hashStr string) string {
- hash, err := util.DecodeHex(hashStr)
- if err != nil {
- panic(err)
- }
-
- bytes, err := serializePaymentIndexEntry(hash)
- if err != nil {
- panic(err)
- }
-
- return string(bytes)
-}
-
-// MigrateSequenceIndex asserts that the database is properly migrated to
-// contain a payments index.
-func TestMigrateSequenceIndex(t *testing.T) {
- tests := []struct {
- name string
- shouldFail bool
- pre map[string]interface{}
- post map[string]interface{}
- }{
- {
- name: "migration ok",
- shouldFail: false,
- pre: pre,
- post: post,
- },
- {
- name: "duplicate sequence number",
- shouldFail: true,
- pre: preFails,
- post: post,
- },
- {
- name: "no payments",
- shouldFail: false,
- pre: nil,
- post: nil,
- },
- }
-
- for _, test := range tests {
- test := test
-
- t.Run(test.name, func(t *testing.T) {
- // Before the migration we have a payments bucket.
- before := func(tx kvdb.RwTx) er.R {
- return migtest.RestoreDB(
- tx, paymentsRootBucket, test.pre,
- )
- }
-
- // After the migration, we should have an untouched
- // payments bucket and a new index bucket.
- after := func(tx kvdb.RwTx) er.R {
- if err := migtest.VerifyDB(
- tx, paymentsRootBucket, test.pre,
- ); err != nil {
- return err
- }
-
- // If we expect our migration to fail, we don't
- // expect an index bucket.
- if test.shouldFail {
- return nil
- }
-
- return migtest.VerifyDB(
- tx, paymentsIndexBucket, test.post,
- )
- }
-
- migtest.ApplyMigration(
- t, before, after, MigrateSequenceIndex,
- test.shouldFail,
- )
- })
- }
-}
diff --git a/lnd/channeldb/migration_01_to_11/addr.go b/lnd/channeldb/migration_01_to_11/addr.go
deleted file mode 100644
index 3af04fd6..00000000
--- a/lnd/channeldb/migration_01_to_11/addr.go
+++ /dev/null
@@ -1,221 +0,0 @@
-package migration_01_to_11
-
-import (
- "encoding/binary"
- "io"
- "net"
-
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/btcutil/util"
- "github.com/pkt-cash/pktd/lnd/tor"
-)
-
-// addressType specifies the network protocol and version that should be used
-// when connecting to a node at a particular address.
-type addressType uint8
-
-const (
- // tcp4Addr denotes an IPv4 TCP address.
- tcp4Addr addressType = 0
-
- // tcp6Addr denotes an IPv6 TCP address.
- tcp6Addr addressType = 1
-
- // v2OnionAddr denotes a version 2 Tor onion service address.
- v2OnionAddr addressType = 2
-
- // v3OnionAddr denotes a version 3 Tor (prop224) onion service address.
- v3OnionAddr addressType = 3
-)
-
-// encodeTCPAddr serializes a TCP address into its compact raw bytes
-// representation.
-func encodeTCPAddr(w io.Writer, addr *net.TCPAddr) er.R {
- var (
- addrType byte
- ip []byte
- )
-
- if addr.IP.To4() != nil {
- addrType = byte(tcp4Addr)
- ip = addr.IP.To4()
- } else {
- addrType = byte(tcp6Addr)
- ip = addr.IP.To16()
- }
-
- if ip == nil {
- return er.Errorf("unable to encode IP %v", addr.IP)
- }
-
- if _, err := util.Write(w, []byte{addrType}); err != nil {
- return err
- }
-
- if _, err := util.Write(w, ip); err != nil {
- return err
- }
-
- var port [2]byte
- byteOrder.PutUint16(port[:], uint16(addr.Port))
- if _, err := util.Write(w, port[:]); err != nil {
- return err
- }
-
- return nil
-}
-
-// encodeOnionAddr serializes an onion address into its compact raw bytes
-// representation.
-func encodeOnionAddr(w io.Writer, addr *tor.OnionAddr) er.R {
- var suffixIndex int
- hostLen := len(addr.OnionService)
- switch hostLen {
- case tor.V2Len:
- if _, err := util.Write(w, []byte{byte(v2OnionAddr)}); err != nil {
- return err
- }
- suffixIndex = tor.V2Len - tor.OnionSuffixLen
- case tor.V3Len:
- if _, err := util.Write(w, []byte{byte(v3OnionAddr)}); err != nil {
- return err
- }
- suffixIndex = tor.V3Len - tor.OnionSuffixLen
- default:
- return er.New("unknown onion service length")
- }
-
- suffix := addr.OnionService[suffixIndex:]
- if suffix != tor.OnionSuffix {
- return er.Errorf("invalid suffix \"%v\"", suffix)
- }
-
- host, err := tor.Base32Encoding.DecodeString(
- addr.OnionService[:suffixIndex],
- )
- if err != nil {
- return er.E(err)
- }
-
- // Sanity check the decoded length.
- switch {
- case hostLen == tor.V2Len && len(host) != tor.V2DecodedLen:
- return er.Errorf("onion service %v decoded to invalid host %x",
- addr.OnionService, host)
-
- case hostLen == tor.V3Len && len(host) != tor.V3DecodedLen:
- return er.Errorf("onion service %v decoded to invalid host %x",
- addr.OnionService, host)
- }
-
- if _, err := util.Write(w, host); err != nil {
- return err
- }
-
- var port [2]byte
- byteOrder.PutUint16(port[:], uint16(addr.Port))
- if _, err := util.Write(w, port[:]); err != nil {
- return err
- }
-
- return nil
-}
-
-// deserializeAddr reads the serialized raw representation of an address and
-// deserializes it into the actual address. This allows us to avoid address
-// resolution within the channeldb package.
-func deserializeAddr(r io.Reader) (net.Addr, er.R) {
- var addrType [1]byte
- if _, err := r.Read(addrType[:]); err != nil {
- return nil, er.E(err)
- }
-
- var address net.Addr
- switch addressType(addrType[0]) {
- case tcp4Addr:
- var ip [4]byte
- if _, err := r.Read(ip[:]); err != nil {
- return nil, er.E(err)
- }
-
- var port [2]byte
- if _, err := r.Read(port[:]); err != nil {
- return nil, er.E(err)
- }
-
- address = &net.TCPAddr{
- IP: net.IP(ip[:]),
- Port: int(binary.BigEndian.Uint16(port[:])),
- }
- case tcp6Addr:
- var ip [16]byte
- if _, err := r.Read(ip[:]); err != nil {
- return nil, er.E(err)
- }
-
- var port [2]byte
- if _, err := r.Read(port[:]); err != nil {
- return nil, er.E(err)
- }
-
- address = &net.TCPAddr{
- IP: net.IP(ip[:]),
- Port: int(binary.BigEndian.Uint16(port[:])),
- }
- case v2OnionAddr:
- var h [tor.V2DecodedLen]byte
- if _, err := r.Read(h[:]); err != nil {
- return nil, er.E(err)
- }
-
- var p [2]byte
- if _, err := r.Read(p[:]); err != nil {
- return nil, er.E(err)
- }
-
- onionService := tor.Base32Encoding.EncodeToString(h[:])
- onionService += tor.OnionSuffix
- port := int(binary.BigEndian.Uint16(p[:]))
-
- address = &tor.OnionAddr{
- OnionService: onionService,
- Port: port,
- }
- case v3OnionAddr:
- var h [tor.V3DecodedLen]byte
- if _, err := r.Read(h[:]); err != nil {
- return nil, er.E(err)
- }
-
- var p [2]byte
- if _, err := r.Read(p[:]); err != nil {
- return nil, er.E(err)
- }
-
- onionService := tor.Base32Encoding.EncodeToString(h[:])
- onionService += tor.OnionSuffix
- port := int(binary.BigEndian.Uint16(p[:]))
-
- address = &tor.OnionAddr{
- OnionService: onionService,
- Port: port,
- }
- default:
- return nil, ErrUnknownAddressType.Default()
- }
-
- return address, nil
-}
-
-// serializeAddr serializes an address into its raw bytes representation so that
-// it can be deserialized without requiring address resolution.
-func serializeAddr(w io.Writer, address net.Addr) er.R {
- switch addr := address.(type) {
- case *net.TCPAddr:
- return encodeTCPAddr(w, addr)
- case *tor.OnionAddr:
- return encodeOnionAddr(w, addr)
- default:
- return ErrUnknownAddressType.Default()
- }
-}
diff --git a/lnd/channeldb/migration_01_to_11/channel.go b/lnd/channeldb/migration_01_to_11/channel.go
deleted file mode 100644
index 4f2c6010..00000000
--- a/lnd/channeldb/migration_01_to_11/channel.go
+++ /dev/null
@@ -1,751 +0,0 @@
-package migration_01_to_11
-
-import (
- "fmt"
- "io"
- "strconv"
- "strings"
- "sync"
-
- "github.com/pkt-cash/pktd/btcec"
- "github.com/pkt-cash/pktd/btcutil"
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/chaincfg/chainhash"
- "github.com/pkt-cash/pktd/lnd/keychain"
- "github.com/pkt-cash/pktd/lnd/lnwire"
- "github.com/pkt-cash/pktd/lnd/shachain"
- "github.com/pkt-cash/pktd/wire"
-)
-
-var (
- // closedChannelBucket stores summarization information concerning
- // previously open, but now closed channels.
- closedChannelBucket = []byte("closed-chan-bucket")
-
- // openChanBucket stores all the currently open channels. This bucket
- // has a second, nested bucket which is keyed by a node's ID. Within
- // that node ID bucket, all attributes required to track, update, and
- // close a channel are stored.
- //
- // openChan -> nodeID -> chanPoint
- //
- // TODO(roasbeef): flesh out comment
- openChannelBucket = []byte("open-chan-bucket")
-)
-
-// ChannelType is an enum-like type that describes one of several possible
-// channel types. Each open channel is associated with a particular type as the
-// channel type may determine how higher level operations are conducted such as
-// fee negotiation, channel closing, the format of HTLCs, etc.
-// TODO(roasbeef): split up per-chain?
-type ChannelType uint8
-
-const (
- // NOTE: iota isn't used here for this enum needs to be stable
- // long-term as it will be persisted to the database.
-
- // SingleFunder represents a channel wherein one party solely funds the
- // entire capacity of the channel.
- SingleFunder ChannelType = 0
-)
-
-// ChannelConstraints represents a set of constraints meant to allow a node to
-// limit their exposure, enact flow control and ensure that all HTLCs are
-// economically relevant. This struct will be mirrored for both sides of the
-// channel, as each side will enforce various constraints that MUST be adhered
-// to for the life time of the channel. The parameters for each of these
-// constraints are static for the duration of the channel, meaning the channel
-// must be torn down for them to change.
-type ChannelConstraints struct {
- // DustLimit is the threshold (in satoshis) below which any outputs
- // should be trimmed. When an output is trimmed, it isn't materialized
- // as an actual output, but is instead burned to miner's fees.
- DustLimit btcutil.Amount
-
- // ChanReserve is an absolute reservation on the channel for the
- // owner of this set of constraints. This means that the current
- // settled balance for this node CANNOT dip below the reservation
- // amount. This acts as a defense against costless attacks when
- // either side no longer has any skin in the game.
- ChanReserve btcutil.Amount
-
- // MaxPendingAmount is the maximum pending HTLC value that the
- // owner of these constraints can offer the remote node at a
- // particular time.
- MaxPendingAmount lnwire.MilliSatoshi
-
- // MinHTLC is the minimum HTLC value that the owner of these
- // constraints can offer the remote node. If any HTLCs below this
- // amount are offered, then the HTLC will be rejected. This, in
- // tandem with the dust limit allows a node to regulate the
- // smallest HTLC that it deems economically relevant.
- MinHTLC lnwire.MilliSatoshi
-
- // MaxAcceptedHtlcs is the maximum number of HTLCs that the owner of
- // this set of constraints can offer the remote node. This allows each
- // node to limit their over all exposure to HTLCs that may need to be
- // acted upon in the case of a unilateral channel closure or a contract
- // breach.
- MaxAcceptedHtlcs uint16
-
- // CsvDelay is the relative time lock delay expressed in blocks. Any
- // settled outputs that pay to the owner of this channel configuration
- // MUST ensure that the delay branch uses this value as the relative
- // time lock. Similarly, any HTLC's offered by this node should use
- // this value as well.
- CsvDelay uint16
-}
-
-// ChannelConfig is a struct that houses the various configuration opens for
-// channels. Each side maintains an instance of this configuration file as it
-// governs: how the funding and commitment transaction to be created, the
-// nature of HTLC's allotted, the keys to be used for delivery, and relative
-// time lock parameters.
-type ChannelConfig struct {
- // ChannelConstraints is the set of constraints that must be upheld for
- // the duration of the channel for the owner of this channel
- // configuration. Constraints govern a number of flow control related
- // parameters, also including the smallest HTLC that will be accepted
- // by a participant.
- ChannelConstraints
-
- // MultiSigKey is the key to be used within the 2-of-2 output script
- // for the owner of this channel config.
- MultiSigKey keychain.KeyDescriptor
-
- // RevocationBasePoint is the base public key to be used when deriving
- // revocation keys for the remote node's commitment transaction. This
- // will be combined along with a per commitment secret to derive a
- // unique revocation key for each state.
- RevocationBasePoint keychain.KeyDescriptor
-
- // PaymentBasePoint is the base public key to be used when deriving
- // the key used within the non-delayed pay-to-self output on the
- // commitment transaction for a node. This will be combined with a
- // tweak derived from the per-commitment point to ensure unique keys
- // for each commitment transaction.
- PaymentBasePoint keychain.KeyDescriptor
-
- // DelayBasePoint is the base public key to be used when deriving the
- // key used within the delayed pay-to-self output on the commitment
- // transaction for a node. This will be combined with a tweak derived
- // from the per-commitment point to ensure unique keys for each
- // commitment transaction.
- DelayBasePoint keychain.KeyDescriptor
-
- // HtlcBasePoint is the base public key to be used when deriving the
- // local HTLC key. The derived key (combined with the tweak derived
- // from the per-commitment point) is used within the "to self" clause
- // within any HTLC output scripts.
- HtlcBasePoint keychain.KeyDescriptor
-}
-
-// ChannelCommitment is a snapshot of the commitment state at a particular
-// point in the commitment chain. With each state transition, a snapshot of the
-// current state along with all non-settled HTLCs are recorded. These snapshots
-// detail the state of the _remote_ party's commitment at a particular state
-// number. For ourselves (the local node) we ONLY store our most recent
-// (unrevoked) state for safety purposes.
-type ChannelCommitment struct {
- // CommitHeight is the update number that this ChannelDelta represents
- // the total number of commitment updates to this point. This can be
- // viewed as sort of a "commitment height" as this number is
- // monotonically increasing.
- CommitHeight uint64
-
- // LocalLogIndex is the cumulative log index index of the local node at
- // this point in the commitment chain. This value will be incremented
- // for each _update_ added to the local update log.
- LocalLogIndex uint64
-
- // LocalHtlcIndex is the current local running HTLC index. This value
- // will be incremented for each outgoing HTLC the local node offers.
- LocalHtlcIndex uint64
-
- // RemoteLogIndex is the cumulative log index index of the remote node
- // at this point in the commitment chain. This value will be
- // incremented for each _update_ added to the remote update log.
- RemoteLogIndex uint64
-
- // RemoteHtlcIndex is the current remote running HTLC index. This value
- // will be incremented for each outgoing HTLC the remote node offers.
- RemoteHtlcIndex uint64
-
- // LocalBalance is the current available settled balance within the
- // channel directly spendable by us.
- LocalBalance lnwire.MilliSatoshi
-
- // RemoteBalance is the current available settled balance within the
- // channel directly spendable by the remote node.
- RemoteBalance lnwire.MilliSatoshi
-
- // CommitFee is the amount calculated to be paid in fees for the
- // current set of commitment transactions. The fee amount is persisted
- // with the channel in order to allow the fee amount to be removed and
- // recalculated with each channel state update, including updates that
- // happen after a system restart.
- CommitFee btcutil.Amount
-
- // FeePerKw is the min satoshis/kilo-weight that should be paid within
- // the commitment transaction for the entire duration of the channel's
- // lifetime. This field may be updated during normal operation of the
- // channel as on-chain conditions change.
- //
- // TODO(halseth): make this SatPerKWeight. Cannot be done atm because
- // this will cause the import cycle lnwallet<->channeldb. Fee
- // estimation stuff should be in its own package.
- FeePerKw btcutil.Amount
-
- // CommitTx is the latest version of the commitment state, broadcast
- // able by us.
- CommitTx *wire.MsgTx
-
- // CommitSig is one half of the signature required to fully complete
- // the script for the commitment transaction above. This is the
- // signature signed by the remote party for our version of the
- // commitment transactions.
- CommitSig []byte
-
- // Htlcs is the set of HTLC's that are pending at this particular
- // commitment height.
- Htlcs []HTLC
-
- // TODO(roasbeef): pending commit pointer?
- // * lets just walk through
-}
-
-// ChannelStatus is a bit vector used to indicate whether an OpenChannel is in
-// the default usable state, or a state where it shouldn't be used.
-type ChannelStatus uint8
-
-var (
- // ChanStatusDefault is the normal state of an open channel.
- ChanStatusDefault ChannelStatus
-
- // ChanStatusBorked indicates that the channel has entered an
- // irreconcilable state, triggered by a state desynchronization or
- // channel breach. Channels in this state should never be added to the
- // htlc switch.
- ChanStatusBorked ChannelStatus = 1
-
- // ChanStatusCommitBroadcasted indicates that a commitment for this
- // channel has been broadcasted.
- ChanStatusCommitBroadcasted ChannelStatus = 1 << 1
-
- // ChanStatusLocalDataLoss indicates that we have lost channel state
- // for this channel, and broadcasting our latest commitment might be
- // considered a breach.
- //
- // TODO(halseh): actually enforce that we are not force closing such a
- // channel.
- ChanStatusLocalDataLoss ChannelStatus = 1 << 2
-
- // ChanStatusRestored is a status flag that signals that the channel
- // has been restored, and doesn't have all the fields a typical channel
- // will have.
- ChanStatusRestored ChannelStatus = 1 << 3
-)
-
-// chanStatusStrings maps a ChannelStatus to a human friendly string that
-// describes that status.
-var chanStatusStrings = map[ChannelStatus]string{
- ChanStatusDefault: "ChanStatusDefault",
- ChanStatusBorked: "ChanStatusBorked",
- ChanStatusCommitBroadcasted: "ChanStatusCommitBroadcasted",
- ChanStatusLocalDataLoss: "ChanStatusLocalDataLoss",
- ChanStatusRestored: "ChanStatusRestored",
-}
-
-// orderedChanStatusFlags is an in-order list of all that channel status flags.
-var orderedChanStatusFlags = []ChannelStatus{
- ChanStatusDefault,
- ChanStatusBorked,
- ChanStatusCommitBroadcasted,
- ChanStatusLocalDataLoss,
- ChanStatusRestored,
-}
-
-// String returns a human-readable representation of the ChannelStatus.
-func (c ChannelStatus) String() string {
- // If no flags are set, then this is the default case.
- if c == 0 {
- return chanStatusStrings[ChanStatusDefault]
- }
-
- // Add individual bit flags.
- statusStr := ""
- for _, flag := range orderedChanStatusFlags {
- if c&flag == flag {
- statusStr += chanStatusStrings[flag] + "|"
- c -= flag
- }
- }
-
- // Remove anything to the right of the final bar, including it as well.
- statusStr = strings.TrimRight(statusStr, "|")
-
- // Add any remaining flags which aren't accounted for as hex.
- if c != 0 {
- statusStr += "|0x" + strconv.FormatUint(uint64(c), 16)
- }
-
- // If this was purely an unknown flag, then remove the extra bar at the
- // start of the string.
- statusStr = strings.TrimLeft(statusStr, "|")
-
- return statusStr
-}
-
-// OpenChannel encapsulates the persistent and dynamic state of an open channel
-// with a remote node. An open channel supports several options for on-disk
-// serialization depending on the exact context. Full (upon channel creation)
-// state commitments, and partial (due to a commitment update) writes are
-// supported. Each partial write due to a state update appends the new update
-// to an on-disk log, which can then subsequently be queried in order to
-// "time-travel" to a prior state.
-type OpenChannel struct {
- // ChanType denotes which type of channel this is.
- ChanType ChannelType
-
- // ChainHash is a hash which represents the blockchain that this
- // channel will be opened within. This value is typically the genesis
- // hash. In the case that the original chain went through a contentious
- // hard-fork, then this value will be tweaked using the unique fork
- // point on each branch.
- ChainHash chainhash.Hash
-
- // FundingOutpoint is the outpoint of the final funding transaction.
- // This value uniquely and globally identifies the channel within the
- // target blockchain as specified by the chain hash parameter.
- FundingOutpoint wire.OutPoint
-
- // ShortChannelID encodes the exact location in the chain in which the
- // channel was initially confirmed. This includes: the block height,
- // transaction index, and the output within the target transaction.
- ShortChannelID lnwire.ShortChannelID
-
- // IsPending indicates whether a channel's funding transaction has been
- // confirmed.
- IsPending bool
-
- // IsInitiator is a bool which indicates if we were the original
- // initiator for the channel. This value may affect how higher levels
- // negotiate fees, or close the channel.
- IsInitiator bool
-
- // FundingBroadcastHeight is the height in which the funding
- // transaction was broadcast. This value can be used by higher level
- // sub-systems to determine if a channel is stale and/or should have
- // been confirmed before a certain height.
- FundingBroadcastHeight uint32
-
- // NumConfsRequired is the number of confirmations a channel's funding
- // transaction must have received in order to be considered available
- // for normal transactional use.
- NumConfsRequired uint16
-
- // ChannelFlags holds the flags that were sent as part of the
- // open_channel message.
- ChannelFlags lnwire.FundingFlag
-
- // IdentityPub is the identity public key of the remote node this
- // channel has been established with.
- IdentityPub *btcec.PublicKey
-
- // Capacity is the total capacity of this channel.
- Capacity btcutil.Amount
-
- // TotalMSatSent is the total number of milli-satoshis we've sent
- // within this channel.
- TotalMSatSent lnwire.MilliSatoshi
-
- // TotalMSatReceived is the total number of milli-satoshis we've
- // received within this channel.
- TotalMSatReceived lnwire.MilliSatoshi
-
- // LocalChanCfg is the channel configuration for the local node.
- LocalChanCfg ChannelConfig
-
- // RemoteChanCfg is the channel configuration for the remote node.
- RemoteChanCfg ChannelConfig
-
- // LocalCommitment is the current local commitment state for the local
- // party. This is stored distinct from the state of the remote party
- // as there are certain asymmetric parameters which affect the
- // structure of each commitment.
- LocalCommitment ChannelCommitment
-
- // RemoteCommitment is the current remote commitment state for the
- // remote party. This is stored distinct from the state of the local
- // party as there are certain asymmetric parameters which affect the
- // structure of each commitment.
- RemoteCommitment ChannelCommitment
-
- // RemoteCurrentRevocation is the current revocation for their
- // commitment transaction. However, since this the derived public key,
- // we don't yet have the private key so we aren't yet able to verify
- // that it's actually in the hash chain.
- RemoteCurrentRevocation *btcec.PublicKey
-
- // RemoteNextRevocation is the revocation key to be used for the *next*
- // commitment transaction we create for the local node. Within the
- // specification, this value is referred to as the
- // per-commitment-point.
- RemoteNextRevocation *btcec.PublicKey
-
- // RevocationProducer is used to generate the revocation in such a way
- // that remote side might store it efficiently and have the ability to
- // restore the revocation by index if needed. Current implementation of
- // secret producer is shachain producer.
- RevocationProducer shachain.Producer
-
- // RevocationStore is used to efficiently store the revocations for
- // previous channels states sent to us by remote side. Current
- // implementation of secret store is shachain store.
- RevocationStore shachain.Store
-
- // FundingTxn is the transaction containing this channel's funding
- // outpoint. Upon restarts, this txn will be rebroadcast if the channel
- // is found to be pending.
- //
- // NOTE: This value will only be populated for single-funder channels
- // for which we are the initiator.
- FundingTxn *wire.MsgTx
-
- // TODO(roasbeef): eww
- Db *DB
-
- // TODO(roasbeef): just need to store local and remote HTLC's?
-
- sync.RWMutex
-}
-
-// ShortChanID returns the current ShortChannelID of this channel.
-func (c *OpenChannel) ShortChanID() lnwire.ShortChannelID {
- c.RLock()
- defer c.RUnlock()
-
- return c.ShortChannelID
-}
-
-// HTLC is the on-disk representation of a hash time-locked contract. HTLCs are
-// contained within ChannelDeltas which encode the current state of the
-// commitment between state updates.
-//
-// TODO(roasbeef): save space by using smaller ints at tail end?
-type HTLC struct {
- // Signature is the signature for the second level covenant transaction
- // for this HTLC. The second level transaction is a timeout tx in the
- // case that this is an outgoing HTLC, and a success tx in the case
- // that this is an incoming HTLC.
- //
- // TODO(roasbeef): make [64]byte instead?
- Signature []byte
-
- // RHash is the payment hash of the HTLC.
- RHash [32]byte
-
- // Amt is the amount of milli-satoshis this HTLC escrows.
- Amt lnwire.MilliSatoshi
-
- // RefundTimeout is the absolute timeout on the HTLC that the sender
- // must wait before reclaiming the funds in limbo.
- RefundTimeout uint32
-
- // OutputIndex is the output index for this particular HTLC output
- // within the commitment transaction.
- OutputIndex int32
-
- // Incoming denotes whether we're the receiver or the sender of this
- // HTLC.
- Incoming bool
-
- // OnionBlob is an opaque blob which is used to complete multi-hop
- // routing.
- OnionBlob []byte
-
- // HtlcIndex is the HTLC counter index of this active, outstanding
- // HTLC. This differs from the LogIndex, as the HtlcIndex is only
- // incremented for each offered HTLC, while they LogIndex is
- // incremented for each update (includes settle+fail).
- HtlcIndex uint64
-
- // LogIndex is the cumulative log index of this HTLC. This differs
- // from the HtlcIndex as this will be incremented for each new log
- // update added.
- LogIndex uint64
-}
-
-// CircuitKey is used by a channel to uniquely identify the HTLCs it receives
-// from the switch, and is used to purge our in-memory state of HTLCs that have
-// already been processed by a link. Two list of CircuitKeys are included in
-// each CommitDiff to allow a link to determine which in-memory htlcs directed
-// the opening and closing of circuits in the switch's circuit map.
-type CircuitKey struct {
- // ChanID is the short chanid indicating the HTLC's origin.
- //
- // NOTE: It is fine for this value to be blank, as this indicates a
- // locally-sourced payment.
- ChanID lnwire.ShortChannelID
-
- // HtlcID is the unique htlc index predominately assigned by links,
- // though can also be assigned by switch in the case of locally-sourced
- // payments.
- HtlcID uint64
-}
-
-// String returns a string representation of the CircuitKey.
-func (k CircuitKey) String() string {
- return fmt.Sprintf("(Chan ID=%s, HTLC ID=%d)", k.ChanID, k.HtlcID)
-}
-
-// ClosureType is an enum like structure that details exactly _how_ a channel
-// was closed. Three closure types are currently possible: none, cooperative,
-// local force close, remote force close, and (remote) breach.
-type ClosureType uint8
-
-const (
- // RemoteForceClose indicates that the remote peer has unilaterally
- // broadcast their current commitment state on-chain.
- RemoteForceClose ClosureType = 4
-)
-
-// ChannelCloseSummary contains the final state of a channel at the point it
-// was closed. Once a channel is closed, all the information pertaining to that
-// channel within the openChannelBucket is deleted, and a compact summary is
-// put in place instead.
-type ChannelCloseSummary struct {
- // ChanPoint is the outpoint for this channel's funding transaction,
- // and is used as a unique identifier for the channel.
- ChanPoint wire.OutPoint
-
- // ShortChanID encodes the exact location in the chain in which the
- // channel was initially confirmed. This includes: the block height,
- // transaction index, and the output within the target transaction.
- ShortChanID lnwire.ShortChannelID
-
- // ChainHash is the hash of the genesis block that this channel resides
- // within.
- ChainHash chainhash.Hash
-
- // ClosingTXID is the txid of the transaction which ultimately closed
- // this channel.
- ClosingTXID chainhash.Hash
-
- // RemotePub is the public key of the remote peer that we formerly had
- // a channel with.
- RemotePub *btcec.PublicKey
-
- // Capacity was the total capacity of the channel.
- Capacity btcutil.Amount
-
- // CloseHeight is the height at which the funding transaction was
- // spent.
- CloseHeight uint32
-
- // SettledBalance is our total balance settled balance at the time of
- // channel closure. This _does not_ include the sum of any outputs that
- // have been time-locked as a result of the unilateral channel closure.
- SettledBalance btcutil.Amount
-
- // TimeLockedBalance is the sum of all the time-locked outputs at the
- // time of channel closure. If we triggered the force closure of this
- // channel, then this value will be non-zero if our settled output is
- // above the dust limit. If we were on the receiving side of a channel
- // force closure, then this value will be non-zero if we had any
- // outstanding outgoing HTLC's at the time of channel closure.
- TimeLockedBalance btcutil.Amount
-
- // CloseType details exactly _how_ the channel was closed. Five closure
- // types are possible: cooperative, local force, remote force, breach
- // and funding canceled.
- CloseType ClosureType
-
- // IsPending indicates whether this channel is in the 'pending close'
- // state, which means the channel closing transaction has been
- // confirmed, but not yet been fully resolved. In the case of a channel
- // that has been cooperatively closed, it will go straight into the
- // fully resolved state as soon as the closing transaction has been
- // confirmed. However, for channels that have been force closed, they'll
- // stay marked as "pending" until _all_ the pending funds have been
- // swept.
- IsPending bool
-
- // RemoteCurrentRevocation is the current revocation for their
- // commitment transaction. However, since this is the derived public key,
- // we don't yet have the private key so we aren't yet able to verify
- // that it's actually in the hash chain.
- RemoteCurrentRevocation *btcec.PublicKey
-
- // RemoteNextRevocation is the revocation key to be used for the *next*
- // commitment transaction we create for the local node. Within the
- // specification, this value is referred to as the
- // per-commitment-point.
- RemoteNextRevocation *btcec.PublicKey
-
- // LocalChanCfg is the channel configuration for the local node.
- LocalChanConfig ChannelConfig
-
- // LastChanSyncMsg is the ChannelReestablish message for this channel
- // for the state at the point where it was closed.
- LastChanSyncMsg *lnwire.ChannelReestablish
-}
-
-func serializeChannelCloseSummary(w io.Writer, cs *ChannelCloseSummary) er.R {
- err := WriteElements(w,
- cs.ChanPoint, cs.ShortChanID, cs.ChainHash, cs.ClosingTXID,
- cs.CloseHeight, cs.RemotePub, cs.Capacity, cs.SettledBalance,
- cs.TimeLockedBalance, cs.CloseType, cs.IsPending,
- )
- if err != nil {
- return err
- }
-
- // If this is a close channel summary created before the addition of
- // the new fields, then we can exit here.
- if cs.RemoteCurrentRevocation == nil {
- return WriteElements(w, false)
- }
-
- // If fields are present, write boolean to indicate this, and continue.
- if err := WriteElements(w, true); err != nil {
- return err
- }
-
- if err := WriteElements(w, cs.RemoteCurrentRevocation); err != nil {
- return err
- }
-
- if err := writeChanConfig(w, &cs.LocalChanConfig); err != nil {
- return err
- }
-
- // The RemoteNextRevocation field is optional, as it's possible for a
- // channel to be closed before we learn of the next unrevoked
- // revocation point for the remote party. Write a boolen indicating
- // whether this field is present or not.
- if err := WriteElements(w, cs.RemoteNextRevocation != nil); err != nil {
- return err
- }
-
- // Write the field, if present.
- if cs.RemoteNextRevocation != nil {
- if err = WriteElements(w, cs.RemoteNextRevocation); err != nil {
- return err
- }
- }
-
- // Write whether the channel sync message is present.
- if err := WriteElements(w, cs.LastChanSyncMsg != nil); err != nil {
- return err
- }
-
- // Write the channel sync message, if present.
- if cs.LastChanSyncMsg != nil {
- if err := WriteElements(w, cs.LastChanSyncMsg); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func deserializeCloseChannelSummary(r io.Reader) (*ChannelCloseSummary, er.R) {
- c := &ChannelCloseSummary{}
-
- err := ReadElements(r,
- &c.ChanPoint, &c.ShortChanID, &c.ChainHash, &c.ClosingTXID,
- &c.CloseHeight, &c.RemotePub, &c.Capacity, &c.SettledBalance,
- &c.TimeLockedBalance, &c.CloseType, &c.IsPending,
- )
- if err != nil {
- return nil, err
- }
-
- // We'll now check to see if the channel close summary was encoded with
- // any of the additional optional fields.
- var hasNewFields bool
- err = ReadElements(r, &hasNewFields)
- if err != nil {
- return nil, err
- }
-
- // If fields are not present, we can return.
- if !hasNewFields {
- return c, nil
- }
-
- // Otherwise read the new fields.
- if err := ReadElements(r, &c.RemoteCurrentRevocation); err != nil {
- return nil, err
- }
-
- if err := readChanConfig(r, &c.LocalChanConfig); err != nil {
- return nil, err
- }
-
- // Finally, we'll attempt to read the next unrevoked commitment point
- // for the remote party. If we closed the channel before receiving a
- // funding locked message then this might not be present. A boolean
- // indicating whether the field is present will come first.
- var hasRemoteNextRevocation bool
- err = ReadElements(r, &hasRemoteNextRevocation)
- if err != nil {
- return nil, err
- }
-
- // If this field was written, read it.
- if hasRemoteNextRevocation {
- err = ReadElements(r, &c.RemoteNextRevocation)
- if err != nil {
- return nil, err
- }
- }
-
- // Check if we have a channel sync message to read.
- var hasChanSyncMsg bool
- err = ReadElements(r, &hasChanSyncMsg)
- if er.Wrapped(err) == io.EOF {
- return c, nil
- } else if err != nil {
- return nil, err
- }
-
- // If a chan sync message is present, read it.
- if hasChanSyncMsg {
- // We must pass in reference to a lnwire.Message for the codec
- // to support it.
- var msg lnwire.Message
- if err := ReadElements(r, &msg); err != nil {
- return nil, err
- }
-
- chanSync, ok := msg.(*lnwire.ChannelReestablish)
- if !ok {
- return nil, er.New("unable cast db Message to " +
- "ChannelReestablish")
- }
- c.LastChanSyncMsg = chanSync
- }
-
- return c, nil
-}
-
-func writeChanConfig(b io.Writer, c *ChannelConfig) er.R {
- return WriteElements(b,
- c.DustLimit, c.MaxPendingAmount, c.ChanReserve, c.MinHTLC,
- c.MaxAcceptedHtlcs, c.CsvDelay, c.MultiSigKey,
- c.RevocationBasePoint, c.PaymentBasePoint, c.DelayBasePoint,
- c.HtlcBasePoint,
- )
-}
-
-func readChanConfig(b io.Reader, c *ChannelConfig) er.R {
- return ReadElements(b,
- &c.DustLimit, &c.MaxPendingAmount, &c.ChanReserve,
- &c.MinHTLC, &c.MaxAcceptedHtlcs, &c.CsvDelay,
- &c.MultiSigKey, &c.RevocationBasePoint,
- &c.PaymentBasePoint, &c.DelayBasePoint,
- &c.HtlcBasePoint,
- )
-}
diff --git a/lnd/channeldb/migration_01_to_11/channel_test.go b/lnd/channeldb/migration_01_to_11/channel_test.go
deleted file mode 100644
index f65bec8f..00000000
--- a/lnd/channeldb/migration_01_to_11/channel_test.go
+++ /dev/null
@@ -1,222 +0,0 @@
-package migration_01_to_11
-
-import (
- "bytes"
- "io/ioutil"
- "math/rand"
- "os"
-
- "github.com/pkt-cash/pktd/btcec"
- "github.com/pkt-cash/pktd/btcutil"
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/chaincfg/chainhash"
- "github.com/pkt-cash/pktd/lnd/keychain"
- "github.com/pkt-cash/pktd/lnd/lnwire"
- "github.com/pkt-cash/pktd/lnd/shachain"
- _ "github.com/pkt-cash/pktd/pktwallet/walletdb/bdb"
- "github.com/pkt-cash/pktd/wire"
-)
-
-var (
- key = [chainhash.HashSize]byte{
- 0x81, 0xb6, 0x37, 0xd8, 0xfc, 0xd2, 0xc6, 0xda,
- 0x68, 0x59, 0xe6, 0x96, 0x31, 0x13, 0xa1, 0x17,
- 0xd, 0xe7, 0x93, 0xe4, 0xb7, 0x25, 0xb8, 0x4d,
- 0x1e, 0xb, 0x4c, 0xf9, 0x9e, 0xc5, 0x8c, 0xe9,
- }
- rev = [chainhash.HashSize]byte{
- 0x51, 0xb6, 0x37, 0xd8, 0xfc, 0xd2, 0xc6, 0xda,
- 0x48, 0x59, 0xe6, 0x96, 0x31, 0x13, 0xa1, 0x17,
- 0x2d, 0xe7, 0x93, 0xe4,
- }
- testTx = &wire.MsgTx{
- Version: 1,
- TxIn: []*wire.TxIn{
- {
- PreviousOutPoint: wire.OutPoint{
- Hash: chainhash.Hash{},
- Index: 0xffffffff,
- },
- SignatureScript: []byte{0x04, 0x31, 0xdc, 0x00, 0x1b, 0x01, 0x62},
- Sequence: 0xffffffff,
- },
- },
- TxOut: []*wire.TxOut{
- {
- Value: 5000000000,
- PkScript: []byte{
- 0x41, // OP_DATA_65
- 0x04, 0xd6, 0x4b, 0xdf, 0xd0, 0x9e, 0xb1, 0xc5,
- 0xfe, 0x29, 0x5a, 0xbd, 0xeb, 0x1d, 0xca, 0x42,
- 0x81, 0xbe, 0x98, 0x8e, 0x2d, 0xa0, 0xb6, 0xc1,
- 0xc6, 0xa5, 0x9d, 0xc2, 0x26, 0xc2, 0x86, 0x24,
- 0xe1, 0x81, 0x75, 0xe8, 0x51, 0xc9, 0x6b, 0x97,
- 0x3d, 0x81, 0xb0, 0x1c, 0xc3, 0x1f, 0x04, 0x78,
- 0x34, 0xbc, 0x06, 0xd6, 0xd6, 0xed, 0xf6, 0x20,
- 0xd1, 0x84, 0x24, 0x1a, 0x6a, 0xed, 0x8b, 0x63,
- 0xa6, // 65-byte signature
- 0xac, // OP_CHECKSIG
- },
- },
- },
- LockTime: 5,
- }
- privKey, pubKey = btcec.PrivKeyFromBytes(btcec.S256(), key[:])
-)
-
-// makeTestDB creates a new instance of the ChannelDB for testing purposes. A
-// callback which cleans up the created temporary directories is also returned
-// and intended to be executed after the test completes.
-func makeTestDB() (*DB, func(), er.R) {
- // First, create a temporary directory to be used for the duration of
- // this test.
- tempDirName, errr := ioutil.TempDir("", "channeldb")
- if errr != nil {
- return nil, nil, er.E(errr)
- }
-
- // Next, create channeldb for the first time.
- cdb, err := Open(tempDirName)
- if err != nil {
- return nil, nil, err
- }
-
- cleanUp := func() {
- cdb.Close()
- os.RemoveAll(tempDirName)
- }
-
- return cdb, cleanUp, nil
-}
-
-func createTestChannelState(cdb *DB) (*OpenChannel, er.R) {
- // Simulate 1000 channel updates.
- producer, err := shachain.NewRevocationProducerFromBytes(key[:])
- if err != nil {
- return nil, err
- }
- store := shachain.NewRevocationStore()
- for i := 0; i < 1; i++ {
- preImage, err := producer.AtIndex(uint64(i))
- if err != nil {
- return nil, err
- }
-
- if err := store.AddNextEntry(preImage); err != nil {
- return nil, err
- }
- }
-
- localCfg := ChannelConfig{
- ChannelConstraints: ChannelConstraints{
- DustLimit: btcutil.Amount(rand.Int63()),
- MaxPendingAmount: lnwire.MilliSatoshi(rand.Int63()),
- ChanReserve: btcutil.Amount(rand.Int63()),
- MinHTLC: lnwire.MilliSatoshi(rand.Int63()),
- MaxAcceptedHtlcs: uint16(rand.Int31()),
- CsvDelay: uint16(rand.Int31()),
- },
- MultiSigKey: keychain.KeyDescriptor{
- PubKey: privKey.PubKey(),
- },
- RevocationBasePoint: keychain.KeyDescriptor{
- PubKey: privKey.PubKey(),
- },
- PaymentBasePoint: keychain.KeyDescriptor{
- PubKey: privKey.PubKey(),
- },
- DelayBasePoint: keychain.KeyDescriptor{
- PubKey: privKey.PubKey(),
- },
- HtlcBasePoint: keychain.KeyDescriptor{
- PubKey: privKey.PubKey(),
- },
- }
- remoteCfg := ChannelConfig{
- ChannelConstraints: ChannelConstraints{
- DustLimit: btcutil.Amount(rand.Int63()),
- MaxPendingAmount: lnwire.MilliSatoshi(rand.Int63()),
- ChanReserve: btcutil.Amount(rand.Int63()),
- MinHTLC: lnwire.MilliSatoshi(rand.Int63()),
- MaxAcceptedHtlcs: uint16(rand.Int31()),
- CsvDelay: uint16(rand.Int31()),
- },
- MultiSigKey: keychain.KeyDescriptor{
- PubKey: privKey.PubKey(),
- KeyLocator: keychain.KeyLocator{
- Family: keychain.KeyFamilyMultiSig,
- Index: 9,
- },
- },
- RevocationBasePoint: keychain.KeyDescriptor{
- PubKey: privKey.PubKey(),
- KeyLocator: keychain.KeyLocator{
- Family: keychain.KeyFamilyRevocationBase,
- Index: 8,
- },
- },
- PaymentBasePoint: keychain.KeyDescriptor{
- PubKey: privKey.PubKey(),
- KeyLocator: keychain.KeyLocator{
- Family: keychain.KeyFamilyPaymentBase,
- Index: 7,
- },
- },
- DelayBasePoint: keychain.KeyDescriptor{
- PubKey: privKey.PubKey(),
- KeyLocator: keychain.KeyLocator{
- Family: keychain.KeyFamilyDelayBase,
- Index: 6,
- },
- },
- HtlcBasePoint: keychain.KeyDescriptor{
- PubKey: privKey.PubKey(),
- KeyLocator: keychain.KeyLocator{
- Family: keychain.KeyFamilyHtlcBase,
- Index: 5,
- },
- },
- }
-
- chanID := lnwire.NewShortChanIDFromInt(uint64(rand.Int63()))
-
- return &OpenChannel{
- ChanType: SingleFunder,
- ChainHash: key,
- FundingOutpoint: wire.OutPoint{Hash: key, Index: rand.Uint32()},
- ShortChannelID: chanID,
- IsInitiator: true,
- IsPending: true,
- IdentityPub: pubKey,
- Capacity: btcutil.Amount(10000),
- LocalChanCfg: localCfg,
- RemoteChanCfg: remoteCfg,
- TotalMSatSent: 8,
- TotalMSatReceived: 2,
- LocalCommitment: ChannelCommitment{
- CommitHeight: 0,
- LocalBalance: lnwire.MilliSatoshi(9000),
- RemoteBalance: lnwire.MilliSatoshi(3000),
- CommitFee: btcutil.Amount(rand.Int63()),
- FeePerKw: btcutil.Amount(5000),
- CommitTx: testTx,
- CommitSig: bytes.Repeat([]byte{1}, 71),
- },
- RemoteCommitment: ChannelCommitment{
- CommitHeight: 0,
- LocalBalance: lnwire.MilliSatoshi(3000),
- RemoteBalance: lnwire.MilliSatoshi(9000),
- CommitFee: btcutil.Amount(rand.Int63()),
- FeePerKw: btcutil.Amount(5000),
- CommitTx: testTx,
- CommitSig: bytes.Repeat([]byte{1}, 71),
- },
- NumConfsRequired: 4,
- RemoteCurrentRevocation: privKey.PubKey(),
- RemoteNextRevocation: privKey.PubKey(),
- RevocationProducer: producer,
- RevocationStore: store,
- Db: cdb,
- FundingTxn: testTx,
- }, nil
-}
diff --git a/lnd/channeldb/migration_01_to_11/codec.go b/lnd/channeldb/migration_01_to_11/codec.go
deleted file mode 100644
index 7892cb2d..00000000
--- a/lnd/channeldb/migration_01_to_11/codec.go
+++ /dev/null
@@ -1,449 +0,0 @@
-package migration_01_to_11
-
-import (
- "fmt"
- "io"
- "net"
-
- "github.com/pkt-cash/pktd/btcec"
- "github.com/pkt-cash/pktd/btcutil"
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/btcutil/util"
- "github.com/pkt-cash/pktd/chaincfg/chainhash"
- "github.com/pkt-cash/pktd/lnd/keychain"
- "github.com/pkt-cash/pktd/lnd/lnwire"
- "github.com/pkt-cash/pktd/lnd/shachain"
- "github.com/pkt-cash/pktd/wire"
-)
-
-// writeOutpoint writes an outpoint to the passed writer using the minimal
-// amount of bytes possible.
-func writeOutpoint(w io.Writer, o *wire.OutPoint) er.R {
- if _, err := util.Write(w, o.Hash[:]); err != nil {
- return err
- }
- if err := util.WriteBin(w, byteOrder, o.Index); err != nil {
- return err
- }
-
- return nil
-}
-
-// readOutpoint reads an outpoint from the passed reader that was previously
-// written using the writeOutpoint struct.
-func readOutpoint(r io.Reader, o *wire.OutPoint) er.R {
- if _, err := util.ReadFull(r, o.Hash[:]); err != nil {
- return err
- }
- if err := util.ReadBin(r, byteOrder, &o.Index); err != nil {
- return err
- }
-
- return nil
-}
-
-// UnknownElementType is an error returned when the codec is unable to encode or
-// decode a particular type.
-type UnknownElementType struct {
- method string
- element interface{}
-}
-
-// Error returns the name of the method that encountered the error, as well as
-// the type that was unsupported.
-func (e UnknownElementType) Error() string {
- return fmt.Sprintf("Unknown type in %s: %T", e.method, e.element)
-}
-
-// WriteElement is a one-stop shop to write the big endian representation of
-// any element which is to be serialized for storage on disk. The passed
-// io.Writer should be backed by an appropriately sized byte slice, or be able
-// to dynamically expand to accommodate additional data.
-func WriteElement(w io.Writer, element interface{}) er.R {
- switch e := element.(type) {
- case keychain.KeyDescriptor:
- if err := util.WriteBin(w, byteOrder, e.Family); err != nil {
- return err
- }
- if err := util.WriteBin(w, byteOrder, e.Index); err != nil {
- return err
- }
-
- if e.PubKey != nil {
- if err := util.WriteBin(w, byteOrder, true); err != nil {
- return er.Errorf("error writing serialized element: %s", err)
- }
-
- return WriteElement(w, e.PubKey)
- }
-
- return util.WriteBin(w, byteOrder, false)
- case ChannelType:
- if err := util.WriteBin(w, byteOrder, e); err != nil {
- return err
- }
-
- case chainhash.Hash:
- if _, err := util.Write(w, e[:]); err != nil {
- return err
- }
-
- case wire.OutPoint:
- return writeOutpoint(w, &e)
-
- case lnwire.ShortChannelID:
- if err := util.WriteBin(w, byteOrder, e.ToUint64()); err != nil {
- return err
- }
-
- case lnwire.ChannelID:
- if _, err := util.Write(w, e[:]); err != nil {
- return err
- }
-
- case int64, uint64:
- if err := util.WriteBin(w, byteOrder, e); err != nil {
- return err
- }
-
- case uint32:
- if err := util.WriteBin(w, byteOrder, e); err != nil {
- return err
- }
-
- case int32:
- if err := util.WriteBin(w, byteOrder, e); err != nil {
- return err
- }
-
- case uint16:
- if err := util.WriteBin(w, byteOrder, e); err != nil {
- return err
- }
-
- case uint8:
- if err := util.WriteBin(w, byteOrder, e); err != nil {
- return err
- }
-
- case bool:
- if err := util.WriteBin(w, byteOrder, e); err != nil {
- return err
- }
-
- case btcutil.Amount:
- if err := util.WriteBin(w, byteOrder, uint64(e)); err != nil {
- return err
- }
-
- case lnwire.MilliSatoshi:
- if err := util.WriteBin(w, byteOrder, uint64(e)); err != nil {
- return err
- }
-
- case *btcec.PrivateKey:
- b := e.Serialize()
- if _, err := util.Write(w, b); err != nil {
- return err
- }
-
- case *btcec.PublicKey:
- b := e.SerializeCompressed()
- if _, err := util.Write(w, b); err != nil {
- return err
- }
-
- case shachain.Producer:
- return e.Encode(w)
-
- case shachain.Store:
- return e.Encode(w)
-
- case *wire.MsgTx:
- return e.Serialize(w)
-
- case [32]byte:
- if _, err := util.Write(w, e[:]); err != nil {
- return err
- }
-
- case []byte:
- if err := wire.WriteVarBytes(w, 0, e); err != nil {
- return err
- }
-
- case lnwire.Message:
- if _, err := lnwire.WriteMessage(w, e, 0); err != nil {
- return err
- }
-
- case ChannelStatus:
- if err := util.WriteBin(w, byteOrder, e); err != nil {
- return err
- }
-
- case ClosureType:
- if err := util.WriteBin(w, byteOrder, e); err != nil {
- return err
- }
-
- case lnwire.FundingFlag:
- if err := util.WriteBin(w, byteOrder, e); err != nil {
- return err
- }
-
- case net.Addr:
- if err := serializeAddr(w, e); err != nil {
- return err
- }
-
- case []net.Addr:
- if err := WriteElement(w, uint32(len(e))); err != nil {
- return err
- }
-
- for _, addr := range e {
- if err := serializeAddr(w, addr); err != nil {
- return err
- }
- }
-
- default:
- return er.E(UnknownElementType{"WriteElement", e})
- }
-
- return nil
-}
-
-// WriteElements is writes each element in the elements slice to the passed
-// io.Writer using WriteElement.
-func WriteElements(w io.Writer, elements ...interface{}) er.R {
- for _, element := range elements {
- err := WriteElement(w, element)
- if err != nil {
- return err
- }
- }
- return nil
-}
-
-// ReadElement is a one-stop utility function to deserialize any datastructure
-// encoded using the serialization format of the database.
-func ReadElement(r io.Reader, element interface{}) er.R {
- switch e := element.(type) {
- case *keychain.KeyDescriptor:
- if err := util.ReadBin(r, byteOrder, &e.Family); err != nil {
- return err
- }
- if err := util.ReadBin(r, byteOrder, &e.Index); err != nil {
- return err
- }
-
- var hasPubKey bool
- if err := util.ReadBin(r, byteOrder, &hasPubKey); err != nil {
- return err
- }
-
- if hasPubKey {
- return ReadElement(r, &e.PubKey)
- }
-
- case *ChannelType:
- if err := util.ReadBin(r, byteOrder, e); err != nil {
- return err
- }
-
- case *chainhash.Hash:
- if _, err := util.ReadFull(r, e[:]); err != nil {
- return err
- }
-
- case *wire.OutPoint:
- return readOutpoint(r, e)
-
- case *lnwire.ShortChannelID:
- var a uint64
- if err := util.ReadBin(r, byteOrder, &a); err != nil {
- return err
- }
- *e = lnwire.NewShortChanIDFromInt(a)
-
- case *lnwire.ChannelID:
- if _, err := util.ReadFull(r, e[:]); err != nil {
- return err
- }
-
- case *int64, *uint64:
- if err := util.ReadBin(r, byteOrder, e); err != nil {
- return err
- }
-
- case *uint32:
- if err := util.ReadBin(r, byteOrder, e); err != nil {
- return err
- }
-
- case *int32:
- if err := util.ReadBin(r, byteOrder, e); err != nil {
- return err
- }
-
- case *uint16:
- if err := util.ReadBin(r, byteOrder, e); err != nil {
- return err
- }
-
- case *uint8:
- if err := util.ReadBin(r, byteOrder, e); err != nil {
- return err
- }
-
- case *bool:
- if err := util.ReadBin(r, byteOrder, e); err != nil {
- return err
- }
-
- case *btcutil.Amount:
- var a uint64
- if err := util.ReadBin(r, byteOrder, &a); err != nil {
- return err
- }
-
- *e = btcutil.Amount(a)
-
- case *lnwire.MilliSatoshi:
- var a uint64
- if err := util.ReadBin(r, byteOrder, &a); err != nil {
- return err
- }
-
- *e = lnwire.MilliSatoshi(a)
-
- case **btcec.PrivateKey:
- var b [btcec.PrivKeyBytesLen]byte
- if _, err := util.ReadFull(r, b[:]); err != nil {
- return err
- }
-
- priv, _ := btcec.PrivKeyFromBytes(btcec.S256(), b[:])
- *e = priv
-
- case **btcec.PublicKey:
- var b [btcec.PubKeyBytesLenCompressed]byte
- if _, err := util.ReadFull(r, b[:]); err != nil {
- return err
- }
-
- pubKey, err := btcec.ParsePubKey(b[:], btcec.S256())
- if err != nil {
- return err
- }
- *e = pubKey
-
- case *shachain.Producer:
- var root [32]byte
- if _, err := util.ReadFull(r, root[:]); err != nil {
- return err
- }
-
- // TODO(roasbeef): remove
- producer, err := shachain.NewRevocationProducerFromBytes(root[:])
- if err != nil {
- return err
- }
-
- *e = producer
-
- case *shachain.Store:
- store, err := shachain.NewRevocationStoreFromBytes(r)
- if err != nil {
- return err
- }
-
- *e = store
-
- case **wire.MsgTx:
- tx := wire.NewMsgTx(2)
- if err := tx.Deserialize(r); err != nil {
- return err
- }
-
- *e = tx
-
- case *[32]byte:
- if _, err := util.ReadFull(r, e[:]); err != nil {
- return err
- }
-
- case *[]byte:
- bytes, err := wire.ReadVarBytes(r, 0, 66000, "[]byte")
- if err != nil {
- return err
- }
-
- *e = bytes
-
- case *lnwire.Message:
- msg, err := lnwire.ReadMessage(r, 0)
- if err != nil {
- return err
- }
-
- *e = msg
-
- case *ChannelStatus:
- if err := util.ReadBin(r, byteOrder, e); err != nil {
- return err
- }
-
- case *ClosureType:
- if err := util.ReadBin(r, byteOrder, e); err != nil {
- return err
- }
-
- case *lnwire.FundingFlag:
- if err := util.ReadBin(r, byteOrder, e); err != nil {
- return err
- }
-
- case *net.Addr:
- addr, err := deserializeAddr(r)
- if err != nil {
- return err
- }
- *e = addr
-
- case *[]net.Addr:
- var numAddrs uint32
- if err := ReadElement(r, &numAddrs); err != nil {
- return err
- }
-
- *e = make([]net.Addr, numAddrs)
- for i := uint32(0); i < numAddrs; i++ {
- addr, err := deserializeAddr(r)
- if err != nil {
- return err
- }
- (*e)[i] = addr
- }
-
- default:
- return er.E(UnknownElementType{"ReadElement", e})
- }
-
- return nil
-}
-
-// ReadElements deserializes a variable number of elements into the passed
-// io.Reader, with each element being deserialized according to the ReadElement
-// function.
-func ReadElements(r io.Reader, elements ...interface{}) er.R {
- for _, element := range elements {
- err := ReadElement(r, element)
- if err != nil {
- return err
- }
- }
- return nil
-}
diff --git a/lnd/channeldb/migration_01_to_11/db.go b/lnd/channeldb/migration_01_to_11/db.go
deleted file mode 100644
index 50e072d2..00000000
--- a/lnd/channeldb/migration_01_to_11/db.go
+++ /dev/null
@@ -1,218 +0,0 @@
-package migration_01_to_11
-
-import (
- "bytes"
- "encoding/binary"
- "os"
- "path/filepath"
- "time"
-
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/lnd/channeldb/kvdb"
-)
-
-const (
- dbName = "channel.db"
- dbFilePermission = 0600
-)
-
-// migration is a function which takes a prior outdated version of the database
-// instances and mutates the key/bucket structure to arrive at a more
-// up-to-date version of the database.
-type migration func(tx kvdb.RwTx) er.R
-
-var (
- // Big endian is the preferred byte order, due to cursor scans over
- // integer keys iterating in order.
- byteOrder = binary.BigEndian
-)
-
-// DB is the primary datastore for the lnd daemon. The database stores
-// information related to nodes, routing data, open/closed channels, fee
-// schedules, and reputation data.
-type DB struct {
- kvdb.Backend
- dbPath string
- graph *ChannelGraph
- now func() time.Time
-}
-
-// Open opens an existing channeldb. Any necessary schemas migrations due to
-// updates will take place as necessary.
-func Open(dbPath string, modifiers ...OptionModifier) (*DB, er.R) {
- path := filepath.Join(dbPath, dbName)
-
- if !fileExists(path) {
- if err := createChannelDB(dbPath); err != nil {
- return nil, err
- }
- }
-
- opts := DefaultOptions()
- for _, modifier := range modifiers {
- modifier(&opts)
- }
-
- // Specify bbolt freelist options to reduce heap pressure in case the
- // freelist grows to be very large.
- bdb, err := kvdb.Open(kvdb.BoltBackendName, path, opts.NoFreelistSync)
- if err != nil {
- return nil, err
- }
-
- chanDB := &DB{
- Backend: bdb,
- dbPath: dbPath,
- now: time.Now,
- }
- chanDB.graph = newChannelGraph(
- chanDB, opts.RejectCacheSize, opts.ChannelCacheSize,
- )
-
- return chanDB, nil
-}
-
-// createChannelDB creates and initializes a fresh version of channeldb. In
-// the case that the target path has not yet been created or doesn't yet exist,
-// then the path is created. Additionally, all required top-level buckets used
-// within the database are created.
-func createChannelDB(dbPath string) er.R {
- if !fileExists(dbPath) {
- if err := os.MkdirAll(dbPath, 0700); err != nil {
- return er.E(err)
- }
- }
-
- path := filepath.Join(dbPath, dbName)
- bdb, err := kvdb.Create(kvdb.BoltBackendName, path, false)
- if err != nil {
- return err
- }
-
- errr := kvdb.Update(bdb, func(tx kvdb.RwTx) er.R {
- if _, err := tx.CreateTopLevelBucket(openChannelBucket); err != nil {
- return err
- }
- if _, err := tx.CreateTopLevelBucket(closedChannelBucket); err != nil {
- return err
- }
-
- if _, err := tx.CreateTopLevelBucket(invoiceBucket); err != nil {
- return err
- }
-
- if _, err := tx.CreateTopLevelBucket(paymentBucket); err != nil {
- return err
- }
-
- nodes, err := tx.CreateTopLevelBucket(nodeBucket)
- if err != nil {
- return err
- }
- _, err = nodes.CreateBucket(aliasIndexBucket)
- if err != nil {
- return err
- }
- _, err = nodes.CreateBucket(nodeUpdateIndexBucket)
- if err != nil {
- return err
- }
-
- edges, err := tx.CreateTopLevelBucket(edgeBucket)
- if err != nil {
- return err
- }
- if _, err := edges.CreateBucket(edgeIndexBucket); err != nil {
- return err
- }
- if _, err := edges.CreateBucket(edgeUpdateIndexBucket); err != nil {
- return err
- }
- if _, err := edges.CreateBucket(channelPointBucket); err != nil {
- return err
- }
- if _, err := edges.CreateBucket(zombieBucket); err != nil {
- return err
- }
-
- graphMeta, err := tx.CreateTopLevelBucket(graphMetaBucket)
- if err != nil {
- return err
- }
- _, err = graphMeta.CreateBucket(pruneLogBucket)
- if err != nil {
- return err
- }
-
- if _, err := tx.CreateTopLevelBucket(metaBucket); err != nil {
- return err
- }
-
- meta := &Meta{
- DbVersionNumber: 0,
- }
- return putMeta(meta, tx)
- }, func() {})
- if errr != nil {
- return er.Errorf("unable to create new channeldb")
- }
-
- return bdb.Close()
-}
-
-// fileExists returns true if the file exists, and false otherwise.
-func fileExists(path string) bool {
- if _, err := os.Stat(path); err != nil {
- if os.IsNotExist(err) {
- return false
- }
- }
-
- return true
-}
-
-// FetchClosedChannels attempts to fetch all closed channels from the database.
-// The pendingOnly bool toggles if channels that aren't yet fully closed should
-// be returned in the response or not. When a channel was cooperatively closed,
-// it becomes fully closed after a single confirmation. When a channel was
-// forcibly closed, it will become fully closed after _all_ the pending funds
-// (if any) have been swept.
-func (d *DB) FetchClosedChannels(pendingOnly bool) ([]*ChannelCloseSummary, er.R) {
- var chanSummaries []*ChannelCloseSummary
-
- if err := kvdb.View(d, func(tx kvdb.RTx) er.R {
- closeBucket := tx.ReadBucket(closedChannelBucket)
- if closeBucket == nil {
- return ErrNoClosedChannels.Default()
- }
-
- return closeBucket.ForEach(func(chanID []byte, summaryBytes []byte) er.R {
- summaryReader := bytes.NewReader(summaryBytes)
- chanSummary, err := deserializeCloseChannelSummary(summaryReader)
- if err != nil {
- return err
- }
-
- // If the query specified to only include pending
- // channels, then we'll skip any channels which aren't
- // currently pending.
- if !chanSummary.IsPending && pendingOnly {
- return nil
- }
-
- chanSummaries = append(chanSummaries, chanSummary)
- return nil
- })
- }, func() {
- chanSummaries = nil
- }); err != nil {
- return nil, err
- }
-
- return chanSummaries, nil
-}
-
-// ChannelGraph returns a new instance of the directed channel graph.
-func (d *DB) ChannelGraph() *ChannelGraph {
- return d.graph
-}
diff --git a/lnd/channeldb/migration_01_to_11/error.go b/lnd/channeldb/migration_01_to_11/error.go
deleted file mode 100644
index 69cee0de..00000000
--- a/lnd/channeldb/migration_01_to_11/error.go
+++ /dev/null
@@ -1,63 +0,0 @@
-package migration_01_to_11
-
-import "github.com/pkt-cash/pktd/btcutil/er"
-
-var (
- Err = er.NewErrorType("migration_01_to_11")
- // ErrNoInvoicesCreated is returned when we don't have invoices in
- // our database to return.
- ErrNoInvoicesCreated = Err.CodeWithDetail("ErrNoInvoicesCreated",
- "there are no existing invoices")
-
- // ErrNoPaymentsCreated is returned when bucket of payments hasn't been
- // created.
- ErrNoPaymentsCreated = Err.CodeWithDetail("ErrNoPaymentsCreated",
- "there are no existing payments")
-
- // ErrGraphNotFound is returned when at least one of the components of
- // graph doesn't exist.
- ErrGraphNotFound = Err.CodeWithDetail("ErrGraphNotFound",
- "graph bucket not initialized")
-
- // ErrSourceNodeNotSet is returned if the source node of the graph
- // hasn't been added The source node is the center node within a
- // star-graph.
- ErrSourceNodeNotSet = Err.CodeWithDetail("ErrSourceNodeNotSet",
- "source node does not exist")
-
- // ErrGraphNodeNotFound is returned when we're unable to find the target
- // node.
- ErrGraphNodeNotFound = Err.CodeWithDetail("ErrGraphNodeNotFound",
- "unable to find node")
-
- // ErrEdgeNotFound is returned when an edge for the target chanID
- // can't be found.
- ErrEdgeNotFound = Err.CodeWithDetail("ErrEdgeNotFound",
- "edge not found")
-
- // ErrUnknownAddressType is returned when a node's addressType is not
- // an expected value.
- ErrUnknownAddressType = Err.CodeWithDetail("ErrUnknownAddressType",
- "address type cannot be resolved")
-
- // ErrNoClosedChannels is returned when a node is queries for all the
- // channels it has closed, but it hasn't yet closed any channels.
- ErrNoClosedChannels = Err.CodeWithDetail("ErrNoClosedChannels",
- "no channel have been closed yet")
-
- // ErrEdgePolicyOptionalFieldNotFound is an error returned if a channel
- // policy field is not found in the db even though its message flags
- // indicate it should be.
- ErrEdgePolicyOptionalFieldNotFound = Err.CodeWithDetail("ErrEdgePolicyOptionalFieldNotFound",
- "optional field not present")
-)
-
-// ErrTooManyExtraOpaqueBytes creates an error which should be returned if the
-// caller attempts to write an announcement message which bares too many extra
-// opaque bytes. We limit this value in order to ensure that we don't waste
-// disk space due to nodes unnecessarily padding out their announcements with
-// garbage data.
-func ErrTooManyExtraOpaqueBytes(numBytes int) er.R {
- return er.Errorf("max allowed number of opaque bytes is %v, received "+
- "%v bytes", MaxAllowedExtraOpaqueBytes, numBytes)
-}
diff --git a/lnd/channeldb/migration_01_to_11/graph.go b/lnd/channeldb/migration_01_to_11/graph.go
deleted file mode 100644
index 07b6d246..00000000
--- a/lnd/channeldb/migration_01_to_11/graph.go
+++ /dev/null
@@ -1,1181 +0,0 @@
-package migration_01_to_11
-
-import (
- "bytes"
- "image/color"
- "io"
- "net"
- "time"
-
- "github.com/pkt-cash/pktd/btcec"
- "github.com/pkt-cash/pktd/btcutil"
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/btcutil/util"
- "github.com/pkt-cash/pktd/chaincfg/chainhash"
- "github.com/pkt-cash/pktd/lnd/channeldb/kvdb"
- "github.com/pkt-cash/pktd/lnd/lnwire"
- "github.com/pkt-cash/pktd/wire"
-)
-
-var (
- // nodeBucket is a bucket which houses all the vertices or nodes within
- // the channel graph. This bucket has a single-sub bucket which adds an
- // additional index from pubkey -> alias. Within the top-level of this
- // bucket, the key space maps a node's compressed public key to the
- // serialized information for that node. Additionally, there's a
- // special key "source" which stores the pubkey of the source node. The
- // source node is used as the starting point for all graph/queries and
- // traversals. The graph is formed as a star-graph with the source node
- // at the center.
- //
- // maps: pubKey -> nodeInfo
- // maps: source -> selfPubKey
- nodeBucket = []byte("graph-node")
-
- // nodeUpdateIndexBucket is a sub-bucket of the nodeBucket. This bucket
- // will be used to quickly look up the "freshness" of a node's last
- // update to the network. The bucket only contains keys, and no values,
- // it's mapping:
- //
- // maps: updateTime || nodeID -> nil
- nodeUpdateIndexBucket = []byte("graph-node-update-index")
-
- // sourceKey is a special key that resides within the nodeBucket. The
- // sourceKey maps a key to the public key of the "self node".
- sourceKey = []byte("source")
-
- // aliasIndexBucket is a sub-bucket that's nested within the main
- // nodeBucket. This bucket maps the public key of a node to its
- // current alias. This bucket is provided as it can be used within a
- // future UI layer to add an additional degree of confirmation.
- aliasIndexBucket = []byte("alias")
-
- // edgeBucket is a bucket which houses all of the edge or channel
- // information within the channel graph. This bucket essentially acts
- // as an adjacency list, which in conjunction with a range scan, can be
- // used to iterate over all the incoming and outgoing edges for a
- // particular node. Key in the bucket use a prefix scheme which leads
- // with the node's public key and sends with the compact edge ID.
- // For each chanID, there will be two entries within the bucket, as the
- // graph is directed: nodes may have different policies w.r.t to fees
- // for their respective directions.
- //
- // maps: pubKey || chanID -> channel edge policy for node
- edgeBucket = []byte("graph-edge")
-
- // unknownPolicy is represented as an empty slice. It is
- // used as the value in edgeBucket for unknown channel edge policies.
- // Unknown policies are still stored in the database to enable efficient
- // lookup of incoming channel edges.
- unknownPolicy = []byte{}
-
- // edgeIndexBucket is an index which can be used to iterate all edges
- // in the bucket, grouping them according to their in/out nodes.
- // Additionally, the items in this bucket also contain the complete
- // edge information for a channel. The edge information includes the
- // capacity of the channel, the nodes that made the channel, etc. This
- // bucket resides within the edgeBucket above. Creation of an edge
- // proceeds in two phases: first the edge is added to the edge index,
- // afterwards the edgeBucket can be updated with the latest details of
- // the edge as they are announced on the network.
- //
- // maps: chanID -> pubKey1 || pubKey2 || restofEdgeInfo
- edgeIndexBucket = []byte("edge-index")
-
- // edgeUpdateIndexBucket is a sub-bucket of the main edgeBucket. This
- // bucket contains an index which allows us to gauge the "freshness" of
- // a channel's last updates.
- //
- // maps: updateTime || chanID -> nil
- edgeUpdateIndexBucket = []byte("edge-update-index")
-
- // channelPointBucket maps a channel's full outpoint (txid:index) to
- // its short 8-byte channel ID. This bucket resides within the
- // edgeBucket above, and can be used to quickly remove an edge due to
- // the outpoint being spent, or to query for existence of a channel.
- //
- // maps: outPoint -> chanID
- channelPointBucket = []byte("chan-index")
-
- // zombieBucket is a sub-bucket of the main edgeBucket bucket
- // responsible for maintaining an index of zombie channels. Each entry
- // exists within the bucket as follows:
- //
- // maps: chanID -> pubKey1 || pubKey2
- //
- // The chanID represents the channel ID of the edge that is marked as a
- // zombie and is used as the key, which maps to the public keys of the
- // edge's participants.
- zombieBucket = []byte("zombie-index")
-
- // disabledEdgePolicyBucket is a sub-bucket of the main edgeBucket bucket
- // responsible for maintaining an index of disabled edge policies. Each
- // entry exists within the bucket as follows:
- //
- // maps: -> []byte{}
- //
- // The chanID represents the channel ID of the edge and the direction is
- // one byte representing the direction of the edge. The main purpose of
- // this index is to allow pruning disabled channels in a fast way without
- // the need to iterate all over the graph.
- disabledEdgePolicyBucket = []byte("disabled-edge-policy-index")
-
- // graphMetaBucket is a top-level bucket which stores various meta-deta
- // related to the on-disk channel graph. Data stored in this bucket
- // includes the block to which the graph has been synced to, the total
- // number of channels, etc.
- graphMetaBucket = []byte("graph-meta")
-
- // pruneLogBucket is a bucket within the graphMetaBucket that stores
- // a mapping from the block height to the hash for the blocks used to
- // prune the graph.
- // Once a new block is discovered, any channels that have been closed
- // (by spending the outpoint) can safely be removed from the graph, and
- // the block is added to the prune log. We need to keep such a log for
- // the case where a reorg happens, and we must "rewind" the state of the
- // graph by removing channels that were previously confirmed. In such a
- // case we'll remove all entries from the prune log with a block height
- // that no longer exists.
- pruneLogBucket = []byte("prune-log")
-)
-
-const (
- // MaxAllowedExtraOpaqueBytes is the largest amount of opaque bytes that
- // we'll permit to be written to disk. We limit this as otherwise, it
- // would be possible for a node to create a ton of updates and slowly
- // fill our disk, and also waste bandwidth due to relaying.
- MaxAllowedExtraOpaqueBytes = 10000
-)
-
-// ChannelGraph is a persistent, on-disk graph representation of the Lightning
-// Network. This struct can be used to implement path finding algorithms on top
-// of, and also to update a node's view based on information received from the
-// p2p network. Internally, the graph is stored using a modified adjacency list
-// representation with some added object interaction possible with each
-// serialized edge/node. The graph is stored is directed, meaning that are two
-// edges stored for each channel: an inbound/outbound edge for each node pair.
-// Nodes, edges, and edge information can all be added to the graph
-// independently. Edge removal results in the deletion of all edge information
-// for that edge.
-type ChannelGraph struct {
- db *DB
-}
-
-// newChannelGraph allocates a new ChannelGraph backed by a DB instance. The
-// returned instance has its own unique reject cache and channel cache.
-func newChannelGraph(db *DB, rejectCacheSize, chanCacheSize int) *ChannelGraph {
- return &ChannelGraph{
- db: db,
- }
-}
-
-// SourceNode returns the source node of the graph. The source node is treated
-// as the center node within a star-graph. This method may be used to kick off
-// a path finding algorithm in order to explore the reachability of another
-// node based off the source node.
-func (c *ChannelGraph) SourceNode() (*LightningNode, er.R) {
- var source *LightningNode
- err := kvdb.View(c.db, func(tx kvdb.RTx) er.R {
- // First grab the nodes bucket which stores the mapping from
- // pubKey to node information.
- nodes := tx.ReadBucket(nodeBucket)
- if nodes == nil {
- return ErrGraphNotFound.Default()
- }
-
- node, err := c.sourceNode(nodes)
- if err != nil {
- return err
- }
- source = node
-
- return nil
- }, func() {
- source = nil
- })
- if err != nil {
- return nil, err
- }
-
- return source, nil
-}
-
-// sourceNode uses an existing database transaction and returns the source node
-// of the graph. The source node is treated as the center node within a
-// star-graph. This method may be used to kick off a path finding algorithm in
-// order to explore the reachability of another node based off the source node.
-func (c *ChannelGraph) sourceNode(nodes kvdb.RBucket) (*LightningNode, er.R) {
- selfPub := nodes.Get(sourceKey)
- if selfPub == nil {
- return nil, ErrSourceNodeNotSet.Default()
- }
-
- // With the pubKey of the source node retrieved, we're able to
- // fetch the full node information.
- node, err := fetchLightningNode(nodes, selfPub)
- if err != nil {
- return nil, err
- }
- node.db = c.db
-
- return &node, nil
-}
-
-// SetSourceNode sets the source node within the graph database. The source
-// node is to be used as the center of a star-graph within path finding
-// algorithms.
-func (c *ChannelGraph) SetSourceNode(node *LightningNode) er.R {
- nodePubBytes := node.PubKeyBytes[:]
-
- return kvdb.Update(c.db, func(tx kvdb.RwTx) er.R {
- // First grab the nodes bucket which stores the mapping from
- // pubKey to node information.
- nodes, err := tx.CreateTopLevelBucket(nodeBucket)
- if err != nil {
- return err
- }
-
- // Next we create the mapping from source to the targeted
- // public key.
- if err := nodes.Put(sourceKey, nodePubBytes); err != nil {
- return err
- }
-
- // Finally, we commit the information of the lightning node
- // itself.
- return addLightningNode(tx, node)
- }, func() {})
-}
-
-func addLightningNode(tx kvdb.RwTx, node *LightningNode) er.R {
- nodes, err := tx.CreateTopLevelBucket(nodeBucket)
- if err != nil {
- return err
- }
-
- aliases, err := nodes.CreateBucketIfNotExists(aliasIndexBucket)
- if err != nil {
- return err
- }
-
- updateIndex, err := nodes.CreateBucketIfNotExists(
- nodeUpdateIndexBucket,
- )
- if err != nil {
- return err
- }
-
- return putLightningNode(nodes, aliases, updateIndex, node)
-}
-
-// updateEdgePolicy attempts to update an edge's policy within the relevant
-// buckets using an existing database transaction. The returned boolean will be
-// true if the updated policy belongs to node1, and false if the policy belonged
-// to node2.
-func updateEdgePolicy(tx kvdb.RwTx, edge *ChannelEdgePolicy) (bool, er.R) {
- edges, err := tx.CreateTopLevelBucket(edgeBucket)
- if err != nil {
- return false, ErrEdgeNotFound.Default()
-
- }
- edgeIndex := edges.NestedReadWriteBucket(edgeIndexBucket)
- if edgeIndex == nil {
- return false, ErrEdgeNotFound.Default()
- }
- nodes, err := tx.CreateTopLevelBucket(nodeBucket)
- if err != nil {
- return false, err
- }
-
- // Create the channelID key be converting the channel ID
- // integer into a byte slice.
- var chanID [8]byte
- byteOrder.PutUint64(chanID[:], edge.ChannelID)
-
- // With the channel ID, we then fetch the value storing the two
- // nodes which connect this channel edge.
- nodeInfo := edgeIndex.Get(chanID[:])
- if nodeInfo == nil {
- return false, ErrEdgeNotFound.Default()
- }
-
- // Depending on the flags value passed above, either the first
- // or second edge policy is being updated.
- var fromNode, toNode []byte
- var isUpdate1 bool
- if edge.ChannelFlags&lnwire.ChanUpdateDirection == 0 {
- fromNode = nodeInfo[:33]
- toNode = nodeInfo[33:66]
- isUpdate1 = true
- } else {
- fromNode = nodeInfo[33:66]
- toNode = nodeInfo[:33]
- isUpdate1 = false
- }
-
- // Finally, with the direction of the edge being updated
- // identified, we update the on-disk edge representation.
- errr := putChanEdgePolicy(edges, nodes, edge, fromNode, toNode)
- if errr != nil {
- return false, errr
- }
-
- return isUpdate1, nil
-}
-
-// LightningNode represents an individual vertex/node within the channel graph.
-// A node is connected to other nodes by one or more channel edges emanating
-// from it. As the graph is directed, a node will also have an incoming edge
-// attached to it for each outgoing edge.
-type LightningNode struct {
- // PubKeyBytes is the raw bytes of the public key of the target node.
- PubKeyBytes [33]byte
- pubKey *btcec.PublicKey
-
- // HaveNodeAnnouncement indicates whether we received a node
- // announcement for this particular node. If true, the remaining fields
- // will be set, if false only the PubKey is known for this node.
- HaveNodeAnnouncement bool
-
- // LastUpdate is the last time the vertex information for this node has
- // been updated.
- LastUpdate time.Time
-
- // Address is the TCP address this node is reachable over.
- Addresses []net.Addr
-
- // Color is the selected color for the node.
- Color color.RGBA
-
- // Alias is a nick-name for the node. The alias can be used to confirm
- // a node's identity or to serve as a short ID for an address book.
- Alias string
-
- // AuthSigBytes is the raw signature under the advertised public key
- // which serves to authenticate the attributes announced by this node.
- AuthSigBytes []byte
-
- // Features is the list of protocol features supported by this node.
- Features *lnwire.FeatureVector
-
- // ExtraOpaqueData is the set of data that was appended to this
- // message, some of which we may not actually know how to iterate or
- // parse. By holding onto this data, we ensure that we're able to
- // properly validate the set of signatures that cover these new fields,
- // and ensure we're able to make upgrades to the network in a forwards
- // compatible manner.
- ExtraOpaqueData []byte
-
- db *DB
-
- // TODO(roasbeef): discovery will need storage to keep it's last IP
- // address and re-announce if interface changes?
-
- // TODO(roasbeef): add update method and fetch?
-}
-
-// PubKey is the node's long-term identity public key. This key will be used to
-// authenticated any advertisements/updates sent by the node.
-//
-// NOTE: By having this method to access an attribute, we ensure we only need
-// to fully deserialize the pubkey if absolutely necessary.
-func (l *LightningNode) PubKey() (*btcec.PublicKey, er.R) {
- if l.pubKey != nil {
- return l.pubKey, nil
- }
-
- key, err := btcec.ParsePubKey(l.PubKeyBytes[:], btcec.S256())
- if err != nil {
- return nil, err
- }
- l.pubKey = key
-
- return key, nil
-}
-
-// ChannelEdgeInfo represents a fully authenticated channel along with all its
-// unique attributes. Once an authenticated channel announcement has been
-// processed on the network, then an instance of ChannelEdgeInfo encapsulating
-// the channels attributes is stored. The other portions relevant to routing
-// policy of a channel are stored within a ChannelEdgePolicy for each direction
-// of the channel.
-type ChannelEdgeInfo struct {
- // ChannelID is the unique channel ID for the channel. The first 3
- // bytes are the block height, the next 3 the index within the block,
- // and the last 2 bytes are the output index for the channel.
- ChannelID uint64
-
- // ChainHash is the hash that uniquely identifies the chain that this
- // channel was opened within.
- //
- // TODO(roasbeef): need to modify db keying for multi-chain
- // * must add chain hash to prefix as well
- ChainHash chainhash.Hash
-
- // NodeKey1Bytes is the raw public key of the first node.
- NodeKey1Bytes [33]byte
-
- // NodeKey2Bytes is the raw public key of the first node.
- NodeKey2Bytes [33]byte
-
- // BitcoinKey1Bytes is the raw public key of the first node.
- BitcoinKey1Bytes [33]byte
-
- // BitcoinKey2Bytes is the raw public key of the first node.
- BitcoinKey2Bytes [33]byte
-
- // Features is an opaque byte slice that encodes the set of channel
- // specific features that this channel edge supports.
- Features []byte
-
- // AuthProof is the authentication proof for this channel. This proof
- // contains a set of signatures binding four identities, which attests
- // to the legitimacy of the advertised channel.
- AuthProof *ChannelAuthProof
-
- // ChannelPoint is the funding outpoint of the channel. This can be
- // used to uniquely identify the channel within the channel graph.
- ChannelPoint wire.OutPoint
-
- // Capacity is the total capacity of the channel, this is determined by
- // the value output in the outpoint that created this channel.
- Capacity btcutil.Amount
-
- // ExtraOpaqueData is the set of data that was appended to this
- // message, some of which we may not actually know how to iterate or
- // parse. By holding onto this data, we ensure that we're able to
- // properly validate the set of signatures that cover these new fields,
- // and ensure we're able to make upgrades to the network in a forwards
- // compatible manner.
- ExtraOpaqueData []byte
-}
-
-// ChannelAuthProof is the authentication proof (the signature portion) for a
-// channel. Using the four signatures contained in the struct, and some
-// auxiliary knowledge (the funding script, node identities, and outpoint) nodes
-// on the network are able to validate the authenticity and existence of a
-// channel. Each of these signatures signs the following digest: chanID ||
-// nodeID1 || nodeID2 || bitcoinKey1|| bitcoinKey2 || 2-byte-feature-len ||
-// features.
-type ChannelAuthProof struct {
- // NodeSig1Bytes are the raw bytes of the first node signature encoded
- // in DER format.
- NodeSig1Bytes []byte
-
- // NodeSig2Bytes are the raw bytes of the second node signature
- // encoded in DER format.
- NodeSig2Bytes []byte
-
- // BitcoinSig1Bytes are the raw bytes of the first bitcoin signature
- // encoded in DER format.
- BitcoinSig1Bytes []byte
-
- // BitcoinSig2Bytes are the raw bytes of the second bitcoin signature
- // encoded in DER format.
- BitcoinSig2Bytes []byte
-}
-
-// IsEmpty check is the authentication proof is empty Proof is empty if at
-// least one of the signatures are equal to nil.
-func (c *ChannelAuthProof) IsEmpty() bool {
- return len(c.NodeSig1Bytes) == 0 ||
- len(c.NodeSig2Bytes) == 0 ||
- len(c.BitcoinSig1Bytes) == 0 ||
- len(c.BitcoinSig2Bytes) == 0
-}
-
-// ChannelEdgePolicy represents a *directed* edge within the channel graph. For
-// each channel in the database, there are two distinct edges: one for each
-// possible direction of travel along the channel. The edges themselves hold
-// information concerning fees, and minimum time-lock information which is
-// utilized during path finding.
-type ChannelEdgePolicy struct {
- // SigBytes is the raw bytes of the signature of the channel edge
- // policy. We'll only parse these if the caller needs to access the
- // signature for validation purposes. Do not set SigBytes directly, but
- // use SetSigBytes instead to make sure that the cache is invalidated.
- SigBytes []byte
-
- // ChannelID is the unique channel ID for the channel. The first 3
- // bytes are the block height, the next 3 the index within the block,
- // and the last 2 bytes are the output index for the channel.
- ChannelID uint64
-
- // LastUpdate is the last time an authenticated edge for this channel
- // was received.
- LastUpdate time.Time
-
- // MessageFlags is a bitfield which indicates the presence of optional
- // fields (like max_htlc) in the policy.
- MessageFlags lnwire.ChanUpdateMsgFlags
-
- // ChannelFlags is a bitfield which signals the capabilities of the
- // channel as well as the directed edge this update applies to.
- ChannelFlags lnwire.ChanUpdateChanFlags
-
- // TimeLockDelta is the number of blocks this node will subtract from
- // the expiry of an incoming HTLC. This value expresses the time buffer
- // the node would like to HTLC exchanges.
- TimeLockDelta uint16
-
- // MinHTLC is the smallest value HTLC this node will accept, expressed
- // in millisatoshi.
- MinHTLC lnwire.MilliSatoshi
-
- // MaxHTLC is the largest value HTLC this node will accept, expressed
- // in millisatoshi.
- MaxHTLC lnwire.MilliSatoshi
-
- // FeeBaseMSat is the base HTLC fee that will be charged for forwarding
- // ANY HTLC, expressed in mSAT's.
- FeeBaseMSat lnwire.MilliSatoshi
-
- // FeeProportionalMillionths is the rate that the node will charge for
- // HTLCs for each millionth of a satoshi forwarded.
- FeeProportionalMillionths lnwire.MilliSatoshi
-
- // Node is the LightningNode that this directed edge leads to. Using
- // this pointer the channel graph can further be traversed.
- Node *LightningNode
-
- // ExtraOpaqueData is the set of data that was appended to this
- // message, some of which we may not actually know how to iterate or
- // parse. By holding onto this data, we ensure that we're able to
- // properly validate the set of signatures that cover these new fields,
- // and ensure we're able to make upgrades to the network in a forwards
- // compatible manner.
- ExtraOpaqueData []byte
-}
-
-// IsDisabled determines whether the edge has the disabled bit set.
-func (c *ChannelEdgePolicy) IsDisabled() bool {
- return c.ChannelFlags&lnwire.ChanUpdateDisabled ==
- lnwire.ChanUpdateDisabled
-}
-
-func putLightningNode(nodeBucket kvdb.RwBucket, aliasBucket kvdb.RwBucket,
- updateIndex kvdb.RwBucket, node *LightningNode) er.R {
-
- var (
- scratch [16]byte
- b bytes.Buffer
- )
-
- pub, err := node.PubKey()
- if err != nil {
- return err
- }
- nodePub := pub.SerializeCompressed()
-
- // If the node has the update time set, write it, else write 0.
- updateUnix := uint64(0)
- if node.LastUpdate.Unix() > 0 {
- updateUnix = uint64(node.LastUpdate.Unix())
- }
-
- byteOrder.PutUint64(scratch[:8], updateUnix)
- if _, err := b.Write(scratch[:8]); err != nil {
- return er.E(err)
- }
-
- if _, err := b.Write(nodePub); err != nil {
- return er.E(err)
- }
-
- // If we got a node announcement for this node, we will have the rest
- // of the data available. If not we don't have more data to write.
- if !node.HaveNodeAnnouncement {
- // Write HaveNodeAnnouncement=0.
- byteOrder.PutUint16(scratch[:2], 0)
- if _, err := b.Write(scratch[:2]); err != nil {
- return er.E(err)
- }
-
- return nodeBucket.Put(nodePub, b.Bytes())
- }
-
- // Write HaveNodeAnnouncement=1.
- byteOrder.PutUint16(scratch[:2], 1)
- if _, err := b.Write(scratch[:2]); err != nil {
- return er.E(err)
- }
-
- if err := util.WriteBin(&b, byteOrder, node.Color.R); err != nil {
- return err
- }
- if err := util.WriteBin(&b, byteOrder, node.Color.G); err != nil {
- return err
- }
- if err := util.WriteBin(&b, byteOrder, node.Color.B); err != nil {
- return err
- }
-
- if err := wire.WriteVarString(&b, 0, node.Alias); err != nil {
- return err
- }
-
- if err := node.Features.Encode(&b); err != nil {
- return err
- }
-
- numAddresses := uint16(len(node.Addresses))
- byteOrder.PutUint16(scratch[:2], numAddresses)
- if _, err := b.Write(scratch[:2]); err != nil {
- return er.E(err)
- }
-
- for _, address := range node.Addresses {
- if err := serializeAddr(&b, address); err != nil {
- return err
- }
- }
-
- sigLen := len(node.AuthSigBytes)
- if sigLen > 80 {
- return er.Errorf("max sig len allowed is 80, had %v",
- sigLen)
- }
-
- err = wire.WriteVarBytes(&b, 0, node.AuthSigBytes)
- if err != nil {
- return err
- }
-
- if len(node.ExtraOpaqueData) > MaxAllowedExtraOpaqueBytes {
- return ErrTooManyExtraOpaqueBytes(len(node.ExtraOpaqueData))
- }
- err = wire.WriteVarBytes(&b, 0, node.ExtraOpaqueData)
- if err != nil {
- return err
- }
-
- if err := aliasBucket.Put(nodePub, []byte(node.Alias)); err != nil {
- return err
- }
-
- // With the alias bucket updated, we'll now update the index that
- // tracks the time series of node updates.
- var indexKey [8 + 33]byte
- byteOrder.PutUint64(indexKey[:8], updateUnix)
- copy(indexKey[8:], nodePub)
-
- // If there was already an old index entry for this node, then we'll
- // delete the old one before we write the new entry.
- if nodeBytes := nodeBucket.Get(nodePub); nodeBytes != nil {
- // Extract out the old update time to we can reconstruct the
- // prior index key to delete it from the index.
- oldUpdateTime := nodeBytes[:8]
-
- var oldIndexKey [8 + 33]byte
- copy(oldIndexKey[:8], oldUpdateTime)
- copy(oldIndexKey[8:], nodePub)
-
- if err := updateIndex.Delete(oldIndexKey[:]); err != nil {
- return err
- }
- }
-
- if err := updateIndex.Put(indexKey[:], nil); err != nil {
- return err
- }
-
- return nodeBucket.Put(nodePub, b.Bytes())
-}
-
-func fetchLightningNode(nodeBucket kvdb.RBucket,
- nodePub []byte) (LightningNode, er.R) {
-
- nodeBytes := nodeBucket.Get(nodePub)
- if nodeBytes == nil {
- return LightningNode{}, ErrGraphNodeNotFound.Default()
- }
-
- nodeReader := bytes.NewReader(nodeBytes)
- return deserializeLightningNode(nodeReader)
-}
-
-func deserializeLightningNode(r io.Reader) (LightningNode, er.R) {
- var (
- node LightningNode
- scratch [8]byte
- err er.R
- )
-
- if _, err := r.Read(scratch[:]); err != nil {
- return LightningNode{}, er.E(err)
- }
-
- unix := int64(byteOrder.Uint64(scratch[:]))
- node.LastUpdate = time.Unix(unix, 0)
-
- if _, err := util.ReadFull(r, node.PubKeyBytes[:]); err != nil {
- return LightningNode{}, err
- }
-
- if _, err := r.Read(scratch[:2]); err != nil {
- return LightningNode{}, er.E(err)
- }
-
- hasNodeAnn := byteOrder.Uint16(scratch[:2])
- if hasNodeAnn == 1 {
- node.HaveNodeAnnouncement = true
- } else {
- node.HaveNodeAnnouncement = false
- }
-
- // The rest of the data is optional, and will only be there if we got a node
- // announcement for this node.
- if !node.HaveNodeAnnouncement {
- return node, nil
- }
-
- // We did get a node announcement for this node, so we'll have the rest
- // of the data available.
- if err := util.ReadBin(r, byteOrder, &node.Color.R); err != nil {
- return LightningNode{}, err
- }
- if err := util.ReadBin(r, byteOrder, &node.Color.G); err != nil {
- return LightningNode{}, err
- }
- if err := util.ReadBin(r, byteOrder, &node.Color.B); err != nil {
- return LightningNode{}, err
- }
-
- node.Alias, err = wire.ReadVarString(r, 0)
- if err != nil {
- return LightningNode{}, err
- }
-
- fv := lnwire.NewFeatureVector(nil, nil)
- err = fv.Decode(r)
- if err != nil {
- return LightningNode{}, err
- }
- node.Features = fv
-
- if _, err := r.Read(scratch[:2]); err != nil {
- return LightningNode{}, er.E(err)
- }
- numAddresses := int(byteOrder.Uint16(scratch[:2]))
-
- var addresses []net.Addr
- for i := 0; i < numAddresses; i++ {
- address, err := deserializeAddr(r)
- if err != nil {
- return LightningNode{}, err
- }
- addresses = append(addresses, address)
- }
- node.Addresses = addresses
-
- node.AuthSigBytes, err = wire.ReadVarBytes(r, 0, 80, "sig")
- if err != nil {
- return LightningNode{}, err
- }
-
- // We'll try and see if there are any opaque bytes left, if not, then
- // we'll ignore the EOF error and return the node as is.
- node.ExtraOpaqueData, err = wire.ReadVarBytes(
- r, 0, MaxAllowedExtraOpaqueBytes, "blob",
- )
- switch {
- case er.Wrapped(err) == io.ErrUnexpectedEOF:
- case er.Wrapped(err) == io.EOF:
- case err != nil:
- return LightningNode{}, err
- }
-
- return node, nil
-}
-
-func deserializeChanEdgeInfo(r io.Reader) (ChannelEdgeInfo, er.R) {
- var (
- err er.R
- edgeInfo ChannelEdgeInfo
- )
-
- if _, err := util.ReadFull(r, edgeInfo.NodeKey1Bytes[:]); err != nil {
- return ChannelEdgeInfo{}, err
- }
- if _, err := util.ReadFull(r, edgeInfo.NodeKey2Bytes[:]); err != nil {
- return ChannelEdgeInfo{}, err
- }
- if _, err := util.ReadFull(r, edgeInfo.BitcoinKey1Bytes[:]); err != nil {
- return ChannelEdgeInfo{}, err
- }
- if _, err := util.ReadFull(r, edgeInfo.BitcoinKey2Bytes[:]); err != nil {
- return ChannelEdgeInfo{}, err
- }
-
- edgeInfo.Features, err = wire.ReadVarBytes(r, 0, 900, "features")
- if err != nil {
- return ChannelEdgeInfo{}, err
- }
-
- proof := &ChannelAuthProof{}
-
- proof.NodeSig1Bytes, err = wire.ReadVarBytes(r, 0, 80, "sigs")
- if err != nil {
- return ChannelEdgeInfo{}, err
- }
- proof.NodeSig2Bytes, err = wire.ReadVarBytes(r, 0, 80, "sigs")
- if err != nil {
- return ChannelEdgeInfo{}, err
- }
- proof.BitcoinSig1Bytes, err = wire.ReadVarBytes(r, 0, 80, "sigs")
- if err != nil {
- return ChannelEdgeInfo{}, err
- }
- proof.BitcoinSig2Bytes, err = wire.ReadVarBytes(r, 0, 80, "sigs")
- if err != nil {
- return ChannelEdgeInfo{}, err
- }
-
- if !proof.IsEmpty() {
- edgeInfo.AuthProof = proof
- }
-
- edgeInfo.ChannelPoint = wire.OutPoint{}
- if err := readOutpoint(r, &edgeInfo.ChannelPoint); err != nil {
- return ChannelEdgeInfo{}, err
- }
- if err := util.ReadBin(r, byteOrder, &edgeInfo.Capacity); err != nil {
- return ChannelEdgeInfo{}, err
- }
- if err := util.ReadBin(r, byteOrder, &edgeInfo.ChannelID); err != nil {
- return ChannelEdgeInfo{}, err
- }
-
- if _, err := util.ReadFull(r, edgeInfo.ChainHash[:]); err != nil {
- return ChannelEdgeInfo{}, err
- }
-
- // We'll try and see if there are any opaque bytes left, if not, then
- // we'll ignore the EOF error and return the edge as is.
- edgeInfo.ExtraOpaqueData, err = wire.ReadVarBytes(
- r, 0, MaxAllowedExtraOpaqueBytes, "blob",
- )
- switch {
- case er.Wrapped(err) == io.ErrUnexpectedEOF:
- case er.Wrapped(err) == io.EOF:
- case err != nil:
- return ChannelEdgeInfo{}, err
- }
-
- return edgeInfo, nil
-}
-
-func putChanEdgePolicy(edges, nodes kvdb.RwBucket, edge *ChannelEdgePolicy,
- from, to []byte) er.R {
-
- var edgeKey [33 + 8]byte
- copy(edgeKey[:], from)
- byteOrder.PutUint64(edgeKey[33:], edge.ChannelID)
-
- var b bytes.Buffer
- if err := serializeChanEdgePolicy(&b, edge, to); err != nil {
- return err
- }
-
- // Before we write out the new edge, we'll create a new entry in the
- // update index in order to keep it fresh.
- updateUnix := uint64(edge.LastUpdate.Unix())
- var indexKey [8 + 8]byte
- byteOrder.PutUint64(indexKey[:8], updateUnix)
- byteOrder.PutUint64(indexKey[8:], edge.ChannelID)
-
- updateIndex, err := edges.CreateBucketIfNotExists(edgeUpdateIndexBucket)
- if err != nil {
- return err
- }
-
- // If there was already an entry for this edge, then we'll need to
- // delete the old one to ensure we don't leave around any after-images.
- // An unknown policy value does not have a update time recorded, so
- // it also does not need to be removed.
- if edgeBytes := edges.Get(edgeKey[:]); edgeBytes != nil &&
- !bytes.Equal(edgeBytes[:], unknownPolicy) {
-
- // In order to delete the old entry, we'll need to obtain the
- // *prior* update time in order to delete it. To do this, we'll
- // need to deserialize the existing policy within the database
- // (now outdated by the new one), and delete its corresponding
- // entry within the update index. We'll ignore any
- // ErrEdgePolicyOptionalFieldNotFound error, as we only need
- // the channel ID and update time to delete the entry.
- // TODO(halseth): get rid of these invalid policies in a
- // migration.
- oldEdgePolicy, err := deserializeChanEdgePolicy(
- bytes.NewReader(edgeBytes), nodes,
- )
- if err != nil && !ErrEdgePolicyOptionalFieldNotFound.Is(err) {
- return err
- }
-
- oldUpdateTime := uint64(oldEdgePolicy.LastUpdate.Unix())
-
- var oldIndexKey [8 + 8]byte
- byteOrder.PutUint64(oldIndexKey[:8], oldUpdateTime)
- byteOrder.PutUint64(oldIndexKey[8:], edge.ChannelID)
-
- if err := updateIndex.Delete(oldIndexKey[:]); err != nil {
- return err
- }
- }
-
- if err := updateIndex.Put(indexKey[:], nil); err != nil {
- return err
- }
-
- updateEdgePolicyDisabledIndex(
- edges, edge.ChannelID,
- edge.ChannelFlags&lnwire.ChanUpdateDirection > 0,
- edge.IsDisabled(),
- )
-
- return edges.Put(edgeKey[:], b.Bytes()[:])
-}
-
-// updateEdgePolicyDisabledIndex is used to update the disabledEdgePolicyIndex
-// bucket by either add a new disabled ChannelEdgePolicy or remove an existing
-// one.
-// The direction represents the direction of the edge and disabled is used for
-// deciding whether to remove or add an entry to the bucket.
-// In general a channel is disabled if two entries for the same chanID exist
-// in this bucket.
-// Maintaining the bucket this way allows a fast retrieval of disabled
-// channels, for example when prune is needed.
-func updateEdgePolicyDisabledIndex(edges kvdb.RwBucket, chanID uint64,
- direction bool, disabled bool) er.R {
-
- var disabledEdgeKey [8 + 1]byte
- byteOrder.PutUint64(disabledEdgeKey[0:], chanID)
- if direction {
- disabledEdgeKey[8] = 1
- }
-
- disabledEdgePolicyIndex, err := edges.CreateBucketIfNotExists(
- disabledEdgePolicyBucket,
- )
- if err != nil {
- return err
- }
-
- if disabled {
- return disabledEdgePolicyIndex.Put(disabledEdgeKey[:], []byte{})
- }
-
- return disabledEdgePolicyIndex.Delete(disabledEdgeKey[:])
-}
-
-// putChanEdgePolicyUnknown marks the edge policy as unknown
-// in the edges bucket.
-func putChanEdgePolicyUnknown(edges kvdb.RwBucket, channelID uint64,
- from []byte) er.R {
-
- var edgeKey [33 + 8]byte
- copy(edgeKey[:], from)
- byteOrder.PutUint64(edgeKey[33:], channelID)
-
- if edges.Get(edgeKey[:]) != nil {
- return er.Errorf("Cannot write unknown policy for channel %v "+
- " when there is already a policy present", channelID)
- }
-
- return edges.Put(edgeKey[:], unknownPolicy)
-}
-
-func fetchChanEdgePolicy(edges kvdb.RBucket, chanID []byte,
- nodePub []byte, nodes kvdb.RBucket) (*ChannelEdgePolicy, er.R) {
-
- var edgeKey [33 + 8]byte
- copy(edgeKey[:], nodePub)
- copy(edgeKey[33:], chanID[:])
-
- edgeBytes := edges.Get(edgeKey[:])
- if edgeBytes == nil {
- return nil, ErrEdgeNotFound.Default()
- }
-
- // No need to deserialize unknown policy.
- if bytes.Equal(edgeBytes[:], unknownPolicy) {
- return nil, nil
- }
-
- edgeReader := bytes.NewReader(edgeBytes)
-
- ep, err := deserializeChanEdgePolicy(edgeReader, nodes)
- switch {
- // If the db policy was missing an expected optional field, we return
- // nil as if the policy was unknown.
- case ErrEdgePolicyOptionalFieldNotFound.Is(err):
- return nil, nil
-
- case err != nil:
- return nil, err
- }
-
- return ep, nil
-}
-
-func serializeChanEdgePolicy(w io.Writer, edge *ChannelEdgePolicy,
- to []byte) er.R {
-
- err := wire.WriteVarBytes(w, 0, edge.SigBytes)
- if err != nil {
- return err
- }
-
- if err := util.WriteBin(w, byteOrder, edge.ChannelID); err != nil {
- return err
- }
-
- var scratch [8]byte
- updateUnix := uint64(edge.LastUpdate.Unix())
- byteOrder.PutUint64(scratch[:], updateUnix)
- if _, err := util.Write(w, scratch[:]); err != nil {
- return err
- }
-
- if err := util.WriteBin(w, byteOrder, edge.MessageFlags); err != nil {
- return err
- }
- if err := util.WriteBin(w, byteOrder, edge.ChannelFlags); err != nil {
- return err
- }
- if err := util.WriteBin(w, byteOrder, edge.TimeLockDelta); err != nil {
- return err
- }
- if err := util.WriteBin(w, byteOrder, uint64(edge.MinHTLC)); err != nil {
- return err
- }
- if err := util.WriteBin(w, byteOrder, uint64(edge.FeeBaseMSat)); err != nil {
- return err
- }
- if err := util.WriteBin(w, byteOrder, uint64(edge.FeeProportionalMillionths)); err != nil {
- return err
- }
-
- if _, err := util.Write(w, to); err != nil {
- return err
- }
-
- // If the max_htlc field is present, we write it. To be compatible with
- // older versions that wasn't aware of this field, we write it as part
- // of the opaque data.
- // TODO(halseth): clean up when moving to TLV.
- var opaqueBuf bytes.Buffer
- if edge.MessageFlags.HasMaxHtlc() {
- err := util.WriteBin(&opaqueBuf, byteOrder, uint64(edge.MaxHTLC))
- if err != nil {
- return err
- }
- }
-
- if len(edge.ExtraOpaqueData) > MaxAllowedExtraOpaqueBytes {
- return ErrTooManyExtraOpaqueBytes(len(edge.ExtraOpaqueData))
- }
- if _, err := opaqueBuf.Write(edge.ExtraOpaqueData); err != nil {
- return er.E(err)
- }
-
- if err := wire.WriteVarBytes(w, 0, opaqueBuf.Bytes()); err != nil {
- return err
- }
- return nil
-}
-
-func deserializeChanEdgePolicy(r io.Reader,
- nodes kvdb.RBucket) (*ChannelEdgePolicy, er.R) {
-
- edge := &ChannelEdgePolicy{}
-
- var err er.R
- edge.SigBytes, err = wire.ReadVarBytes(r, 0, 80, "sig")
- if err != nil {
- return nil, err
- }
-
- if err := util.ReadBin(r, byteOrder, &edge.ChannelID); err != nil {
- return nil, err
- }
-
- var scratch [8]byte
- if _, err := r.Read(scratch[:]); err != nil {
- return nil, er.E(err)
- }
- unix := int64(byteOrder.Uint64(scratch[:]))
- edge.LastUpdate = time.Unix(unix, 0)
-
- if err := util.ReadBin(r, byteOrder, &edge.MessageFlags); err != nil {
- return nil, err
- }
- if err := util.ReadBin(r, byteOrder, &edge.ChannelFlags); err != nil {
- return nil, err
- }
- if err := util.ReadBin(r, byteOrder, &edge.TimeLockDelta); err != nil {
- return nil, err
- }
-
- var n uint64
- if err := util.ReadBin(r, byteOrder, &n); err != nil {
- return nil, err
- }
- edge.MinHTLC = lnwire.MilliSatoshi(n)
-
- if err := util.ReadBin(r, byteOrder, &n); err != nil {
- return nil, err
- }
- edge.FeeBaseMSat = lnwire.MilliSatoshi(n)
-
- if err := util.ReadBin(r, byteOrder, &n); err != nil {
- return nil, err
- }
- edge.FeeProportionalMillionths = lnwire.MilliSatoshi(n)
-
- var pub [33]byte
- if _, err := r.Read(pub[:]); err != nil {
- return nil, er.E(err)
- }
-
- node, err := fetchLightningNode(nodes, pub[:])
- if err != nil {
- return nil, er.Errorf("unable to fetch node: %x, %v",
- pub[:], err)
- }
- edge.Node = &node
-
- // We'll try and see if there are any opaque bytes left, if not, then
- // we'll ignore the EOF error and return the edge as is.
- edge.ExtraOpaqueData, err = wire.ReadVarBytes(
- r, 0, MaxAllowedExtraOpaqueBytes, "blob",
- )
- switch {
- case er.Wrapped(err) == io.ErrUnexpectedEOF:
- case er.Wrapped(err) == io.EOF:
- case err != nil:
- return nil, err
- }
-
- // See if optional fields are present.
- if edge.MessageFlags.HasMaxHtlc() {
- // The max_htlc field should be at the beginning of the opaque
- // bytes.
- opq := edge.ExtraOpaqueData
-
- // If the max_htlc field is not present, it might be old data
- // stored before this field was validated. We'll return the
- // edge along with an error.
- if len(opq) < 8 {
- return edge, ErrEdgePolicyOptionalFieldNotFound.Default()
- }
-
- maxHtlc := byteOrder.Uint64(opq[:8])
- edge.MaxHTLC = lnwire.MilliSatoshi(maxHtlc)
-
- // Exclude the parsed field from the rest of the opaque data.
- edge.ExtraOpaqueData = opq[8:]
- }
-
- return edge, nil
-}
diff --git a/lnd/channeldb/migration_01_to_11/graph_test.go b/lnd/channeldb/migration_01_to_11/graph_test.go
deleted file mode 100644
index a403c6ec..00000000
--- a/lnd/channeldb/migration_01_to_11/graph_test.go
+++ /dev/null
@@ -1,58 +0,0 @@
-package migration_01_to_11
-
-import (
- "image/color"
- "math/big"
- prand "math/rand"
- "net"
- "time"
-
- "github.com/pkt-cash/pktd/btcec"
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/lnd/lnwire"
-)
-
-var (
- testAddr = &net.TCPAddr{IP: (net.IP)([]byte{0xA, 0x0, 0x0, 0x1}),
- Port: 9000}
- anotherAddr, _ = net.ResolveTCPAddr("tcp",
- "[2001:db8:85a3:0:0:8a2e:370:7334]:80")
- testAddrs = []net.Addr{testAddr, anotherAddr}
-
- testSig = &btcec.Signature{
- R: new(big.Int),
- S: new(big.Int),
- }
- _, _ = testSig.R.SetString("63724406601629180062774974542967536251589935445068131219452686511677818569431", 10)
- _, _ = testSig.S.SetString("18801056069249825825291287104931333862866033135609736119018462340006816851118", 10)
-
- testFeatures = lnwire.NewFeatureVector(nil, nil)
-)
-
-func createLightningNode(db *DB, priv *btcec.PrivateKey) (*LightningNode, er.R) {
- updateTime := prand.Int63()
-
- pub := priv.PubKey().SerializeCompressed()
- n := &LightningNode{
- HaveNodeAnnouncement: true,
- AuthSigBytes: testSig.Serialize(),
- LastUpdate: time.Unix(updateTime, 0),
- Color: color.RGBA{1, 2, 3, 0},
- Alias: "kek" + string(pub[:]),
- Features: testFeatures,
- Addresses: testAddrs,
- db: db,
- }
- copy(n.PubKeyBytes[:], priv.PubKey().SerializeCompressed())
-
- return n, nil
-}
-
-func createTestVertex(db *DB) (*LightningNode, er.R) {
- priv, err := btcec.NewPrivateKey(btcec.S256())
- if err != nil {
- return nil, err
- }
-
- return createLightningNode(db, priv)
-}
diff --git a/lnd/channeldb/migration_01_to_11/invoices.go b/lnd/channeldb/migration_01_to_11/invoices.go
deleted file mode 100644
index d3639edc..00000000
--- a/lnd/channeldb/migration_01_to_11/invoices.go
+++ /dev/null
@@ -1,552 +0,0 @@
-package migration_01_to_11
-
-import (
- "bytes"
- "io"
- "time"
-
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/btcutil/util"
- "github.com/pkt-cash/pktd/lnd/channeldb/kvdb"
- "github.com/pkt-cash/pktd/lnd/lntypes"
- "github.com/pkt-cash/pktd/lnd/lnwire"
- "github.com/pkt-cash/pktd/lnd/tlv"
- "github.com/pkt-cash/pktd/wire"
-)
-
-var (
-
- // invoiceBucket is the name of the bucket within the database that
- // stores all data related to invoices no matter their final state.
- // Within the invoice bucket, each invoice is keyed by its invoice ID
- // which is a monotonically increasing uint32.
- invoiceBucket = []byte("invoices")
-
- // addIndexBucket is an index bucket that we'll use to create a
- // monotonically increasing set of add indexes. Each time we add a new
- // invoice, this sequence number will be incremented and then populated
- // within the new invoice.
- //
- // In addition to this sequence number, we map:
- //
- // addIndexNo => invoiceKey
- addIndexBucket = []byte("invoice-add-index")
-
- // settleIndexBucket is an index bucket that we'll use to create a
- // monotonically increasing integer for tracking a "settle index". Each
- // time an invoice is settled, this sequence number will be incremented
- // as populate within the newly settled invoice.
- //
- // In addition to this sequence number, we map:
- //
- // settleIndexNo => invoiceKey
- settleIndexBucket = []byte("invoice-settle-index")
-)
-
-const (
- // MaxMemoSize is maximum size of the memo field within invoices stored
- // in the database.
- MaxMemoSize = 1024
-
- // MaxReceiptSize is the maximum size of the payment receipt stored
- // within the database along side incoming/outgoing invoices.
- MaxReceiptSize = 1024
-
- // MaxPaymentRequestSize is the max size of a payment request for
- // this invoice.
- // TODO(halseth): determine the max length payment request when field
- // lengths are final.
- MaxPaymentRequestSize = 4096
-
- // A set of tlv type definitions used to serialize invoice htlcs to the
- // database.
- chanIDType tlv.Type = 1
- htlcIDType tlv.Type = 3
- amtType tlv.Type = 5
- acceptHeightType tlv.Type = 7
- acceptTimeType tlv.Type = 9
- resolveTimeType tlv.Type = 11
- expiryHeightType tlv.Type = 13
- stateType tlv.Type = 15
-)
-
-// ContractState describes the state the invoice is in.
-type ContractState uint8
-
-const (
- // ContractOpen means the invoice has only been created.
- ContractOpen ContractState = 0
-
- // ContractSettled means the htlc is settled and the invoice has been
- // paid.
- ContractSettled ContractState = 1
-
- // ContractCanceled means the invoice has been canceled.
- ContractCanceled ContractState = 2
-
- // ContractAccepted means the HTLC has been accepted but not settled
- // yet.
- ContractAccepted ContractState = 3
-)
-
-// String returns a human readable identifier for the ContractState type.
-func (c ContractState) String() string {
- switch c {
- case ContractOpen:
- return "Open"
- case ContractSettled:
- return "Settled"
- case ContractCanceled:
- return "Canceled"
- case ContractAccepted:
- return "Accepted"
- }
-
- return "Unknown"
-}
-
-// ContractTerm is a companion struct to the Invoice struct. This struct houses
-// the necessary conditions required before the invoice can be considered fully
-// settled by the payee.
-type ContractTerm struct {
- // PaymentPreimage is the preimage which is to be revealed in the
- // occasion that an HTLC paying to the hash of this preimage is
- // extended.
- PaymentPreimage lntypes.Preimage
-
- // Value is the expected amount of milli-satoshis to be paid to an HTLC
- // which can be satisfied by the above preimage.
- Value lnwire.MilliSatoshi
-
- // State describes the state the invoice is in.
- State ContractState
-}
-
-// Invoice is a payment invoice generated by a payee in order to request
-// payment for some good or service. The inclusion of invoices within Lightning
-// creates a payment work flow for merchants very similar to that of the
-// existing financial system within PayPal, etc. Invoices are added to the
-// database when a payment is requested, then can be settled manually once the
-// payment is received at the upper layer. For record keeping purposes,
-// invoices are never deleted from the database, instead a bit is toggled
-// denoting the invoice has been fully settled. Within the database, all
-// invoices must have a unique payment hash which is generated by taking the
-// sha256 of the payment preimage.
-type Invoice struct {
- // Memo is an optional memo to be stored along side an invoice. The
- // memo may contain further details pertaining to the invoice itself,
- // or any other message which fits within the size constraints.
- Memo []byte
-
- // Receipt is an optional field dedicated for storing a
- // cryptographically binding receipt of payment.
- //
- // TODO(roasbeef): document scheme.
- Receipt []byte
-
- // PaymentRequest is an optional field where a payment request created
- // for this invoice can be stored.
- PaymentRequest []byte
-
- // FinalCltvDelta is the minimum required number of blocks before htlc
- // expiry when the invoice is accepted.
- FinalCltvDelta int32
-
- // Expiry defines how long after creation this invoice should expire.
- Expiry time.Duration
-
- // CreationDate is the exact time the invoice was created.
- CreationDate time.Time
-
- // SettleDate is the exact time the invoice was settled.
- SettleDate time.Time
-
- // Terms are the contractual payment terms of the invoice. Once all the
- // terms have been satisfied by the payer, then the invoice can be
- // considered fully fulfilled.
- //
- // TODO(roasbeef): later allow for multiple terms to fulfill the final
- // invoice: payment fragmentation, etc.
- Terms ContractTerm
-
- // AddIndex is an auto-incrementing integer that acts as a
- // monotonically increasing sequence number for all invoices created.
- // Clients can then use this field as a "checkpoint" of sorts when
- // implementing a streaming RPC to notify consumers of instances where
- // an invoice has been added before they re-connected.
- //
- // NOTE: This index starts at 1.
- AddIndex uint64
-
- // SettleIndex is an auto-incrementing integer that acts as a
- // monotonically increasing sequence number for all settled invoices.
- // Clients can then use this field as a "checkpoint" of sorts when
- // implementing a streaming RPC to notify consumers of instances where
- // an invoice has been settled before they re-connected.
- //
- // NOTE: This index starts at 1.
- SettleIndex uint64
-
- // AmtPaid is the final amount that we ultimately accepted for pay for
- // this invoice. We specify this value independently as it's possible
- // that the invoice originally didn't specify an amount, or the sender
- // overpaid.
- AmtPaid lnwire.MilliSatoshi
-
- // Htlcs records all htlcs that paid to this invoice. Some of these
- // htlcs may have been marked as canceled.
- Htlcs map[CircuitKey]*InvoiceHTLC
-}
-
-// HtlcState defines the states an htlc paying to an invoice can be in.
-type HtlcState uint8
-
-// InvoiceHTLC contains details about an htlc paying to this invoice.
-type InvoiceHTLC struct {
- // Amt is the amount that is carried by this htlc.
- Amt lnwire.MilliSatoshi
-
- // AcceptHeight is the block height at which the invoice registry
- // decided to accept this htlc as a payment to the invoice. At this
- // height, the invoice cltv delay must have been met.
- AcceptHeight uint32
-
- // AcceptTime is the wall clock time at which the invoice registry
- // decided to accept the htlc.
- AcceptTime time.Time
-
- // ResolveTime is the wall clock time at which the invoice registry
- // decided to settle the htlc.
- ResolveTime time.Time
-
- // Expiry is the expiry height of this htlc.
- Expiry uint32
-
- // State indicates the state the invoice htlc is currently in. A
- // canceled htlc isn't just removed from the invoice htlcs map, because
- // we need AcceptHeight to properly cancel the htlc back.
- State HtlcState
-}
-
-func validateInvoice(i *Invoice) er.R {
- if len(i.Memo) > MaxMemoSize {
- return er.Errorf("max length a memo is %v, and invoice "+
- "of length %v was provided", MaxMemoSize, len(i.Memo))
- }
- if len(i.Receipt) > MaxReceiptSize {
- return er.Errorf("max length a receipt is %v, and invoice "+
- "of length %v was provided", MaxReceiptSize,
- len(i.Receipt))
- }
- if len(i.PaymentRequest) > MaxPaymentRequestSize {
- return er.Errorf("max length of payment request is %v, length "+
- "provided was %v", MaxPaymentRequestSize,
- len(i.PaymentRequest))
- }
- return nil
-}
-
-// FetchAllInvoices returns all invoices currently stored within the database.
-// If the pendingOnly param is true, then only unsettled invoices will be
-// returned, skipping all invoices that are fully settled.
-func (d *DB) FetchAllInvoices(pendingOnly bool) ([]Invoice, er.R) {
- var invoices []Invoice
-
- err := kvdb.View(d, func(tx kvdb.RTx) er.R {
- invoiceB := tx.ReadBucket(invoiceBucket)
- if invoiceB == nil {
- return ErrNoInvoicesCreated.Default()
- }
-
- // Iterate through the entire key space of the top-level
- // invoice bucket. If key with a non-nil value stores the next
- // invoice ID which maps to the corresponding invoice.
- return invoiceB.ForEach(func(k, v []byte) er.R {
- if v == nil {
- return nil
- }
-
- invoiceReader := bytes.NewReader(v)
- invoice, err := deserializeInvoice(invoiceReader)
- if err != nil {
- return err
- }
-
- if pendingOnly &&
- invoice.Terms.State == ContractSettled {
-
- return nil
- }
-
- invoices = append(invoices, invoice)
-
- return nil
- })
- }, func() {
- invoices = nil
- })
- if err != nil {
- return nil, err
- }
-
- return invoices, nil
-}
-
-// serializeInvoice serializes an invoice to a writer.
-//
-// Note: this function is in use for a migration. Before making changes that
-// would modify the on disk format, make a copy of the original code and store
-// it with the migration.
-func serializeInvoice(w io.Writer, i *Invoice) er.R {
- if err := wire.WriteVarBytes(w, 0, i.Memo[:]); err != nil {
- return err
- }
- if err := wire.WriteVarBytes(w, 0, i.Receipt[:]); err != nil {
- return err
- }
- if err := wire.WriteVarBytes(w, 0, i.PaymentRequest[:]); err != nil {
- return err
- }
-
- if err := util.WriteBin(w, byteOrder, i.FinalCltvDelta); err != nil {
- return err
- }
-
- if err := util.WriteBin(w, byteOrder, int64(i.Expiry)); err != nil {
- return err
- }
-
- birthBytes, err := i.CreationDate.MarshalBinary()
- if err != nil {
- return er.E(err)
- }
-
- if err := wire.WriteVarBytes(w, 0, birthBytes); err != nil {
- return err
- }
-
- settleBytes, err := i.SettleDate.MarshalBinary()
- if err != nil {
- return er.E(err)
- }
-
- if err := wire.WriteVarBytes(w, 0, settleBytes); err != nil {
- return err
- }
-
- if _, err := util.Write(w, i.Terms.PaymentPreimage[:]); err != nil {
- return err
- }
-
- var scratch [8]byte
- byteOrder.PutUint64(scratch[:], uint64(i.Terms.Value))
- if _, err := util.Write(w, scratch[:]); err != nil {
- return err
- }
-
- if err := util.WriteBin(w, byteOrder, i.Terms.State); err != nil {
- return err
- }
-
- if err := util.WriteBin(w, byteOrder, i.AddIndex); err != nil {
- return err
- }
- if err := util.WriteBin(w, byteOrder, i.SettleIndex); err != nil {
- return err
- }
- if err := util.WriteBin(w, byteOrder, int64(i.AmtPaid)); err != nil {
- return err
- }
-
- if err := serializeHtlcs(w, i.Htlcs); err != nil {
- return err
- }
-
- return nil
-}
-
-// serializeHtlcs serializes a map containing circuit keys and invoice htlcs to
-// a writer.
-func serializeHtlcs(w io.Writer, htlcs map[CircuitKey]*InvoiceHTLC) er.R {
- for key, htlc := range htlcs {
- // Encode the htlc in a tlv stream.
- chanID := key.ChanID.ToUint64()
- amt := uint64(htlc.Amt)
- acceptTime := uint64(htlc.AcceptTime.UnixNano())
- resolveTime := uint64(htlc.ResolveTime.UnixNano())
- state := uint8(htlc.State)
-
- tlvStream, err := tlv.NewStream(
- tlv.MakePrimitiveRecord(chanIDType, &chanID),
- tlv.MakePrimitiveRecord(htlcIDType, &key.HtlcID),
- tlv.MakePrimitiveRecord(amtType, &amt),
- tlv.MakePrimitiveRecord(
- acceptHeightType, &htlc.AcceptHeight,
- ),
- tlv.MakePrimitiveRecord(acceptTimeType, &acceptTime),
- tlv.MakePrimitiveRecord(resolveTimeType, &resolveTime),
- tlv.MakePrimitiveRecord(expiryHeightType, &htlc.Expiry),
- tlv.MakePrimitiveRecord(stateType, &state),
- )
- if err != nil {
- return err
- }
-
- var b bytes.Buffer
- if err := tlvStream.Encode(&b); err != nil {
- return err
- }
-
- // Write the length of the tlv stream followed by the stream
- // bytes.
- err = util.WriteBin(w, byteOrder, uint64(b.Len()))
- if err != nil {
- return err
- }
-
- if _, err := util.Write(w, b.Bytes()); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func deserializeInvoice(r io.Reader) (Invoice, er.R) {
- var err er.R
- invoice := Invoice{}
-
- // TODO(roasbeef): use read full everywhere
- invoice.Memo, err = wire.ReadVarBytes(r, 0, MaxMemoSize, "")
- if err != nil {
- return invoice, err
- }
- invoice.Receipt, err = wire.ReadVarBytes(r, 0, MaxReceiptSize, "")
- if err != nil {
- return invoice, err
- }
-
- invoice.PaymentRequest, err = wire.ReadVarBytes(r, 0, MaxPaymentRequestSize, "")
- if err != nil {
- return invoice, err
- }
-
- if err := util.ReadBin(r, byteOrder, &invoice.FinalCltvDelta); err != nil {
- return invoice, err
- }
-
- var expiry int64
- if err := util.ReadBin(r, byteOrder, &expiry); err != nil {
- return invoice, err
- }
- invoice.Expiry = time.Duration(expiry)
-
- birthBytes, err := wire.ReadVarBytes(r, 0, 300, "birth")
- if err != nil {
- return invoice, err
- }
- if err := invoice.CreationDate.UnmarshalBinary(birthBytes); err != nil {
- return invoice, er.E(err)
- }
-
- settledBytes, err := wire.ReadVarBytes(r, 0, 300, "settled")
- if err != nil {
- return invoice, err
- }
- if err := invoice.SettleDate.UnmarshalBinary(settledBytes); err != nil {
- return invoice, er.E(err)
- }
-
- if _, err := util.ReadFull(r, invoice.Terms.PaymentPreimage[:]); err != nil {
- return invoice, err
- }
- var scratch [8]byte
- if _, err := util.ReadFull(r, scratch[:]); err != nil {
- return invoice, err
- }
- invoice.Terms.Value = lnwire.MilliSatoshi(byteOrder.Uint64(scratch[:]))
-
- if err := util.ReadBin(r, byteOrder, &invoice.Terms.State); err != nil {
- return invoice, err
- }
-
- if err := util.ReadBin(r, byteOrder, &invoice.AddIndex); err != nil {
- return invoice, err
- }
- if err := util.ReadBin(r, byteOrder, &invoice.SettleIndex); err != nil {
- return invoice, err
- }
- if err := util.ReadBin(r, byteOrder, &invoice.AmtPaid); err != nil {
- return invoice, err
- }
-
- invoice.Htlcs, err = deserializeHtlcs(r)
- if err != nil {
- return Invoice{}, err
- }
-
- return invoice, nil
-}
-
-// deserializeHtlcs reads a list of invoice htlcs from a reader and returns it
-// as a map.
-func deserializeHtlcs(r io.Reader) (map[CircuitKey]*InvoiceHTLC, er.R) {
- htlcs := make(map[CircuitKey]*InvoiceHTLC, 0)
-
- for {
- // Read the length of the tlv stream for this htlc.
- var streamLen uint64
- if err := util.ReadBin(r, byteOrder, &streamLen); err != nil {
- if er.Wrapped(err) == io.EOF {
- break
- }
-
- return nil, err
- }
-
- streamBytes := make([]byte, streamLen)
- if _, err := r.Read(streamBytes); err != nil {
- return nil, er.E(err)
- }
- streamReader := bytes.NewReader(streamBytes)
-
- // Decode the contents into the htlc fields.
- var (
- htlc InvoiceHTLC
- key CircuitKey
- chanID uint64
- state uint8
- acceptTime, resolveTime uint64
- amt uint64
- )
- tlvStream, err := tlv.NewStream(
- tlv.MakePrimitiveRecord(chanIDType, &chanID),
- tlv.MakePrimitiveRecord(htlcIDType, &key.HtlcID),
- tlv.MakePrimitiveRecord(amtType, &amt),
- tlv.MakePrimitiveRecord(
- acceptHeightType, &htlc.AcceptHeight,
- ),
- tlv.MakePrimitiveRecord(acceptTimeType, &acceptTime),
- tlv.MakePrimitiveRecord(resolveTimeType, &resolveTime),
- tlv.MakePrimitiveRecord(expiryHeightType, &htlc.Expiry),
- tlv.MakePrimitiveRecord(stateType, &state),
- )
- if err != nil {
- return nil, err
- }
-
- if err := tlvStream.Decode(streamReader); err != nil {
- return nil, err
- }
-
- key.ChanID = lnwire.NewShortChanIDFromInt(chanID)
- htlc.AcceptTime = time.Unix(0, int64(acceptTime))
- htlc.ResolveTime = time.Unix(0, int64(resolveTime))
- htlc.State = HtlcState(state)
- htlc.Amt = lnwire.MilliSatoshi(amt)
-
- htlcs[key] = &htlc
- }
-
- return htlcs, nil
-}
diff --git a/lnd/channeldb/migration_01_to_11/legacy_serialization.go b/lnd/channeldb/migration_01_to_11/legacy_serialization.go
deleted file mode 100644
index d96aa6bc..00000000
--- a/lnd/channeldb/migration_01_to_11/legacy_serialization.go
+++ /dev/null
@@ -1,57 +0,0 @@
-package migration_01_to_11
-
-import (
- "io"
-
- "github.com/pkt-cash/pktd/btcutil/er"
-)
-
-// deserializeCloseChannelSummaryV6 reads the v6 database format for
-// ChannelCloseSummary.
-//
-// NOTE: deprecated, only for migration.
-func deserializeCloseChannelSummaryV6(r io.Reader) (*ChannelCloseSummary, er.R) {
- c := &ChannelCloseSummary{}
-
- err := ReadElements(r,
- &c.ChanPoint, &c.ShortChanID, &c.ChainHash, &c.ClosingTXID,
- &c.CloseHeight, &c.RemotePub, &c.Capacity, &c.SettledBalance,
- &c.TimeLockedBalance, &c.CloseType, &c.IsPending,
- )
- if err != nil {
- return nil, err
- }
-
- // We'll now check to see if the channel close summary was encoded with
- // any of the additional optional fields.
- err = ReadElements(r, &c.RemoteCurrentRevocation)
- switch {
- case er.Wrapped(err) == io.EOF:
- return c, nil
-
- // If we got a non-eof error, then we know there's an actually issue.
- // Otherwise, it may have been the case that this summary didn't have
- // the set of optional fields.
- case err != nil:
- return nil, err
- }
-
- if err := readChanConfig(r, &c.LocalChanConfig); err != nil {
- return nil, err
- }
-
- // Finally, we'll attempt to read the next unrevoked commitment point
- // for the remote party. If we closed the channel before receiving a
- // funding locked message, then this can be nil. As a result, we'll use
- // the same technique to read the field, only if there's still data
- // left in the buffer.
- err = ReadElements(r, &c.RemoteNextRevocation)
- if err != nil && er.Wrapped(err) != io.EOF {
- // If we got a non-eof error, then we know there's an actually
- // issue. Otherwise, it may have been the case that this
- // summary didn't have the set of optional fields.
- return nil, err
- }
-
- return c, nil
-}
diff --git a/lnd/channeldb/migration_01_to_11/meta.go b/lnd/channeldb/migration_01_to_11/meta.go
deleted file mode 100644
index 6f778a07..00000000
--- a/lnd/channeldb/migration_01_to_11/meta.go
+++ /dev/null
@@ -1,40 +0,0 @@
-package migration_01_to_11
-
-import (
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/lnd/channeldb/kvdb"
-)
-
-var (
- // metaBucket stores all the meta information concerning the state of
- // the database.
- metaBucket = []byte("metadata")
-
- // dbVersionKey is a boltdb key and it's used for storing/retrieving
- // current database version.
- dbVersionKey = []byte("dbp")
-)
-
-// Meta structure holds the database meta information.
-type Meta struct {
- // DbVersionNumber is the current schema version of the database.
- DbVersionNumber uint32
-}
-
-// putMeta is an internal helper function used in order to allow callers to
-// re-use a database transaction. See the publicly exported PutMeta method for
-// more information.
-func putMeta(meta *Meta, tx kvdb.RwTx) er.R {
- metaBucket, err := tx.CreateTopLevelBucket(metaBucket)
- if err != nil {
- return err
- }
-
- return putDbVersion(metaBucket, meta)
-}
-
-func putDbVersion(metaBucket kvdb.RwBucket, meta *Meta) er.R {
- scratch := make([]byte, 4)
- byteOrder.PutUint32(scratch, meta.DbVersionNumber)
- return metaBucket.Put(dbVersionKey, scratch)
-}
diff --git a/lnd/channeldb/migration_01_to_11/meta_test.go b/lnd/channeldb/migration_01_to_11/meta_test.go
deleted file mode 100644
index ba230e5a..00000000
--- a/lnd/channeldb/migration_01_to_11/meta_test.go
+++ /dev/null
@@ -1,59 +0,0 @@
-package migration_01_to_11
-
-import (
- "testing"
-
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/lnd/channeldb/kvdb"
- "github.com/pkt-cash/pktd/pktlog/log"
-)
-
-// applyMigration is a helper test function that encapsulates the general steps
-// which are needed to properly check the result of applying migration function.
-func applyMigration(t *testing.T, beforeMigration, afterMigration func(d *DB),
- migrationFunc migration, shouldFail bool) {
-
- cdb, cleanUp, err := makeTestDB()
- defer cleanUp()
- if err != nil {
- t.Fatal(err)
- }
-
- // Create a test node that will be our source node.
- testNode, err := createTestVertex(cdb)
- if err != nil {
- t.Fatal(err)
- }
- graph := cdb.ChannelGraph()
- if err := graph.SetSourceNode(testNode); err != nil {
- t.Fatal(err)
- }
-
- // beforeMigration usually used for populating the database
- // with test data.
- beforeMigration(cdb)
-
- defer func() {
- if r := recover(); r != nil {
- err = er.Errorf("%v", r)
- }
-
- if err == nil && shouldFail {
- t.Fatal("error wasn't received on migration stage")
- } else if err != nil && !shouldFail {
- t.Fatalf("error was received on migration stage: %v", err)
- }
-
- // afterMigration usually used for checking the database state and
- // throwing the error if something went wrong.
- afterMigration(cdb)
- }()
-
- // Apply migration.
- err = kvdb.Update(cdb, func(tx kvdb.RwTx) er.R {
- return migrationFunc(tx)
- }, func() {})
- if err != nil {
- log.Error(err)
- }
-}
diff --git a/lnd/channeldb/migration_01_to_11/migration_09_legacy_serialization.go b/lnd/channeldb/migration_01_to_11/migration_09_legacy_serialization.go
deleted file mode 100644
index 355489f4..00000000
--- a/lnd/channeldb/migration_01_to_11/migration_09_legacy_serialization.go
+++ /dev/null
@@ -1,503 +0,0 @@
-package migration_01_to_11
-
-import (
- "bytes"
- "encoding/binary"
- "io"
- "sort"
-
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/btcutil/util"
- "github.com/pkt-cash/pktd/lnd/channeldb/kvdb"
- "github.com/pkt-cash/pktd/lnd/lntypes"
- "github.com/pkt-cash/pktd/lnd/lnwire"
-)
-
-var (
- // paymentBucket is the name of the bucket within the database that
- // stores all data related to payments.
- //
- // Within the payments bucket, each invoice is keyed by its invoice ID
- // which is a monotonically increasing uint64. BoltDB's sequence
- // feature is used for generating monotonically increasing id.
- //
- // NOTE: Deprecated. Kept around for migration purposes.
- paymentBucket = []byte("payments")
-
- // paymentStatusBucket is the name of the bucket within the database
- // that stores the status of a payment indexed by the payment's
- // preimage.
- //
- // NOTE: Deprecated. Kept around for migration purposes.
- paymentStatusBucket = []byte("payment-status")
-)
-
-// outgoingPayment represents a successful payment between the daemon and a
-// remote node. Details such as the total fee paid, and the time of the payment
-// are stored.
-//
-// NOTE: Deprecated. Kept around for migration purposes.
-type outgoingPayment struct {
- Invoice
-
- // Fee is the total fee paid for the payment in milli-satoshis.
- Fee lnwire.MilliSatoshi
-
- // TotalTimeLock is the total cumulative time-lock in the HTLC extended
- // from the second-to-last hop to the destination.
- TimeLockLength uint32
-
- // Path encodes the path the payment took through the network. The path
- // excludes the outgoing node and consists of the hex-encoded
- // compressed public key of each of the nodes involved in the payment.
- Path [][33]byte
-
- // PaymentPreimage is the preImage of a successful payment. This is used
- // to calculate the PaymentHash as well as serve as a proof of payment.
- PaymentPreimage [32]byte
-}
-
-// addPayment saves a successful payment to the database. It is assumed that
-// all payment are sent using unique payment hashes.
-//
-// NOTE: Deprecated. Kept around for migration purposes.
-func (db *DB) addPayment(payment *outgoingPayment) er.R {
- // Validate the field of the inner voice within the outgoing payment,
- // these must also adhere to the same constraints as regular invoices.
- if err := validateInvoice(&payment.Invoice); err != nil {
- return err
- }
-
- // We first serialize the payment before starting the database
- // transaction so we can avoid creating a DB payment in the case of a
- // serialization error.
- var b bytes.Buffer
- if err := serializeOutgoingPayment(&b, payment); err != nil {
- return err
- }
- paymentBytes := b.Bytes()
-
- return kvdb.Update(db, func(tx kvdb.RwTx) er.R {
- payments, err := tx.CreateTopLevelBucket(paymentBucket)
- if err != nil {
- return err
- }
-
- // Obtain the new unique sequence number for this payment.
- paymentID, err := payments.NextSequence()
- if err != nil {
- return err
- }
-
- // We use BigEndian for keys as it orders keys in
- // ascending order. This allows bucket scans to order payments
- // in the order in which they were created.
- paymentIDBytes := make([]byte, 8)
- binary.BigEndian.PutUint64(paymentIDBytes, paymentID)
-
- return payments.Put(paymentIDBytes, paymentBytes)
- }, func() {})
-}
-
-// fetchAllPayments returns all outgoing payments in DB.
-//
-// NOTE: Deprecated. Kept around for migration purposes.
-func (db *DB) fetchAllPayments() ([]*outgoingPayment, er.R) {
- var payments []*outgoingPayment
-
- err := kvdb.View(db, func(tx kvdb.RTx) er.R {
- bucket := tx.ReadBucket(paymentBucket)
- if bucket == nil {
- return ErrNoPaymentsCreated.Default()
- }
-
- return bucket.ForEach(func(k, v []byte) er.R {
- // If the value is nil, then we ignore it as it may be
- // a sub-bucket.
- if v == nil {
- return nil
- }
-
- r := bytes.NewReader(v)
- payment, err := deserializeOutgoingPayment(r)
- if err != nil {
- return err
- }
-
- payments = append(payments, payment)
- return nil
- })
- }, func() {
- payments = nil
- })
- if err != nil {
- return nil, err
- }
-
- return payments, nil
-}
-
-// fetchPaymentStatus returns the payment status for outgoing payment.
-// If status of the payment isn't found, it will default to "StatusUnknown".
-//
-// NOTE: Deprecated. Kept around for migration purposes.
-func (db *DB) fetchPaymentStatus(paymentHash [32]byte) (PaymentStatus, er.R) {
- var paymentStatus = StatusUnknown
- err := kvdb.View(db, func(tx kvdb.RTx) er.R {
- var err er.R
- paymentStatus, err = fetchPaymentStatusTx(tx, paymentHash)
- return err
- }, func() {
- paymentStatus = StatusUnknown
- })
- if err != nil {
- return StatusUnknown, err
- }
-
- return paymentStatus, nil
-}
-
-// fetchPaymentStatusTx is a helper method that returns the payment status for
-// outgoing payment. If status of the payment isn't found, it will default to
-// "StatusUnknown". It accepts the boltdb transactions such that this method
-// can be composed into other atomic operations.
-//
-// NOTE: Deprecated. Kept around for migration purposes.
-func fetchPaymentStatusTx(tx kvdb.RTx, paymentHash [32]byte) (PaymentStatus, er.R) {
- // The default status for all payments that aren't recorded in database.
- var paymentStatus = StatusUnknown
-
- bucket := tx.ReadBucket(paymentStatusBucket)
- if bucket == nil {
- return paymentStatus, nil
- }
-
- paymentStatusBytes := bucket.Get(paymentHash[:])
- if paymentStatusBytes == nil {
- return paymentStatus, nil
- }
-
- paymentStatus.FromBytes(paymentStatusBytes)
-
- return paymentStatus, nil
-}
-
-func serializeOutgoingPayment(w io.Writer, p *outgoingPayment) er.R {
- var scratch [8]byte
-
- if err := serializeInvoiceLegacy(w, &p.Invoice); err != nil {
- return err
- }
-
- byteOrder.PutUint64(scratch[:], uint64(p.Fee))
- if _, err := util.Write(w, scratch[:]); err != nil {
- return err
- }
-
- // First write out the length of the bytes to prefix the value.
- pathLen := uint32(len(p.Path))
- byteOrder.PutUint32(scratch[:4], pathLen)
- if _, err := util.Write(w, scratch[:4]); err != nil {
- return err
- }
-
- // Then with the path written, we write out the series of public keys
- // involved in the path.
- for _, hop := range p.Path {
- if _, err := util.Write(w, hop[:]); err != nil {
- return err
- }
- }
-
- byteOrder.PutUint32(scratch[:4], p.TimeLockLength)
- if _, err := util.Write(w, scratch[:4]); err != nil {
- return err
- }
-
- if _, err := util.Write(w, p.PaymentPreimage[:]); err != nil {
- return err
- }
-
- return nil
-}
-
-func deserializeOutgoingPayment(r io.Reader) (*outgoingPayment, er.R) {
- var scratch [8]byte
-
- p := &outgoingPayment{}
-
- inv, err := deserializeInvoiceLegacy(r)
- if err != nil {
- return nil, err
- }
- p.Invoice = inv
-
- if _, err := r.Read(scratch[:]); err != nil {
- return nil, er.E(err)
- }
- p.Fee = lnwire.MilliSatoshi(byteOrder.Uint64(scratch[:]))
-
- if _, err := r.Read(scratch[:4]); err != nil {
- return nil, er.E(err)
- }
- pathLen := byteOrder.Uint32(scratch[:4])
-
- path := make([][33]byte, pathLen)
- for i := uint32(0); i < pathLen; i++ {
- if _, err := r.Read(path[i][:]); err != nil {
- return nil, er.E(err)
- }
- }
- p.Path = path
-
- if _, err := r.Read(scratch[:4]); err != nil {
- return nil, er.E(err)
- }
- p.TimeLockLength = byteOrder.Uint32(scratch[:4])
-
- if _, err := r.Read(p.PaymentPreimage[:]); err != nil {
- return nil, er.E(err)
- }
-
- return p, nil
-}
-
-// serializePaymentAttemptInfoMigration9 is the serializePaymentAttemptInfo
-// version as existed when migration #9 was created. We keep this around, along
-// with the methods below to ensure that clients that upgrade will use the
-// correct version of this method.
-func serializePaymentAttemptInfoMigration9(w io.Writer, a *PaymentAttemptInfo) er.R {
- if err := WriteElements(w, a.PaymentID, a.SessionKey); err != nil {
- return err
- }
-
- if err := serializeRouteMigration9(w, a.Route); err != nil {
- return err
- }
-
- return nil
-}
-
-func serializeHopMigration9(w io.Writer, h *Hop) er.R {
- if err := WriteElements(w,
- h.PubKeyBytes[:], h.ChannelID, h.OutgoingTimeLock,
- h.AmtToForward,
- ); err != nil {
- return err
- }
-
- return nil
-}
-
-func serializeRouteMigration9(w io.Writer, r Route) er.R {
- if err := WriteElements(w,
- r.TotalTimeLock, r.TotalAmount, r.SourcePubKey[:],
- ); err != nil {
- return err
- }
-
- if err := WriteElements(w, uint32(len(r.Hops))); err != nil {
- return err
- }
-
- for _, h := range r.Hops {
- if err := serializeHopMigration9(w, h); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func deserializePaymentAttemptInfoMigration9(r io.Reader) (*PaymentAttemptInfo, er.R) {
- a := &PaymentAttemptInfo{}
- err := ReadElements(r, &a.PaymentID, &a.SessionKey)
- if err != nil {
- return nil, err
- }
- a.Route, err = deserializeRouteMigration9(r)
- if err != nil {
- return nil, err
- }
- return a, nil
-}
-
-func deserializeRouteMigration9(r io.Reader) (Route, er.R) {
- rt := Route{}
- if err := ReadElements(r,
- &rt.TotalTimeLock, &rt.TotalAmount,
- ); err != nil {
- return rt, err
- }
-
- var pub []byte
- if err := ReadElements(r, &pub); err != nil {
- return rt, err
- }
- copy(rt.SourcePubKey[:], pub)
-
- var numHops uint32
- if err := ReadElements(r, &numHops); err != nil {
- return rt, err
- }
-
- var hops []*Hop
- for i := uint32(0); i < numHops; i++ {
- hop, err := deserializeHopMigration9(r)
- if err != nil {
- return rt, err
- }
- hops = append(hops, hop)
- }
- rt.Hops = hops
-
- return rt, nil
-}
-
-func deserializeHopMigration9(r io.Reader) (*Hop, er.R) {
- h := &Hop{}
-
- var pub []byte
- if err := ReadElements(r, &pub); err != nil {
- return nil, err
- }
- copy(h.PubKeyBytes[:], pub)
-
- if err := ReadElements(r,
- &h.ChannelID, &h.OutgoingTimeLock, &h.AmtToForward,
- ); err != nil {
- return nil, err
- }
-
- return h, nil
-}
-
-// fetchPaymentsMigration9 returns all sent payments found in the DB using the
-// payment attempt info format that was present as of migration #9. We need
-// this as otherwise, the current FetchPayments version will use the latest
-// decoding format. Note that we only need this for the
-// TestOutgoingPaymentsMigration migration test case.
-func (db *DB) fetchPaymentsMigration9() ([]*Payment, er.R) {
- var payments []*Payment
-
- err := kvdb.View(db, func(tx kvdb.RTx) er.R {
- paymentsBucket := tx.ReadBucket(paymentsRootBucket)
- if paymentsBucket == nil {
- return nil
- }
-
- return paymentsBucket.ForEach(func(k, v []byte) er.R {
- bucket := paymentsBucket.NestedReadBucket(k)
- if bucket == nil {
- // We only expect sub-buckets to be found in
- // this top-level bucket.
- return er.Errorf("non bucket element in " +
- "payments bucket")
- }
-
- p, err := fetchPaymentMigration9(bucket)
- if err != nil {
- return err
- }
-
- payments = append(payments, p)
-
- // For older versions of lnd, duplicate payments to a
- // payment has was possible. These will be found in a
- // sub-bucket indexed by their sequence number if
- // available.
- dup := bucket.NestedReadBucket(paymentDuplicateBucket)
- if dup == nil {
- return nil
- }
-
- return dup.ForEach(func(k, v []byte) er.R {
- subBucket := dup.NestedReadBucket(k)
- if subBucket == nil {
- // We one bucket for each duplicate to
- // be found.
- return er.Errorf("non bucket element" +
- "in duplicate bucket")
- }
-
- p, err := fetchPaymentMigration9(subBucket)
- if err != nil {
- return err
- }
-
- payments = append(payments, p)
- return nil
- })
- })
- }, func() {
- payments = nil
- })
- if err != nil {
- return nil, err
- }
-
- // Before returning, sort the payments by their sequence number.
- sort.Slice(payments, func(i, j int) bool {
- return payments[i].sequenceNum < payments[j].sequenceNum
- })
-
- return payments, nil
-}
-
-func fetchPaymentMigration9(bucket kvdb.RBucket) (*Payment, er.R) {
- var (
- err er.R
- p = &Payment{}
- )
-
- seqBytes := bucket.Get(paymentSequenceKey)
- if seqBytes == nil {
- return nil, er.Errorf("sequence number not found")
- }
-
- p.sequenceNum = binary.BigEndian.Uint64(seqBytes)
-
- // Get the payment status.
- p.Status = fetchPaymentStatus(bucket)
-
- // Get the PaymentCreationInfo.
- b := bucket.Get(paymentCreationInfoKey)
- if b == nil {
- return nil, er.Errorf("creation info not found")
- }
-
- r := bytes.NewReader(b)
- p.Info, err = deserializePaymentCreationInfo(r)
- if err != nil {
- return nil, err
-
- }
-
- // Get the PaymentAttemptInfo. This can be unset.
- b = bucket.Get(paymentAttemptInfoKey)
- if b != nil {
- r = bytes.NewReader(b)
- p.Attempt, err = deserializePaymentAttemptInfoMigration9(r)
- if err != nil {
- return nil, err
- }
- }
-
- // Get the payment preimage. This is only found for
- // completed payments.
- b = bucket.Get(paymentSettleInfoKey)
- if b != nil {
- var preimg lntypes.Preimage
- copy(preimg[:], b[:])
- p.PaymentPreimage = &preimg
- }
-
- // Get failure reason if available.
- b = bucket.Get(paymentFailInfoKey)
- if b != nil {
- reason := FailureReason(b[0])
- p.Failure = &reason
- }
-
- return p, nil
-}
diff --git a/lnd/channeldb/migration_01_to_11/migration_10_route_tlv_records.go b/lnd/channeldb/migration_01_to_11/migration_10_route_tlv_records.go
deleted file mode 100644
index 3d146878..00000000
--- a/lnd/channeldb/migration_01_to_11/migration_10_route_tlv_records.go
+++ /dev/null
@@ -1,237 +0,0 @@
-package migration_01_to_11
-
-import (
- "bytes"
- "io"
-
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/lnd/channeldb/kvdb"
- "github.com/pkt-cash/pktd/pktlog/log"
-)
-
-// MigrateRouteSerialization migrates the way we serialize routes across the
-// entire database. At the time of writing of this migration, this includes our
-// payment attempts, as well as the payment results in mission control.
-func MigrateRouteSerialization(tx kvdb.RwTx) er.R {
- // First, we'll do all the payment attempts.
- rootPaymentBucket := tx.ReadWriteBucket(paymentsRootBucket)
- if rootPaymentBucket == nil {
- return nil
- }
-
- // As we can't mutate a bucket while we're iterating over it with
- // ForEach, we'll need to collect all the known payment hashes in
- // memory first.
- var payHashes [][]byte
- err := rootPaymentBucket.ForEach(func(k, v []byte) er.R {
- if v != nil {
- return nil
- }
-
- payHashes = append(payHashes, k)
- return nil
- })
- if err != nil {
- return err
- }
-
- // Now that we have all the payment hashes, we can carry out the
- // migration itself.
- for _, payHash := range payHashes {
- payHashBucket := rootPaymentBucket.NestedReadWriteBucket(payHash)
-
- // First, we'll migrate the main (non duplicate) payment to
- // this hash.
- err := migrateAttemptEncoding(tx, payHashBucket)
- if err != nil {
- return err
- }
-
- // Now that we've migrated the main payment, we'll also check
- // for any duplicate payments to the same payment hash.
- dupBucket := payHashBucket.NestedReadWriteBucket(paymentDuplicateBucket)
-
- // If there's no dup bucket, then we can move on to the next
- // payment.
- if dupBucket == nil {
- continue
- }
-
- // Otherwise, we'll now iterate through all the duplicate pay
- // hashes and migrate those.
- var dupSeqNos [][]byte
- err = dupBucket.ForEach(func(k, v []byte) er.R {
- dupSeqNos = append(dupSeqNos, k)
- return nil
- })
- if err != nil {
- return err
- }
-
- // Now in this second pass, we'll re-serialize their duplicate
- // payment attempts under the new encoding.
- for _, seqNo := range dupSeqNos {
- dupPayHashBucket := dupBucket.NestedReadWriteBucket(seqNo)
- err := migrateAttemptEncoding(tx, dupPayHashBucket)
- if err != nil {
- return err
- }
- }
- }
-
- log.Infof("Migration of route/hop serialization complete!")
-
- log.Infof("Migrating to new mission control store by clearing " +
- "existing data")
-
- resultsKey := []byte("missioncontrol-results")
- err = tx.DeleteTopLevelBucket(resultsKey)
- if err != nil && !kvdb.ErrBucketNotFound.Is(err) {
- return err
- }
-
- log.Infof("Migration to new mission control completed!")
-
- return nil
-}
-
-// migrateAttemptEncoding migrates payment attempts using the legacy format to
-// the new format.
-func migrateAttemptEncoding(tx kvdb.RwTx, payHashBucket kvdb.RwBucket) er.R {
- payAttemptBytes := payHashBucket.Get(paymentAttemptInfoKey)
- if payAttemptBytes == nil {
- return nil
- }
-
- // For our migration, we'll first read out the existing payment attempt
- // using the legacy serialization of the attempt.
- payAttemptReader := bytes.NewReader(payAttemptBytes)
- payAttempt, err := deserializePaymentAttemptInfoLegacy(
- payAttemptReader,
- )
- if err != nil {
- return err
- }
-
- // Now that we have the old attempts, we'll explicitly mark this as
- // needing a legacy payload, since after this migration, the modern
- // payload will be the default if signalled.
- for _, hop := range payAttempt.Route.Hops {
- hop.LegacyPayload = true
- }
-
- // Finally, we'll write out the payment attempt using the new encoding.
- var b bytes.Buffer
- err = serializePaymentAttemptInfo(&b, payAttempt)
- if err != nil {
- return err
- }
-
- return payHashBucket.Put(paymentAttemptInfoKey, b.Bytes())
-}
-
-func deserializePaymentAttemptInfoLegacy(r io.Reader) (*PaymentAttemptInfo, er.R) {
- a := &PaymentAttemptInfo{}
- err := ReadElements(r, &a.PaymentID, &a.SessionKey)
- if err != nil {
- return nil, err
- }
- a.Route, err = deserializeRouteLegacy(r)
- if err != nil {
- return nil, err
- }
- return a, nil
-}
-
-func serializePaymentAttemptInfoLegacy(w io.Writer, a *PaymentAttemptInfo) er.R {
- if err := WriteElements(w, a.PaymentID, a.SessionKey); err != nil {
- return err
- }
-
- if err := serializeRouteLegacy(w, a.Route); err != nil {
- return err
- }
-
- return nil
-}
-
-func deserializeHopLegacy(r io.Reader) (*Hop, er.R) {
- h := &Hop{}
-
- var pub []byte
- if err := ReadElements(r, &pub); err != nil {
- return nil, err
- }
- copy(h.PubKeyBytes[:], pub)
-
- if err := ReadElements(r,
- &h.ChannelID, &h.OutgoingTimeLock, &h.AmtToForward,
- ); err != nil {
- return nil, err
- }
-
- return h, nil
-}
-
-func serializeHopLegacy(w io.Writer, h *Hop) er.R {
- if err := WriteElements(w,
- h.PubKeyBytes[:], h.ChannelID, h.OutgoingTimeLock,
- h.AmtToForward,
- ); err != nil {
- return err
- }
-
- return nil
-}
-
-func deserializeRouteLegacy(r io.Reader) (Route, er.R) {
- rt := Route{}
- if err := ReadElements(r,
- &rt.TotalTimeLock, &rt.TotalAmount,
- ); err != nil {
- return rt, err
- }
-
- var pub []byte
- if err := ReadElements(r, &pub); err != nil {
- return rt, err
- }
- copy(rt.SourcePubKey[:], pub)
-
- var numHops uint32
- if err := ReadElements(r, &numHops); err != nil {
- return rt, err
- }
-
- var hops []*Hop
- for i := uint32(0); i < numHops; i++ {
- hop, err := deserializeHopLegacy(r)
- if err != nil {
- return rt, err
- }
- hops = append(hops, hop)
- }
- rt.Hops = hops
-
- return rt, nil
-}
-
-func serializeRouteLegacy(w io.Writer, r Route) er.R {
- if err := WriteElements(w,
- r.TotalTimeLock, r.TotalAmount, r.SourcePubKey[:],
- ); err != nil {
- return err
- }
-
- if err := WriteElements(w, uint32(len(r.Hops))); err != nil {
- return err
- }
-
- for _, h := range r.Hops {
- if err := serializeHopLegacy(w, h); err != nil {
- return err
- }
- }
-
- return nil
-}
diff --git a/lnd/channeldb/migration_01_to_11/migration_11_invoices.go b/lnd/channeldb/migration_01_to_11/migration_11_invoices.go
deleted file mode 100644
index 8af06bee..00000000
--- a/lnd/channeldb/migration_01_to_11/migration_11_invoices.go
+++ /dev/null
@@ -1,231 +0,0 @@
-package migration_01_to_11
-
-import (
- "bytes"
- "io"
-
- litecoinCfg "github.com/ltcsuite/ltcd/chaincfg"
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/btcutil/util"
- bitcoinCfg "github.com/pkt-cash/pktd/chaincfg"
- "github.com/pkt-cash/pktd/lnd/channeldb/kvdb"
- "github.com/pkt-cash/pktd/lnd/channeldb/migration_01_to_11/zpay32"
- "github.com/pkt-cash/pktd/lnd/lnwire"
- "github.com/pkt-cash/pktd/pktlog/log"
- "github.com/pkt-cash/pktd/wire"
-)
-
-// MigrateInvoices adds invoice htlcs and a separate cltv delta field to the
-// invoices.
-func MigrateInvoices(tx kvdb.RwTx) er.R {
- log.Infof("Migrating invoices to new invoice format")
-
- invoiceB := tx.ReadWriteBucket(invoiceBucket)
- if invoiceB == nil {
- return nil
- }
-
- // Iterate through the entire key space of the top-level invoice bucket.
- // If key with a non-nil value stores the next invoice ID which maps to
- // the corresponding invoice. Store those keys first, because it isn't
- // safe to modify the bucket inside a ForEach loop.
- var invoiceKeys [][]byte
- err := invoiceB.ForEach(func(k, v []byte) er.R {
- if v == nil {
- return nil
- }
-
- invoiceKeys = append(invoiceKeys, k)
-
- return nil
- })
- if err != nil {
- return err
- }
-
- nets := []*bitcoinCfg.Params{
- &bitcoinCfg.MainNetParams, &bitcoinCfg.SimNetParams,
- &bitcoinCfg.RegressionNetParams, &bitcoinCfg.TestNet3Params,
- }
-
- ltcNets := []*litecoinCfg.Params{
- &litecoinCfg.MainNetParams, &litecoinCfg.SimNetParams,
- &litecoinCfg.RegressionNetParams, &litecoinCfg.TestNet4Params,
- }
- for _, net := range ltcNets {
- var convertedNet bitcoinCfg.Params
- convertedNet.Bech32HRPSegwit = net.Bech32HRPSegwit
- nets = append(nets, &convertedNet)
- }
-
- // Iterate over all stored keys and migrate the invoices.
- for _, k := range invoiceKeys {
- v := invoiceB.Get(k)
-
- // Deserialize the invoice with the deserializing function that
- // was in use for this version of the database.
- invoiceReader := bytes.NewReader(v)
- invoice, err := deserializeInvoiceLegacy(invoiceReader)
- if err != nil {
- return err
- }
-
- if invoice.Terms.State == ContractAccepted {
- return er.Errorf("cannot upgrade with invoice(s) " +
- "in accepted state, see release notes")
- }
-
- // Try to decode the payment request for every possible net to
- // avoid passing a the active network to channeldb. This would
- // be a layering violation, while this migration is only running
- // once and will likely be removed in the future.
- var payReq *zpay32.Invoice
- for _, net := range nets {
- payReq, err = zpay32.Decode(
- string(invoice.PaymentRequest), net,
- )
- if err == nil {
- break
- }
- }
- if payReq == nil {
- return er.Errorf("cannot decode payreq")
- }
- invoice.FinalCltvDelta = int32(payReq.MinFinalCLTVExpiry())
- invoice.Expiry = payReq.Expiry()
-
- // Serialize the invoice in the new format and use it to replace
- // the old invoice in the database.
- var buf bytes.Buffer
- if err := serializeInvoice(&buf, &invoice); err != nil {
- return err
- }
-
- err = invoiceB.Put(k, buf.Bytes())
- if err != nil {
- return err
- }
- }
-
- log.Infof("Migration of invoices completed!")
- return nil
-}
-
-func deserializeInvoiceLegacy(r io.Reader) (Invoice, er.R) {
- var err er.R
- invoice := Invoice{}
-
- // TODO(roasbeef): use read full everywhere
- invoice.Memo, err = wire.ReadVarBytes(r, 0, MaxMemoSize, "")
- if err != nil {
- return invoice, err
- }
- invoice.Receipt, err = wire.ReadVarBytes(r, 0, MaxReceiptSize, "")
- if err != nil {
- return invoice, err
- }
-
- invoice.PaymentRequest, err = wire.ReadVarBytes(r, 0, MaxPaymentRequestSize, "")
- if err != nil {
- return invoice, err
- }
-
- birthBytes, err := wire.ReadVarBytes(r, 0, 300, "birth")
- if err != nil {
- return invoice, err
- }
- if err := invoice.CreationDate.UnmarshalBinary(birthBytes); err != nil {
- return invoice, er.E(err)
- }
-
- settledBytes, err := wire.ReadVarBytes(r, 0, 300, "settled")
- if err != nil {
- return invoice, err
- }
- if err := invoice.SettleDate.UnmarshalBinary(settledBytes); err != nil {
- return invoice, er.E(err)
- }
-
- if _, err := util.ReadFull(r, invoice.Terms.PaymentPreimage[:]); err != nil {
- return invoice, err
- }
- var scratch [8]byte
- if _, err := util.ReadFull(r, scratch[:]); err != nil {
- return invoice, err
- }
- invoice.Terms.Value = lnwire.MilliSatoshi(byteOrder.Uint64(scratch[:]))
-
- if err := util.ReadBin(r, byteOrder, &invoice.Terms.State); err != nil {
- return invoice, err
- }
-
- if err := util.ReadBin(r, byteOrder, &invoice.AddIndex); err != nil {
- return invoice, err
- }
- if err := util.ReadBin(r, byteOrder, &invoice.SettleIndex); err != nil {
- return invoice, err
- }
- if err := util.ReadBin(r, byteOrder, &invoice.AmtPaid); err != nil {
- return invoice, err
- }
-
- return invoice, nil
-}
-
-// serializeInvoiceLegacy serializes an invoice in the format of the previous db
-// version.
-func serializeInvoiceLegacy(w io.Writer, i *Invoice) er.R {
- if err := wire.WriteVarBytes(w, 0, i.Memo[:]); err != nil {
- return err
- }
- if err := wire.WriteVarBytes(w, 0, i.Receipt[:]); err != nil {
- return err
- }
- if err := wire.WriteVarBytes(w, 0, i.PaymentRequest[:]); err != nil {
- return err
- }
-
- birthBytes, errr := i.CreationDate.MarshalBinary()
- if errr != nil {
- return er.E(errr)
- }
-
- if err := wire.WriteVarBytes(w, 0, birthBytes); err != nil {
- return err
- }
-
- settleBytes, errr := i.SettleDate.MarshalBinary()
- if errr != nil {
- return er.E(errr)
- }
-
- if err := wire.WriteVarBytes(w, 0, settleBytes); err != nil {
- return err
- }
-
- if _, err := util.Write(w, i.Terms.PaymentPreimage[:]); err != nil {
- return err
- }
-
- var scratch [8]byte
- byteOrder.PutUint64(scratch[:], uint64(i.Terms.Value))
- if _, err := util.Write(w, scratch[:]); err != nil {
- return err
- }
-
- if err := util.WriteBin(w, byteOrder, i.Terms.State); err != nil {
- return err
- }
-
- if err := util.WriteBin(w, byteOrder, i.AddIndex); err != nil {
- return err
- }
- if err := util.WriteBin(w, byteOrder, i.SettleIndex); err != nil {
- return err
- }
- if err := util.WriteBin(w, byteOrder, int64(i.AmtPaid)); err != nil {
- return err
- }
-
- return nil
-}
diff --git a/lnd/channeldb/migration_01_to_11/migration_11_invoices_test.go b/lnd/channeldb/migration_01_to_11/migration_11_invoices_test.go
deleted file mode 100644
index ff7264d5..00000000
--- a/lnd/channeldb/migration_01_to_11/migration_11_invoices_test.go
+++ /dev/null
@@ -1,184 +0,0 @@
-package migration_01_to_11
-
-import (
- "bytes"
- "testing"
- "time"
-
- litecoinCfg "github.com/ltcsuite/ltcd/chaincfg"
- "github.com/pkt-cash/pktd/btcec"
- "github.com/pkt-cash/pktd/btcutil/er"
- bitcoinCfg "github.com/pkt-cash/pktd/chaincfg"
- "github.com/pkt-cash/pktd/lnd/channeldb/kvdb"
- "github.com/pkt-cash/pktd/lnd/zpay32"
-)
-
-var (
- testPrivKeyBytes = []byte{
- 0x2b, 0xd8, 0x06, 0xc9, 0x7f, 0x0e, 0x00, 0xaf,
- 0x1a, 0x1f, 0xc3, 0x32, 0x8f, 0xa7, 0x63, 0xa9,
- 0x26, 0x97, 0x23, 0xc8, 0xdb, 0x8f, 0xac, 0x4f,
- 0x93, 0xaf, 0x71, 0xdb, 0x18, 0x6d, 0x6e, 0x90,
- }
-
- testCltvDelta = int32(50)
-)
-
-// beforeMigrationFuncV11 insert the test invoices in the database.
-func beforeMigrationFuncV11(t *testing.T, d *DB, invoices []Invoice) {
- err := kvdb.Update(d, func(tx kvdb.RwTx) er.R {
- invoicesBucket, err := tx.CreateTopLevelBucket(
- invoiceBucket,
- )
- if err != nil {
- return err
- }
-
- invoiceNum := uint32(1)
- for _, invoice := range invoices {
- var invoiceKey [4]byte
- byteOrder.PutUint32(invoiceKey[:], invoiceNum)
- invoiceNum++
-
- var buf bytes.Buffer
- err := serializeInvoiceLegacy(&buf, &invoice) // nolint:scopelint
- if err != nil {
- return err
- }
-
- err = invoicesBucket.Put(
- invoiceKey[:], buf.Bytes(),
- )
- if err != nil {
- return err
- }
- }
-
- return nil
- }, func() {})
- if err != nil {
- t.Fatal(err)
- }
-}
-
-// TestMigrateInvoices checks that invoices are migrated correctly.
-func TestMigrateInvoices(t *testing.T) {
- t.Parallel()
-
- payReqBtc, err := getPayReq(&bitcoinCfg.MainNetParams)
- if err != nil {
- t.Fatal(err)
- }
-
- var ltcNetParams bitcoinCfg.Params
- ltcNetParams.Bech32HRPSegwit = litecoinCfg.MainNetParams.Bech32HRPSegwit
- payReqLtc, err := getPayReq(<cNetParams)
- if err != nil {
- t.Fatal(err)
- }
-
- invoices := []Invoice{
- {
- PaymentRequest: []byte(payReqBtc),
- },
- {
- PaymentRequest: []byte(payReqLtc),
- },
- }
-
- // Verify that all invoices were migrated.
- afterMigrationFunc := func(d *DB) {
- dbInvoices, err := d.FetchAllInvoices(false)
- if err != nil {
- t.Fatalf("unable to fetch invoices: %v", err)
- }
-
- if len(invoices) != len(dbInvoices) {
- t.Fatalf("expected %d invoices, got %d", len(invoices),
- len(dbInvoices))
- }
-
- for _, dbInvoice := range dbInvoices {
- if dbInvoice.FinalCltvDelta != testCltvDelta {
- t.Fatal("incorrect final cltv delta")
- }
- if dbInvoice.Expiry != 3600*time.Second {
- t.Fatal("incorrect expiry")
- }
- if len(dbInvoice.Htlcs) != 0 {
- t.Fatal("expected no htlcs after migration")
- }
- }
- }
-
- applyMigration(t,
- func(d *DB) { beforeMigrationFuncV11(t, d, invoices) },
- afterMigrationFunc,
- MigrateInvoices,
- false)
-}
-
-// TestMigrateInvoicesHodl checks that a hodl invoice in the accepted state
-// fails the migration.
-func TestMigrateInvoicesHodl(t *testing.T) {
- t.Parallel()
-
- payReqBtc, err := getPayReq(&bitcoinCfg.MainNetParams)
- if err != nil {
- t.Fatal(err)
- }
-
- invoices := []Invoice{
- {
- PaymentRequest: []byte(payReqBtc),
- Terms: ContractTerm{
- State: ContractAccepted,
- },
- },
- }
-
- applyMigration(t,
- func(d *DB) { beforeMigrationFuncV11(t, d, invoices) },
- func(d *DB) {},
- MigrateInvoices,
- true)
-}
-
-// signDigestCompact generates a test signature to be used in the generation of
-// test payment requests.
-func signDigestCompact(hash []byte) ([]byte, er.R) {
- // Should the signature reference a compressed public key or not.
- isCompressedKey := true
-
- privKey, _ := btcec.PrivKeyFromBytes(btcec.S256(), testPrivKeyBytes)
-
- // btcec.SignCompact returns a pubkey-recoverable signature
- sig, err := btcec.SignCompact(
- btcec.S256(), privKey, hash, isCompressedKey,
- )
- if err != nil {
- return nil, er.Errorf("can't sign the hash: %v", err)
- }
-
- return sig, nil
-}
-
-// getPayReq creates a payment request for the given net.
-func getPayReq(net *bitcoinCfg.Params) (string, er.R) {
- options := []func(*zpay32.Invoice){
- zpay32.CLTVExpiry(uint64(testCltvDelta)),
- zpay32.Description("test"),
- }
-
- payReq, err := zpay32.NewInvoice(
- net, [32]byte{}, time.Unix(1, 0), options...,
- )
- if err != nil {
- return "", err
- }
- return payReq.Encode(
- zpay32.MessageSigner{
- SignCompact: signDigestCompact,
- },
- )
-}
diff --git a/lnd/channeldb/migration_01_to_11/migrations.go b/lnd/channeldb/migration_01_to_11/migrations.go
deleted file mode 100644
index ab3c1499..00000000
--- a/lnd/channeldb/migration_01_to_11/migrations.go
+++ /dev/null
@@ -1,939 +0,0 @@
-package migration_01_to_11
-
-import (
- "bytes"
- "crypto/sha256"
- "encoding/binary"
-
- "github.com/pkt-cash/pktd/btcec"
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/lnd/channeldb/kvdb"
- "github.com/pkt-cash/pktd/lnd/lnwire"
- "github.com/pkt-cash/pktd/pktlog/log"
-)
-
-// MigrateNodeAndEdgeUpdateIndex is a migration function that will update the
-// database from version 0 to version 1. In version 1, we add two new indexes
-// (one for nodes and one for edges) to keep track of the last time a node or
-// edge was updated on the network. These new indexes allow us to implement the
-// new graph sync protocol added.
-func MigrateNodeAndEdgeUpdateIndex(tx kvdb.RwTx) er.R {
- // First, we'll populating the node portion of the new index. Before we
- // can add new values to the index, we'll first create the new bucket
- // where these items will be housed.
- nodes, err := tx.CreateTopLevelBucket(nodeBucket)
- if err != nil {
- return er.Errorf("unable to create node bucket: %v", err)
- }
- nodeUpdateIndex, err := nodes.CreateBucketIfNotExists(
- nodeUpdateIndexBucket,
- )
- if err != nil {
- return er.Errorf("unable to create node update index: %v", err)
- }
-
- log.Infof("Populating new node update index bucket")
-
- // Now that we know the bucket has been created, we'll iterate over the
- // entire node bucket so we can add the (updateTime || nodePub) key
- // into the node update index.
- err = nodes.ForEach(func(nodePub, nodeInfo []byte) er.R {
- if len(nodePub) != 33 {
- return nil
- }
-
- log.Tracef("Adding %x to node update index", nodePub)
-
- // The first 8 bytes of a node's serialize data is the update
- // time, so we can extract that without decoding the entire
- // structure.
- updateTime := nodeInfo[:8]
-
- // Now that we have the update time, we can construct the key
- // to insert into the index.
- var indexKey [8 + 33]byte
- copy(indexKey[:8], updateTime)
- copy(indexKey[8:], nodePub)
-
- return nodeUpdateIndex.Put(indexKey[:], nil)
- })
- if err != nil {
- return er.Errorf("unable to update node indexes: %v", err)
- }
-
- log.Infof("Populating new edge update index bucket")
-
- // With the set of nodes updated, we'll now update all edges to have a
- // corresponding entry in the edge update index.
- edges, err := tx.CreateTopLevelBucket(edgeBucket)
- if err != nil {
- return er.Errorf("unable to create edge bucket: %v", err)
- }
- edgeUpdateIndex, err := edges.CreateBucketIfNotExists(
- edgeUpdateIndexBucket,
- )
- if err != nil {
- return er.Errorf("unable to create edge update index: %v", err)
- }
-
- // We'll now run through each edge policy in the database, and update
- // the index to ensure each edge has the proper record.
- err = edges.ForEach(func(edgeKey, edgePolicyBytes []byte) er.R {
- if len(edgeKey) != 41 {
- return nil
- }
-
- // Now that we know this is the proper record, we'll grab the
- // channel ID (last 8 bytes of the key), and then decode the
- // edge policy so we can access the update time.
- chanID := edgeKey[33:]
- edgePolicyReader := bytes.NewReader(edgePolicyBytes)
-
- edgePolicy, err := deserializeChanEdgePolicy(
- edgePolicyReader, nodes,
- )
- if err != nil {
- return err
- }
-
- log.Tracef("Adding chan_id=%v to edge update index",
- edgePolicy.ChannelID)
-
- // We'll now construct the index key using the channel ID, and
- // the last time it was updated: (updateTime || chanID).
- var indexKey [8 + 8]byte
- byteOrder.PutUint64(
- indexKey[:], uint64(edgePolicy.LastUpdate.Unix()),
- )
- copy(indexKey[8:], chanID)
-
- return edgeUpdateIndex.Put(indexKey[:], nil)
- })
- if err != nil {
- return er.Errorf("unable to update edge indexes: %v", err)
- }
-
- log.Infof("Migration to node and edge update indexes complete!")
-
- return nil
-}
-
-// MigrateInvoiceTimeSeries is a database migration that assigns all existing
-// invoices an index in the add and/or the settle index. Additionally, all
-// existing invoices will have their bytes padded out in order to encode the
-// add+settle index as well as the amount paid.
-func MigrateInvoiceTimeSeries(tx kvdb.RwTx) er.R {
- invoices, err := tx.CreateTopLevelBucket(invoiceBucket)
- if err != nil {
- return err
- }
-
- addIndex, err := invoices.CreateBucketIfNotExists(
- addIndexBucket,
- )
- if err != nil {
- return err
- }
- settleIndex, err := invoices.CreateBucketIfNotExists(
- settleIndexBucket,
- )
- if err != nil {
- return err
- }
-
- log.Infof("Migrating invoice database to new time series format")
-
- // Now that we have all the buckets we need, we'll run through each
- // invoice in the database, and update it to reflect the new format
- // expected post migration.
- // NOTE: we store the converted invoices and put them back into the
- // database after the loop, since modifying the bucket within the
- // ForEach loop is not safe.
- var invoicesKeys [][]byte
- var invoicesValues [][]byte
- err = invoices.ForEach(func(invoiceNum, invoiceBytes []byte) er.R {
- // If this is a sub bucket, then we'll skip it.
- if invoiceBytes == nil {
- return nil
- }
-
- // First, we'll make a copy of the encoded invoice bytes.
- invoiceBytesCopy := make([]byte, len(invoiceBytes))
- copy(invoiceBytesCopy, invoiceBytes)
-
- // With the bytes copied over, we'll append 24 additional
- // bytes. We do this so we can decode the invoice under the new
- // serialization format.
- padding := bytes.Repeat([]byte{0}, 24)
- invoiceBytesCopy = append(invoiceBytesCopy, padding...)
-
- invoiceReader := bytes.NewReader(invoiceBytesCopy)
- invoice, errr := deserializeInvoiceLegacy(invoiceReader)
- if errr != nil {
- return er.Errorf("unable to decode invoice: %v", errr)
- }
-
- // Now that we have the fully decoded invoice, we can update
- // the various indexes that we're added, and finally the
- // invoice itself before re-inserting it.
-
- // First, we'll get the new sequence in the addIndex in order
- // to create the proper mapping.
- nextAddSeqNo, err := addIndex.NextSequence()
- if err != nil {
- return err
- }
- var seqNoBytes [8]byte
- byteOrder.PutUint64(seqNoBytes[:], nextAddSeqNo)
- err = addIndex.Put(seqNoBytes[:], invoiceNum[:])
- if err != nil {
- return err
- }
-
- log.Tracef("Adding invoice (preimage=%x, add_index=%v) to add "+
- "time series", invoice.Terms.PaymentPreimage[:],
- nextAddSeqNo)
-
- // Next, we'll check if the invoice has been settled or not. If
- // so, then we'll also add it to the settle index.
- var nextSettleSeqNo uint64
- if invoice.Terms.State == ContractSettled {
- nextSettleSeqNo, err = settleIndex.NextSequence()
- if err != nil {
- return err
- }
-
- var seqNoBytes [8]byte
- byteOrder.PutUint64(seqNoBytes[:], nextSettleSeqNo)
- err := settleIndex.Put(seqNoBytes[:], invoiceNum)
- if err != nil {
- return err
- }
-
- invoice.AmtPaid = invoice.Terms.Value
-
- log.Tracef("Adding invoice (preimage=%x, "+
- "settle_index=%v) to add time series",
- invoice.Terms.PaymentPreimage[:],
- nextSettleSeqNo)
- }
-
- // Finally, we'll update the invoice itself with the new
- // indexing information as well as the amount paid if it has
- // been settled or not.
- invoice.AddIndex = nextAddSeqNo
- invoice.SettleIndex = nextSettleSeqNo
-
- // We've fully migrated an invoice, so we'll now update the
- // invoice in-place.
- var b bytes.Buffer
- if err := serializeInvoiceLegacy(&b, &invoice); err != nil {
- return err
- }
-
- // Save the key and value pending update for after the ForEach
- // is done.
- invoicesKeys = append(invoicesKeys, invoiceNum)
- invoicesValues = append(invoicesValues, b.Bytes())
- return nil
- })
- if err != nil {
- return err
- }
-
- // Now put the converted invoices into the DB.
- for i := range invoicesKeys {
- key := invoicesKeys[i]
- value := invoicesValues[i]
- if err := invoices.Put(key, value); err != nil {
- return err
- }
- }
-
- log.Infof("Migration to invoice time series index complete!")
-
- return nil
-}
-
-// MigrateInvoiceTimeSeriesOutgoingPayments is a follow up to the
-// migrateInvoiceTimeSeries migration. As at the time of writing, the
-// OutgoingPayment struct embeddeds an instance of the Invoice struct. As a
-// result, we also need to migrate the internal invoice to the new format.
-func MigrateInvoiceTimeSeriesOutgoingPayments(tx kvdb.RwTx) er.R {
- payBucket := tx.ReadWriteBucket(paymentBucket)
- if payBucket == nil {
- return nil
- }
-
- log.Infof("Migrating invoice database to new outgoing payment format")
-
- // We store the keys and values we want to modify since it is not safe
- // to modify them directly within the ForEach loop.
- var paymentKeys [][]byte
- var paymentValues [][]byte
- err := payBucket.ForEach(func(payID, paymentBytes []byte) er.R {
- log.Tracef("Migrating payment %x", payID[:])
-
- // The internal invoices for each payment only contain a
- // populated contract term, and creation date, as a result,
- // most of the bytes will be "empty".
-
- // We'll calculate the end of the invoice index assuming a
- // "minimal" index that's embedded within the greater
- // OutgoingPayment. The breakdown is:
- // 3 bytes empty var bytes, 16 bytes creation date, 16 bytes
- // settled date, 32 bytes payment pre-image, 8 bytes value, 1
- // byte settled.
- endOfInvoiceIndex := 1 + 1 + 1 + 16 + 16 + 32 + 8 + 1
-
- // We'll now extract the prefix of the pure invoice embedded
- // within.
- invoiceBytes := paymentBytes[:endOfInvoiceIndex]
-
- // With the prefix extracted, we'll copy over the invoice, and
- // also add padding for the new 24 bytes of fields, and finally
- // append the remainder of the outgoing payment.
- paymentCopy := make([]byte, len(invoiceBytes))
- copy(paymentCopy[:], invoiceBytes)
-
- padding := bytes.Repeat([]byte{0}, 24)
- paymentCopy = append(paymentCopy, padding...)
- paymentCopy = append(
- paymentCopy, paymentBytes[endOfInvoiceIndex:]...,
- )
-
- // At this point, we now have the new format of the outgoing
- // payments, we'll attempt to deserialize it to ensure the
- // bytes are properly formatted.
- paymentReader := bytes.NewReader(paymentCopy)
- _, err := deserializeOutgoingPayment(paymentReader)
- if err != nil {
- return er.Errorf("unable to deserialize payment: %v", err)
- }
-
- // Now that we know the modifications was successful, we'll
- // store it to our slice of keys and values, and write it back
- // to disk in the new format after the ForEach loop is over.
- paymentKeys = append(paymentKeys, payID)
- paymentValues = append(paymentValues, paymentCopy)
- return nil
- })
- if err != nil {
- return err
- }
-
- // Finally store the updated payments to the bucket.
- for i := range paymentKeys {
- key := paymentKeys[i]
- value := paymentValues[i]
- if err := payBucket.Put(key, value); err != nil {
- return err
- }
- }
-
- log.Infof("Migration to outgoing payment invoices complete!")
-
- return nil
-}
-
-// MigrateEdgePolicies is a migration function that will update the edges
-// bucket. It ensure that edges with unknown policies will also have an entry
-// in the bucket. After the migration, there will be two edge entries for
-// every channel, regardless of whether the policies are known.
-func MigrateEdgePolicies(tx kvdb.RwTx) er.R {
- nodes := tx.ReadWriteBucket(nodeBucket)
- if nodes == nil {
- return nil
- }
-
- edges := tx.ReadWriteBucket(edgeBucket)
- if edges == nil {
- return nil
- }
-
- edgeIndex := edges.NestedReadWriteBucket(edgeIndexBucket)
- if edgeIndex == nil {
- return nil
- }
-
- // checkKey gets the policy from the database with a low-level call
- // so that it is still possible to distinguish between unknown and
- // not present.
- checkKey := func(channelId uint64, keyBytes []byte) er.R {
- var channelID [8]byte
- byteOrder.PutUint64(channelID[:], channelId)
-
- _, err := fetchChanEdgePolicy(edges,
- channelID[:], keyBytes, nodes)
-
- if ErrEdgeNotFound.Is(err) {
- log.Tracef("Adding unknown edge policy present for node %x, channel %v",
- keyBytes, channelId)
-
- err := putChanEdgePolicyUnknown(edges, channelId, keyBytes)
- if err != nil {
- return err
- }
-
- return nil
- }
-
- return err
- }
-
- // Iterate over all channels and check both edge policies.
- err := edgeIndex.ForEach(func(chanID, edgeInfoBytes []byte) er.R {
- infoReader := bytes.NewReader(edgeInfoBytes)
- edgeInfo, err := deserializeChanEdgeInfo(infoReader)
- if err != nil {
- return err
- }
-
- for _, key := range [][]byte{edgeInfo.NodeKey1Bytes[:],
- edgeInfo.NodeKey2Bytes[:]} {
-
- if err := checkKey(edgeInfo.ChannelID, key); err != nil {
- return err
- }
- }
-
- return nil
- })
-
- if err != nil {
- return er.Errorf("unable to update edge policies: %v", err)
- }
-
- log.Infof("Migration of edge policies complete!")
-
- return nil
-}
-
-// PaymentStatusesMigration is a database migration intended for adding payment
-// statuses for each existing payment entity in bucket to be able control
-// transitions of statuses and prevent cases such as double payment
-func PaymentStatusesMigration(tx kvdb.RwTx) er.R {
- // Get the bucket dedicated to storing statuses of payments,
- // where a key is payment hash, value is payment status.
- paymentStatuses, err := tx.CreateTopLevelBucket(paymentStatusBucket)
- if err != nil {
- return err
- }
-
- log.Infof("Migrating database to support payment statuses")
-
- circuitAddKey := []byte("circuit-adds")
- circuits := tx.ReadWriteBucket(circuitAddKey)
- if circuits != nil {
- log.Infof("Marking all known circuits with status InFlight")
-
- err = circuits.ForEach(func(k, v []byte) er.R {
- // Parse the first 8 bytes as the short chan ID for the
- // circuit. We'll skip all short chan IDs are not
- // locally initiated, which includes all non-zero short
- // chan ids.
- chanID := binary.BigEndian.Uint64(k[:8])
- if chanID != 0 {
- return nil
- }
-
- // The payment hash is the third item in the serialized
- // payment circuit. The first two items are an AddRef
- // (10 bytes) and the incoming circuit key (16 bytes).
- const payHashOffset = 10 + 16
-
- paymentHash := v[payHashOffset : payHashOffset+32]
-
- return paymentStatuses.Put(
- paymentHash[:], StatusInFlight.Bytes(),
- )
- })
- if err != nil {
- return err
- }
- }
-
- log.Infof("Marking all existing payments with status Completed")
-
- // Get the bucket dedicated to storing payments
- bucket := tx.ReadWriteBucket(paymentBucket)
- if bucket == nil {
- return nil
- }
-
- // For each payment in the bucket, deserialize the payment and mark it
- // as completed.
- err = bucket.ForEach(func(k, v []byte) er.R {
- // Ignores if it is sub-bucket.
- if v == nil {
- return nil
- }
-
- r := bytes.NewReader(v)
- payment, err := deserializeOutgoingPayment(r)
- if err != nil {
- return err
- }
-
- // Calculate payment hash for current payment.
- paymentHash := sha256.Sum256(payment.PaymentPreimage[:])
-
- // Update status for current payment to completed. If it fails,
- // the migration is aborted and the payment bucket is returned
- // to its previous state.
- return paymentStatuses.Put(paymentHash[:], StatusSucceeded.Bytes())
- })
- if err != nil {
- return err
- }
-
- log.Infof("Migration of payment statuses complete!")
-
- return nil
-}
-
-// MigratePruneEdgeUpdateIndex is a database migration that attempts to resolve
-// some lingering bugs with regards to edge policies and their update index.
-// Stale entries within the edge update index were not being properly pruned due
-// to a miscalculation on the offset of an edge's policy last update. This
-// migration also fixes the case where the public keys within edge policies were
-// being serialized with an extra byte, causing an even greater error when
-// attempting to perform the offset calculation described earlier.
-func MigratePruneEdgeUpdateIndex(tx kvdb.RwTx) er.R {
- // To begin the migration, we'll retrieve the update index bucket. If it
- // does not exist, we have nothing left to do so we can simply exit.
- edges := tx.ReadWriteBucket(edgeBucket)
- if edges == nil {
- return nil
- }
- edgeUpdateIndex := edges.NestedReadWriteBucket(edgeUpdateIndexBucket)
- if edgeUpdateIndex == nil {
- return nil
- }
-
- // Retrieve some buckets that will be needed later on. These should
- // already exist given the assumption that the buckets above do as
- // well.
- edgeIndex, err := edges.CreateBucketIfNotExists(edgeIndexBucket)
- if err != nil {
- return er.Errorf("error creating edge index bucket: %s", err)
- }
- if edgeIndex == nil {
- return er.Errorf("unable to create/fetch edge index " +
- "bucket")
- }
- nodes, err := tx.CreateTopLevelBucket(nodeBucket)
- if err != nil {
- return er.Errorf("unable to make node bucket")
- }
-
- log.Info("Migrating database to properly prune edge update index")
-
- // We'll need to properly prune all the outdated entries within the edge
- // update index. To do so, we'll gather all of the existing policies
- // within the graph to re-populate them later on.
- var edgeKeys [][]byte
- err = edges.ForEach(func(edgeKey, edgePolicyBytes []byte) er.R {
- // All valid entries are indexed by a public key (33 bytes)
- // followed by a channel ID (8 bytes), so we'll skip any entries
- // with keys that do not match this.
- if len(edgeKey) != 33+8 {
- return nil
- }
-
- edgeKeys = append(edgeKeys, edgeKey)
-
- return nil
- })
- if err != nil {
- return er.Errorf("unable to gather existing edge policies: %v",
- err)
- }
-
- log.Info("Constructing set of edge update entries to purge.")
-
- // Build the set of keys that we will remove from the edge update index.
- // This will include all keys contained within the bucket.
- var updateKeysToRemove [][]byte
- err = edgeUpdateIndex.ForEach(func(updKey, _ []byte) er.R {
- updateKeysToRemove = append(updateKeysToRemove, updKey)
- return nil
- })
- if err != nil {
- return er.Errorf("unable to gather existing edge updates: %v",
- err)
- }
-
- log.Infof("Removing %d entries from edge update index.",
- len(updateKeysToRemove))
-
- // With the set of keys contained in the edge update index constructed,
- // we'll proceed in purging all of them from the index.
- for _, updKey := range updateKeysToRemove {
- if err := edgeUpdateIndex.Delete(updKey); err != nil {
- return err
- }
- }
-
- log.Infof("Repopulating edge update index with %d valid entries.",
- len(edgeKeys))
-
- // For each edge key, we'll retrieve the policy, deserialize it, and
- // re-add it to the different buckets. By doing so, we'll ensure that
- // all existing edge policies are serialized correctly within their
- // respective buckets and that the correct entries are populated within
- // the edge update index.
- for _, edgeKey := range edgeKeys {
- edgePolicyBytes := edges.Get(edgeKey)
-
- // Skip any entries with unknown policies as there will not be
- // any entries for them in the edge update index.
- if bytes.Equal(edgePolicyBytes[:], unknownPolicy) {
- continue
- }
-
- edgePolicy, err := deserializeChanEdgePolicy(
- bytes.NewReader(edgePolicyBytes), nodes,
- )
- if err != nil {
- return err
- }
-
- _, err = updateEdgePolicy(tx, edgePolicy)
- if err != nil {
- return err
- }
- }
-
- log.Info("Migration to properly prune edge update index complete!")
-
- return nil
-}
-
-// MigrateOptionalChannelCloseSummaryFields migrates the serialized format of
-// ChannelCloseSummary to a format where optional fields' presence is indicated
-// with boolean markers.
-func MigrateOptionalChannelCloseSummaryFields(tx kvdb.RwTx) er.R {
- closedChanBucket := tx.ReadWriteBucket(closedChannelBucket)
- if closedChanBucket == nil {
- return nil
- }
-
- log.Info("Migrating to new closed channel format...")
-
- // We store the converted keys and values and put them back into the
- // database after the loop, since modifying the bucket within the
- // ForEach loop is not safe.
- var closedChansKeys [][]byte
- var closedChansValues [][]byte
- err := closedChanBucket.ForEach(func(chanID, summary []byte) er.R {
- r := bytes.NewReader(summary)
-
- // Read the old (v6) format from the database.
- c, err := deserializeCloseChannelSummaryV6(r)
- if err != nil {
- return err
- }
-
- // Serialize using the new format, and put back into the
- // bucket.
- var b bytes.Buffer
- if err := serializeChannelCloseSummary(&b, c); err != nil {
- return err
- }
-
- // Now that we know the modifications was successful, we'll
- // Store the key and value to our slices, and write it back to
- // disk in the new format after the ForEach loop is over.
- closedChansKeys = append(closedChansKeys, chanID)
- closedChansValues = append(closedChansValues, b.Bytes())
- return nil
- })
- if err != nil {
- return er.Errorf("unable to update closed channels: %v", err)
- }
-
- // Now put the new format back into the DB.
- for i := range closedChansKeys {
- key := closedChansKeys[i]
- value := closedChansValues[i]
- if err := closedChanBucket.Put(key, value); err != nil {
- return err
- }
- }
-
- log.Info("Migration to new closed channel format complete!")
-
- return nil
-}
-
-var messageStoreBucket = []byte("message-store")
-
-// MigrateGossipMessageStoreKeys migrates the key format for gossip messages
-// found in the message store to a new one that takes into consideration the of
-// the message being stored.
-func MigrateGossipMessageStoreKeys(tx kvdb.RwTx) er.R {
- // We'll start by retrieving the bucket in which these messages are
- // stored within. If there isn't one, there's nothing left for us to do
- // so we can avoid the migration.
- messageStore := tx.ReadWriteBucket(messageStoreBucket)
- if messageStore == nil {
- return nil
- }
-
- log.Info("Migrating to the gossip message store new key format")
-
- // Otherwise we'll proceed with the migration. We'll start by coalescing
- // all the current messages within the store, which are indexed by the
- // public key of the peer which they should be sent to, followed by the
- // short channel ID of the channel for which the message belongs to. We
- // should only expect to find channel announcement signatures as that
- // was the only support message type previously.
- msgs := make(map[[33 + 8]byte]*lnwire.AnnounceSignatures)
- err := messageStore.ForEach(func(k, v []byte) er.R {
- var msgKey [33 + 8]byte
- copy(msgKey[:], k)
-
- msg := &lnwire.AnnounceSignatures{}
- if err := msg.Decode(bytes.NewReader(v), 0); err != nil {
- return err
- }
-
- msgs[msgKey] = msg
-
- return nil
-
- })
- if err != nil {
- return err
- }
-
- // Then, we'll go over all of our messages, remove their previous entry,
- // and add another with the new key format. Once we've done this for
- // every message, we can consider the migration complete.
- for oldMsgKey, msg := range msgs {
- if err := messageStore.Delete(oldMsgKey[:]); err != nil {
- return err
- }
-
- // Construct the new key for which we'll find this message with
- // in the store. It'll be the same as the old, but we'll also
- // include the message type.
- var msgType [2]byte
- binary.BigEndian.PutUint16(msgType[:], uint16(msg.MsgType()))
- newMsgKey := append(oldMsgKey[:], msgType[:]...)
-
- // Serialize the message with its wire encoding.
- var b bytes.Buffer
- if _, err := lnwire.WriteMessage(&b, msg, 0); err != nil {
- return err
- }
-
- if err := messageStore.Put(newMsgKey, b.Bytes()); err != nil {
- return err
- }
- }
-
- log.Info("Migration to the gossip message store new key format complete!")
-
- return nil
-}
-
-// MigrateOutgoingPayments moves the OutgoingPayments into a new bucket format
-// where they all reside in a top-level bucket indexed by the payment hash. In
-// this sub-bucket we store information relevant to this payment, such as the
-// payment status.
-//
-// Since the router cannot handle resumed payments that have the status
-// InFlight (we have no PaymentAttemptInfo available for pre-migration
-// payments) we delete those statuses, so only Completed payments remain in the
-// new bucket structure.
-func MigrateOutgoingPayments(tx kvdb.RwTx) er.R {
- log.Infof("Migrating outgoing payments to new bucket structure")
-
- oldPayments := tx.ReadWriteBucket(paymentBucket)
-
- // Return early if there are no payments to migrate.
- if oldPayments == nil {
- log.Infof("No outgoing payments found, nothing to migrate.")
- return nil
- }
-
- newPayments, err := tx.CreateTopLevelBucket(paymentsRootBucket)
- if err != nil {
- return err
- }
-
- // Helper method to get the source pubkey. We define it such that we
- // only attempt to fetch it if needed.
- sourcePub := func() ([33]byte, er.R) {
- var pub [33]byte
- nodes := tx.ReadWriteBucket(nodeBucket)
- if nodes == nil {
- return pub, ErrGraphNotFound.Default()
- }
-
- selfPub := nodes.Get(sourceKey)
- if selfPub == nil {
- return pub, ErrSourceNodeNotSet.Default()
- }
- copy(pub[:], selfPub[:])
- return pub, nil
- }
-
- err = oldPayments.ForEach(func(k, v []byte) er.R {
- // Ignores if it is sub-bucket.
- if v == nil {
- return nil
- }
-
- // Read the old payment format.
- r := bytes.NewReader(v)
- payment, err := deserializeOutgoingPayment(r)
- if err != nil {
- return err
- }
-
- // Calculate payment hash from the payment preimage.
- paymentHash := sha256.Sum256(payment.PaymentPreimage[:])
-
- // Now create and add a PaymentCreationInfo to the bucket.
- c := &PaymentCreationInfo{
- PaymentHash: paymentHash,
- Value: payment.Terms.Value,
- CreationDate: payment.CreationDate,
- PaymentRequest: payment.PaymentRequest,
- }
-
- var infoBuf bytes.Buffer
- if err := serializePaymentCreationInfo(&infoBuf, c); err != nil {
- return err
- }
-
- sourcePubKey, err := sourcePub()
- if err != nil {
- return err
- }
-
- // Do the same for the PaymentAttemptInfo.
- totalAmt := payment.Terms.Value + payment.Fee
- rt := Route{
- TotalTimeLock: payment.TimeLockLength,
- TotalAmount: totalAmt,
- SourcePubKey: sourcePubKey,
- Hops: []*Hop{},
- }
- for _, hop := range payment.Path {
- rt.Hops = append(rt.Hops, &Hop{
- PubKeyBytes: hop,
- AmtToForward: totalAmt,
- })
- }
-
- // Since the old format didn't store the fee for individual
- // hops, we let the last hop eat the whole fee for the total to
- // add up.
- if len(rt.Hops) > 0 {
- rt.Hops[len(rt.Hops)-1].AmtToForward = payment.Terms.Value
- }
-
- // Since we don't have the session key for old payments, we
- // create a random one to be able to serialize the attempt
- // info.
- priv, _ := btcec.NewPrivateKey(btcec.S256())
- s := &PaymentAttemptInfo{
- PaymentID: 0, // unknown.
- SessionKey: priv, // unknown.
- Route: rt,
- }
-
- var attemptBuf bytes.Buffer
- if err := serializePaymentAttemptInfoMigration9(&attemptBuf, s); err != nil {
- return err
- }
-
- // Reuse the existing payment sequence number.
- var seqNum [8]byte
- copy(seqNum[:], k)
-
- // Create a bucket indexed by the payment hash.
- bucket, err := newPayments.CreateBucket(paymentHash[:])
-
- // If the bucket already exists, it means that we are migrating
- // from a database containing duplicate payments to a payment
- // hash. To keep this information, we store such duplicate
- // payments in a sub-bucket.
- if kvdb.ErrBucketExists.Is(err) {
- pHashBucket := newPayments.NestedReadWriteBucket(paymentHash[:])
-
- // Create a bucket for duplicate payments within this
- // payment hash's bucket.
- dup, err := pHashBucket.CreateBucketIfNotExists(
- paymentDuplicateBucket,
- )
- if err != nil {
- return err
- }
-
- // Each duplicate will get its own sub-bucket within
- // this bucket, so use their sequence number to index
- // them by.
- bucket, err = dup.CreateBucket(seqNum[:])
- if err != nil {
- return err
- }
-
- } else if err != nil {
- return err
- }
-
- // Store the payment's information to the bucket.
- err = bucket.Put(paymentSequenceKey, seqNum[:])
- if err != nil {
- return err
- }
-
- err = bucket.Put(paymentCreationInfoKey, infoBuf.Bytes())
- if err != nil {
- return err
- }
-
- err = bucket.Put(paymentAttemptInfoKey, attemptBuf.Bytes())
- if err != nil {
- return err
- }
-
- err = bucket.Put(paymentSettleInfoKey, payment.PaymentPreimage[:])
- if err != nil {
- return err
- }
-
- return nil
- })
- if err != nil {
- return err
- }
-
- // To continue producing unique sequence numbers, we set the sequence
- // of the new bucket to that of the old one.
- seq := oldPayments.Sequence()
- if err := newPayments.SetSequence(seq); err != nil {
- return err
- }
-
- // Now we delete the old buckets. Deleting the payment status buckets
- // deletes all payment statuses other than Complete.
- err = tx.DeleteTopLevelBucket(paymentStatusBucket)
- if err != nil && !kvdb.ErrBucketNotFound.Is(err) {
- return err
- }
-
- // Finally delete the old payment bucket.
- err = tx.DeleteTopLevelBucket(paymentBucket)
- if err != nil && !kvdb.ErrBucketNotFound.Is(err) {
- return err
- }
-
- log.Infof("Migration of outgoing payment bucket structure completed!")
- return nil
-}
diff --git a/lnd/channeldb/migration_01_to_11/migrations_test.go b/lnd/channeldb/migration_01_to_11/migrations_test.go
deleted file mode 100644
index 0d2edbe2..00000000
--- a/lnd/channeldb/migration_01_to_11/migrations_test.go
+++ /dev/null
@@ -1,932 +0,0 @@
-package migration_01_to_11
-
-import (
- "bytes"
- "crypto/sha256"
- "encoding/binary"
- "math/rand"
- "reflect"
- "testing"
- "time"
-
- "github.com/davecgh/go-spew/spew"
- "github.com/pkt-cash/pktd/btcutil"
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/lnd/channeldb/kvdb"
- "github.com/pkt-cash/pktd/lnd/lntypes"
- "github.com/pkt-cash/pktd/lnd/lnwire"
-)
-
-// TestPaymentStatusesMigration checks that already completed payments will have
-// their payment statuses set to Completed after the migration.
-func TestPaymentStatusesMigration(t *testing.T) {
- t.Parallel()
-
- fakePayment := makeFakePayment()
- paymentHash := sha256.Sum256(fakePayment.PaymentPreimage[:])
-
- // Add fake payment to test database, verifying that it was created,
- // that we have only one payment, and its status is not "Completed".
- beforeMigrationFunc := func(d *DB) {
- if err := d.addPayment(fakePayment); err != nil {
- t.Fatalf("unable to add payment: %v", err)
- }
-
- payments, err := d.fetchAllPayments()
- if err != nil {
- t.Fatalf("unable to fetch payments: %v", err)
- }
-
- if len(payments) != 1 {
- t.Fatalf("wrong qty of paymets: expected 1, got %v",
- len(payments))
- }
-
- paymentStatus, err := d.fetchPaymentStatus(paymentHash)
- if err != nil {
- t.Fatalf("unable to fetch payment status: %v", err)
- }
-
- // We should receive default status if we have any in database.
- if paymentStatus != StatusUnknown {
- t.Fatalf("wrong payment status: expected %v, got %v",
- StatusUnknown.String(), paymentStatus.String())
- }
-
- // Lastly, we'll add a locally-sourced circuit and
- // non-locally-sourced circuit to the circuit map. The
- // locally-sourced payment should end up with an InFlight
- // status, while the other should remain unchanged, which
- // defaults to Grounded.
- err = kvdb.Update(d, func(tx kvdb.RwTx) er.R {
- circuits, err := tx.CreateTopLevelBucket(
- []byte("circuit-adds"),
- )
- if err != nil {
- return err
- }
-
- groundedKey := make([]byte, 16)
- binary.BigEndian.PutUint64(groundedKey[:8], 1)
- binary.BigEndian.PutUint64(groundedKey[8:], 1)
-
- // Generated using TestHalfCircuitSerialization with nil
- // ErrorEncrypter, which is the case for locally-sourced
- // payments. No payment status should end up being set
- // for this circuit, since the short channel id of the
- // key is non-zero (e.g., a forwarded circuit). This
- // will default it to Grounded.
- groundedCircuit := []byte{
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x01,
- // start payment hash
- 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- // end payment hash
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x0f,
- 0x42, 0x40, 0x00,
- }
-
- err = circuits.Put(groundedKey, groundedCircuit)
- if err != nil {
- return err
- }
-
- inFlightKey := make([]byte, 16)
- binary.BigEndian.PutUint64(inFlightKey[:8], 0)
- binary.BigEndian.PutUint64(inFlightKey[8:], 1)
-
- // Generated using TestHalfCircuitSerialization with nil
- // ErrorEncrypter, which is not the case for forwarded
- // payments, but should have no impact on the
- // correctness of the test. The payment status for this
- // circuit should be set to InFlight, since the short
- // channel id in the key is 0 (sourceHop).
- inFlightCircuit := []byte{
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x01,
- // start payment hash
- 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- // end payment hash
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x0f,
- 0x42, 0x40, 0x00,
- }
-
- return circuits.Put(inFlightKey, inFlightCircuit)
- }, func() {})
- if err != nil {
- t.Fatalf("unable to add circuit map entry: %v", err)
- }
- }
-
- // Verify that the created payment status is "Completed" for our one
- // fake payment.
- afterMigrationFunc := func(d *DB) {
- // Check that our completed payments were migrated.
- paymentStatus, err := d.fetchPaymentStatus(paymentHash)
- if err != nil {
- t.Fatalf("unable to fetch payment status: %v", err)
- }
-
- if paymentStatus != StatusSucceeded {
- t.Fatalf("wrong payment status: expected %v, got %v",
- StatusSucceeded.String(), paymentStatus.String())
- }
-
- inFlightHash := [32]byte{
- 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- }
-
- // Check that the locally sourced payment was transitioned to
- // InFlight.
- paymentStatus, err = d.fetchPaymentStatus(inFlightHash)
- if err != nil {
- t.Fatalf("unable to fetch payment status: %v", err)
- }
-
- if paymentStatus != StatusInFlight {
- t.Fatalf("wrong payment status: expected %v, got %v",
- StatusInFlight.String(), paymentStatus.String())
- }
-
- groundedHash := [32]byte{
- 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- }
-
- // Check that non-locally sourced payments remain in the default
- // Grounded state.
- paymentStatus, err = d.fetchPaymentStatus(groundedHash)
- if err != nil {
- t.Fatalf("unable to fetch payment status: %v", err)
- }
-
- if paymentStatus != StatusUnknown {
- t.Fatalf("wrong payment status: expected %v, got %v",
- StatusUnknown.String(), paymentStatus.String())
- }
- }
-
- applyMigration(t,
- beforeMigrationFunc,
- afterMigrationFunc,
- PaymentStatusesMigration,
- false)
-}
-
-// TestMigrateOptionalChannelCloseSummaryFields properly converts a
-// ChannelCloseSummary to the v7 format, where optional fields have their
-// presence indicated with boolean markers.
-func TestMigrateOptionalChannelCloseSummaryFields(t *testing.T) {
- t.Parallel()
-
- chanState, err := createTestChannelState(nil)
- if err != nil {
- t.Fatalf("unable to create channel state: %v", err)
- }
-
- var chanPointBuf bytes.Buffer
- err = writeOutpoint(&chanPointBuf, &chanState.FundingOutpoint)
- if err != nil {
- t.Fatalf("unable to write outpoint: %v", err)
- }
-
- chanID := chanPointBuf.Bytes()
-
- testCases := []struct {
- closeSummary *ChannelCloseSummary
- oldSerialization func(c *ChannelCloseSummary) []byte
- }{
- {
- // A close summary where none of the new fields are
- // set.
- closeSummary: &ChannelCloseSummary{
- ChanPoint: chanState.FundingOutpoint,
- ShortChanID: chanState.ShortChanID(),
- ChainHash: chanState.ChainHash,
- ClosingTXID: testTx.TxHash(),
- CloseHeight: 100,
- RemotePub: chanState.IdentityPub,
- Capacity: chanState.Capacity,
- SettledBalance: btcutil.Amount(50000),
- CloseType: RemoteForceClose,
- IsPending: true,
-
- // The last fields will be unset.
- RemoteCurrentRevocation: nil,
- LocalChanConfig: ChannelConfig{},
- RemoteNextRevocation: nil,
- },
-
- // In the old format the last field written is the
- // IsPendingField. It should be converted by adding an
- // extra boolean marker at the end to indicate that the
- // remaining fields are not there.
- oldSerialization: func(cs *ChannelCloseSummary) []byte {
- var buf bytes.Buffer
- err := WriteElements(&buf, cs.ChanPoint,
- cs.ShortChanID, cs.ChainHash,
- cs.ClosingTXID, cs.CloseHeight,
- cs.RemotePub, cs.Capacity,
- cs.SettledBalance, cs.TimeLockedBalance,
- cs.CloseType, cs.IsPending,
- )
- if err != nil {
- t.Fatal(err)
- }
-
- // For the old format, these are all the fields
- // that are written.
- return buf.Bytes()
- },
- },
- {
- // A close summary where the new fields are present,
- // but the optional RemoteNextRevocation field is not
- // set.
- closeSummary: &ChannelCloseSummary{
- ChanPoint: chanState.FundingOutpoint,
- ShortChanID: chanState.ShortChanID(),
- ChainHash: chanState.ChainHash,
- ClosingTXID: testTx.TxHash(),
- CloseHeight: 100,
- RemotePub: chanState.IdentityPub,
- Capacity: chanState.Capacity,
- SettledBalance: btcutil.Amount(50000),
- CloseType: RemoteForceClose,
- IsPending: true,
- RemoteCurrentRevocation: chanState.RemoteCurrentRevocation,
- LocalChanConfig: chanState.LocalChanCfg,
-
- // RemoteNextRevocation is optional, and here
- // it is not set.
- RemoteNextRevocation: nil,
- },
-
- // In the old format the last field written is the
- // LocalChanConfig. This indicates that the optional
- // RemoteNextRevocation field is not present. It should
- // be converted by adding boolean markers for all these
- // fields.
- oldSerialization: func(cs *ChannelCloseSummary) []byte {
- var buf bytes.Buffer
- err := WriteElements(&buf, cs.ChanPoint,
- cs.ShortChanID, cs.ChainHash,
- cs.ClosingTXID, cs.CloseHeight,
- cs.RemotePub, cs.Capacity,
- cs.SettledBalance, cs.TimeLockedBalance,
- cs.CloseType, cs.IsPending,
- )
- if err != nil {
- t.Fatal(err)
- }
-
- err = WriteElements(&buf, cs.RemoteCurrentRevocation)
- if err != nil {
- t.Fatal(err)
- }
-
- err = writeChanConfig(&buf, &cs.LocalChanConfig)
- if err != nil {
- t.Fatal(err)
- }
-
- // RemoteNextRevocation is not written.
- return buf.Bytes()
- },
- },
- {
- // A close summary where all fields are present.
- closeSummary: &ChannelCloseSummary{
- ChanPoint: chanState.FundingOutpoint,
- ShortChanID: chanState.ShortChanID(),
- ChainHash: chanState.ChainHash,
- ClosingTXID: testTx.TxHash(),
- CloseHeight: 100,
- RemotePub: chanState.IdentityPub,
- Capacity: chanState.Capacity,
- SettledBalance: btcutil.Amount(50000),
- CloseType: RemoteForceClose,
- IsPending: true,
- RemoteCurrentRevocation: chanState.RemoteCurrentRevocation,
- LocalChanConfig: chanState.LocalChanCfg,
-
- // RemoteNextRevocation is optional, and in
- // this case we set it.
- RemoteNextRevocation: chanState.RemoteNextRevocation,
- },
-
- // In the old format all the fields are written. It
- // should be converted by adding boolean markers for
- // all these fields.
- oldSerialization: func(cs *ChannelCloseSummary) []byte {
- var buf bytes.Buffer
- err := WriteElements(&buf, cs.ChanPoint,
- cs.ShortChanID, cs.ChainHash,
- cs.ClosingTXID, cs.CloseHeight,
- cs.RemotePub, cs.Capacity,
- cs.SettledBalance, cs.TimeLockedBalance,
- cs.CloseType, cs.IsPending,
- )
- if err != nil {
- t.Fatal(err)
- }
-
- err = WriteElements(&buf, cs.RemoteCurrentRevocation)
- if err != nil {
- t.Fatal(err)
- }
-
- err = writeChanConfig(&buf, &cs.LocalChanConfig)
- if err != nil {
- t.Fatal(err)
- }
-
- err = WriteElements(&buf, cs.RemoteNextRevocation)
- if err != nil {
- t.Fatal(err)
- }
-
- return buf.Bytes()
- },
- },
- }
-
- for _, test := range testCases {
-
- // Before the migration we must add the old format to the DB.
- beforeMigrationFunc := func(d *DB) {
-
- // Get the old serialization format for this test's
- // close summary, and it to the closed channel bucket.
- old := test.oldSerialization(test.closeSummary)
- err = kvdb.Update(d, func(tx kvdb.RwTx) er.R {
- closedChanBucket, err := tx.CreateTopLevelBucket(
- closedChannelBucket,
- )
- if err != nil {
- return err
- }
- return closedChanBucket.Put(chanID, old)
- }, func() {})
- if err != nil {
- t.Fatalf("unable to add old serialization: %v",
- err)
- }
- }
-
- // After the migration it should be found in the new format.
- afterMigrationFunc := func(d *DB) {
- // We generate the new serialized version, to check
- // against what is found in the DB.
- var b bytes.Buffer
- err = serializeChannelCloseSummary(&b, test.closeSummary)
- if err != nil {
- t.Fatalf("unable to serialize: %v", err)
- }
- newSerialization := b.Bytes()
-
- var dbSummary []byte
- err = kvdb.View(d, func(tx kvdb.RTx) er.R {
- closedChanBucket := tx.ReadBucket(closedChannelBucket)
- if closedChanBucket == nil {
- return er.New("unable to find bucket")
- }
-
- // Get the serialized verision from the DB and
- // make sure it matches what we expected.
- dbSummary = closedChanBucket.Get(chanID)
- if !bytes.Equal(dbSummary, newSerialization) {
- return er.Errorf("unexpected new " +
- "serialization")
- }
- return nil
- }, func() {
- dbSummary = nil
- })
- if err != nil {
- t.Fatalf("unable to view DB: %v", err)
- }
-
- // Finally we fetch the deserialized summary from the
- // DB and check that it is equal to our original one.
- dbChannels, err := d.FetchClosedChannels(false)
- if err != nil {
- t.Fatalf("unable to fetch closed channels: %v",
- err)
- }
-
- if len(dbChannels) != 1 {
- t.Fatalf("expected 1 closed channels, found %v",
- len(dbChannels))
- }
-
- dbChan := dbChannels[0]
- if !reflect.DeepEqual(dbChan, test.closeSummary) {
- dbChan.RemotePub.Curve = nil
- test.closeSummary.RemotePub.Curve = nil
- t.Fatalf("not equal: %v vs %v",
- spew.Sdump(dbChan),
- spew.Sdump(test.closeSummary))
- }
-
- }
-
- applyMigration(t,
- beforeMigrationFunc,
- afterMigrationFunc,
- MigrateOptionalChannelCloseSummaryFields,
- false)
- }
-}
-
-// TestMigrateGossipMessageStoreKeys ensures that the migration to the new
-// gossip message store key format is successful/unsuccessful under various
-// scenarios.
-func TestMigrateGossipMessageStoreKeys(t *testing.T) {
- t.Parallel()
-
- // Construct the message which we'll use to test the migration, along
- // with its old and new key formats.
- shortChanID := lnwire.ShortChannelID{BlockHeight: 10}
- msg := &lnwire.AnnounceSignatures{ShortChannelID: shortChanID}
-
- var oldMsgKey [33 + 8]byte
- copy(oldMsgKey[:33], pubKey.SerializeCompressed())
- binary.BigEndian.PutUint64(oldMsgKey[33:41], shortChanID.ToUint64())
-
- var newMsgKey [33 + 8 + 2]byte
- copy(newMsgKey[:41], oldMsgKey[:])
- binary.BigEndian.PutUint16(newMsgKey[41:43], uint16(msg.MsgType()))
-
- // Before the migration, we'll create the bucket where the messages
- // should live and insert them.
- beforeMigration := func(db *DB) {
- var b bytes.Buffer
- if err := msg.Encode(&b, 0); err != nil {
- t.Fatalf("unable to serialize message: %v", err)
- }
-
- err := kvdb.Update(db, func(tx kvdb.RwTx) er.R {
- messageStore, err := tx.CreateTopLevelBucket(
- messageStoreBucket,
- )
- if err != nil {
- return err
- }
-
- return messageStore.Put(oldMsgKey[:], b.Bytes())
- }, func() {})
- if err != nil {
- t.Fatal(err)
- }
- }
-
- // After the migration, we'll make sure that:
- // 1. We cannot find the message under its old key.
- // 2. We can find the message under its new key.
- // 3. The message matches the original.
- afterMigration := func(db *DB) {
- var rawMsg []byte
- err := kvdb.View(db, func(tx kvdb.RTx) er.R {
- messageStore := tx.ReadBucket(messageStoreBucket)
- if messageStore == nil {
- return er.New("message store bucket not " +
- "found")
- }
- rawMsg = messageStore.Get(oldMsgKey[:])
- if rawMsg != nil {
- t.Fatal("expected to not find message under " +
- "old key, but did")
- }
- rawMsg = messageStore.Get(newMsgKey[:])
- if rawMsg == nil {
- return er.Errorf("expected to find message " +
- "under new key, but didn't")
- }
-
- return nil
- }, func() {
- rawMsg = nil
- })
- if err != nil {
- t.Fatal(err)
- }
-
- gotMsg, errr := lnwire.ReadMessage(bytes.NewReader(rawMsg), 0)
- if errr != nil {
- t.Fatalf("unable to deserialize raw message: %v", errr)
- }
- if !reflect.DeepEqual(msg, gotMsg) {
- t.Fatalf("expected message: %v\ngot message: %v",
- spew.Sdump(msg), spew.Sdump(gotMsg))
- }
- }
-
- applyMigration(
- t, beforeMigration, afterMigration,
- MigrateGossipMessageStoreKeys, false,
- )
-}
-
-// TestOutgoingPaymentsMigration checks that OutgoingPayments are migrated to a
-// new bucket structure after the migration.
-func TestOutgoingPaymentsMigration(t *testing.T) {
- t.Parallel()
-
- const numPayments = 4
- var oldPayments []*outgoingPayment
-
- // Add fake payments to test database, verifying that it was created.
- beforeMigrationFunc := func(d *DB) {
- for i := 0; i < numPayments; i++ {
- var p *outgoingPayment
- var err er.R
-
- // We fill the database with random payments. For the
- // very last one we'll use a duplicate of the first, to
- // ensure we are able to handle migration from a
- // database that has copies.
- if i < numPayments-1 {
- p, err = makeRandomFakePayment()
- if err != nil {
- t.Fatalf("unable to create payment: %v",
- err)
- }
- } else {
- p = oldPayments[0]
- }
-
- if err := d.addPayment(p); err != nil {
- t.Fatalf("unable to add payment: %v", err)
- }
-
- oldPayments = append(oldPayments, p)
- }
-
- payments, err := d.fetchAllPayments()
- if err != nil {
- t.Fatalf("unable to fetch payments: %v", err)
- }
-
- if len(payments) != numPayments {
- t.Fatalf("wrong qty of paymets: expected %d got %v",
- numPayments, len(payments))
- }
- }
-
- // Verify that all payments were migrated.
- afterMigrationFunc := func(d *DB) {
- sentPayments, err := d.fetchPaymentsMigration9()
- if err != nil {
- t.Fatalf("unable to fetch sent payments: %v", err)
- }
-
- if len(sentPayments) != numPayments {
- t.Fatalf("expected %d payments, got %d", numPayments,
- len(sentPayments))
- }
-
- graph := d.ChannelGraph()
- sourceNode, err := graph.SourceNode()
- if err != nil {
- t.Fatalf("unable to fetch source node: %v", err)
- }
-
- for i, p := range sentPayments {
- // The payment status should be Completed.
- if p.Status != StatusSucceeded {
- t.Fatalf("expected Completed, got %v", p.Status)
- }
-
- // Check that the sequence number is preserved. They
- // start counting at 1.
- if p.sequenceNum != uint64(i+1) {
- t.Fatalf("expected seqnum %d, got %d", i,
- p.sequenceNum)
- }
-
- // Order of payments should be be preserved.
- old := oldPayments[i]
-
- // Check the individial fields.
- if p.Info.Value != old.Terms.Value {
- t.Fatalf("value mismatch")
- }
-
- if p.Info.CreationDate != old.CreationDate {
- t.Fatalf("date mismatch")
- }
-
- if !bytes.Equal(p.Info.PaymentRequest, old.PaymentRequest) {
- t.Fatalf("payreq mismatch")
- }
-
- if *p.PaymentPreimage != old.PaymentPreimage {
- t.Fatalf("preimage mismatch")
- }
-
- if p.Attempt.Route.TotalFees() != old.Fee {
- t.Fatalf("Fee mismatch")
- }
-
- if p.Attempt.Route.TotalAmount != old.Fee+old.Terms.Value {
- t.Fatalf("Total amount mismatch")
- }
-
- if p.Attempt.Route.TotalTimeLock != old.TimeLockLength {
- t.Fatalf("timelock mismatch")
- }
-
- if p.Attempt.Route.SourcePubKey != sourceNode.PubKeyBytes {
- t.Fatalf("source mismatch: %x vs %x",
- p.Attempt.Route.SourcePubKey[:],
- sourceNode.PubKeyBytes[:])
- }
-
- for i, hop := range old.Path {
- if hop != p.Attempt.Route.Hops[i].PubKeyBytes {
- t.Fatalf("path mismatch")
- }
- }
- }
-
- // Finally, check that the payment sequence number is updated
- // to reflect the migrated payments.
- err = kvdb.Update(d, func(tx kvdb.RwTx) er.R {
- payments := tx.ReadWriteBucket(paymentsRootBucket)
- if payments == nil {
- return er.Errorf("payments bucket not found")
- }
-
- seq := payments.Sequence()
- if seq != numPayments {
- return er.Errorf("expected sequence to be "+
- "%d, got %d", numPayments, seq)
- }
-
- return nil
- }, func() {})
- if err != nil {
- t.Fatal(err)
- }
- }
-
- applyMigration(t,
- beforeMigrationFunc,
- afterMigrationFunc,
- MigrateOutgoingPayments,
- false)
-}
-
-func makeRandPaymentCreationInfo() (*PaymentCreationInfo, er.R) {
- var payHash lntypes.Hash
- if _, err := rand.Read(payHash[:]); err != nil {
- return nil, er.E(err)
- }
-
- return &PaymentCreationInfo{
- PaymentHash: payHash,
- Value: lnwire.MilliSatoshi(rand.Int63()),
- CreationDate: time.Now(),
- PaymentRequest: []byte("test"),
- }, nil
-}
-
-// TestPaymentRouteSerialization tests that we're able to properly migrate
-// existing payments on disk that contain the traversed routes to the new
-// routing format which supports the TLV payloads. We also test that the
-// migration is able to handle duplicate payment attempts.
-func TestPaymentRouteSerialization(t *testing.T) {
- t.Parallel()
-
- legacyHop1 := &Hop{
- PubKeyBytes: NewVertex(pub),
- ChannelID: 12345,
- OutgoingTimeLock: 111,
- LegacyPayload: true,
- AmtToForward: 555,
- }
- legacyHop2 := &Hop{
- PubKeyBytes: NewVertex(pub),
- ChannelID: 12345,
- OutgoingTimeLock: 111,
- LegacyPayload: true,
- AmtToForward: 555,
- }
- legacyRoute := Route{
- TotalTimeLock: 123,
- TotalAmount: 1234567,
- SourcePubKey: NewVertex(pub),
- Hops: []*Hop{legacyHop1, legacyHop2},
- }
-
- const numPayments = 4
- var oldPayments []*Payment
-
- sharedPayAttempt := PaymentAttemptInfo{
- PaymentID: 1,
- SessionKey: priv,
- Route: legacyRoute,
- }
-
- // We'll first add a series of fake payments, using the existing legacy
- // serialization format.
- beforeMigrationFunc := func(d *DB) {
- err := kvdb.Update(d, func(tx kvdb.RwTx) er.R {
- paymentsBucket, err := tx.CreateTopLevelBucket(
- paymentsRootBucket,
- )
- if err != nil {
- t.Fatalf("unable to create new payments "+
- "bucket: %v", err)
- }
-
- for i := 0; i < numPayments; i++ {
- var seqNum [8]byte
- byteOrder.PutUint64(seqNum[:], uint64(i))
-
- // All payments will be randomly generated,
- // other than the final payment. We'll force
- // the final payment to re-use an existing
- // payment hash so we can insert it into the
- // duplicate payment hash bucket.
- var payInfo *PaymentCreationInfo
- if i < numPayments-1 {
- payInfo, err = makeRandPaymentCreationInfo()
- if err != nil {
- t.Fatalf("unable to create "+
- "payment: %v", err)
- }
- } else {
- payInfo = oldPayments[0].Info
- }
-
- // Next, legacy encoded when needed, we'll
- // serialize the info and the attempt.
- var payInfoBytes bytes.Buffer
- errr := serializePaymentCreationInfo(
- &payInfoBytes, payInfo,
- )
- if errr != nil {
- t.Fatalf("unable to encode pay "+
- "info: %v", errr)
- }
- var payAttemptBytes bytes.Buffer
- errr = serializePaymentAttemptInfoLegacy(
- &payAttemptBytes, &sharedPayAttempt,
- )
- if errr != nil {
- t.Fatalf("unable to encode payment attempt: "+
- "%v", errr)
- }
-
- // Before we write to disk, we'll need to fetch
- // the proper bucket. If this is the duplicate
- // payment, then we'll grab the dup bucket,
- // otherwise, we'll use the top level bucket.
- var payHashBucket kvdb.RwBucket
- if i < numPayments-1 {
- payHashBucket, err = paymentsBucket.CreateBucket(
- payInfo.PaymentHash[:],
- )
- if err != nil {
- t.Fatalf("unable to create payments bucket: %v", err)
- }
- } else {
- payHashBucket = paymentsBucket.NestedReadWriteBucket(
- payInfo.PaymentHash[:],
- )
- dupPayBucket, err := payHashBucket.CreateBucket(
- paymentDuplicateBucket,
- )
- if err != nil {
- t.Fatalf("unable to create "+
- "dup hash bucket: %v", err)
- }
-
- payHashBucket, err = dupPayBucket.CreateBucket(
- seqNum[:],
- )
- if err != nil {
- t.Fatalf("unable to make dup "+
- "bucket: %v", err)
- }
- }
-
- err = payHashBucket.Put(paymentSequenceKey, seqNum[:])
- if err != nil {
- t.Fatalf("unable to write seqno: %v", err)
- }
-
- err = payHashBucket.Put(
- paymentCreationInfoKey, payInfoBytes.Bytes(),
- )
- if err != nil {
- t.Fatalf("unable to write creation "+
- "info: %v", err)
- }
-
- err = payHashBucket.Put(
- paymentAttemptInfoKey, payAttemptBytes.Bytes(),
- )
- if err != nil {
- t.Fatalf("unable to write attempt "+
- "info: %v", err)
- }
-
- oldPayments = append(oldPayments, &Payment{
- Info: payInfo,
- Attempt: &sharedPayAttempt,
- })
- }
-
- return nil
- }, func() {
- oldPayments = nil
- })
- if err != nil {
- t.Fatalf("unable to create test payments: %v", err)
- }
- }
-
- afterMigrationFunc := func(d *DB) {
- newPayments, err := d.FetchPayments()
- if err != nil {
- t.Fatalf("unable to fetch new payments: %v", err)
- }
-
- if len(newPayments) != numPayments {
- t.Fatalf("expected %d payments, got %d", numPayments,
- len(newPayments))
- }
-
- for i, p := range newPayments {
- // Order of payments should be be preserved.
- old := oldPayments[i]
-
- if p.Attempt.PaymentID != old.Attempt.PaymentID {
- t.Fatalf("wrong pay ID: expected %v, got %v",
- p.Attempt.PaymentID,
- old.Attempt.PaymentID)
- }
-
- if p.Attempt.Route.TotalFees() != old.Attempt.Route.TotalFees() {
- t.Fatalf("Fee mismatch")
- }
-
- if p.Attempt.Route.TotalAmount != old.Attempt.Route.TotalAmount {
- t.Fatalf("Total amount mismatch")
- }
-
- if p.Attempt.Route.TotalTimeLock != old.Attempt.Route.TotalTimeLock {
- t.Fatalf("timelock mismatch")
- }
-
- if p.Attempt.Route.SourcePubKey != old.Attempt.Route.SourcePubKey {
- t.Fatalf("source mismatch: %x vs %x",
- p.Attempt.Route.SourcePubKey[:],
- old.Attempt.Route.SourcePubKey[:])
- }
-
- for i, hop := range p.Attempt.Route.Hops {
- if !reflect.DeepEqual(hop, legacyRoute.Hops[i]) {
- t.Fatalf("hop mismatch")
- }
- }
- }
- }
-
- applyMigration(t,
- beforeMigrationFunc,
- afterMigrationFunc,
- MigrateRouteSerialization,
- false)
-}
-
-// TestNotCoveredMigrations only references migrations that are not referenced
-// anywhere else in this package. This prevents false positives when linting
-// with unused.
-func TestNotCoveredMigrations(t *testing.T) {
- _ = MigrateNodeAndEdgeUpdateIndex
- _ = MigrateInvoiceTimeSeries
- _ = MigrateInvoiceTimeSeriesOutgoingPayments
- _ = MigrateEdgePolicies
- _ = MigratePruneEdgeUpdateIndex
-}
diff --git a/lnd/channeldb/migration_01_to_11/options.go b/lnd/channeldb/migration_01_to_11/options.go
deleted file mode 100644
index 03b287e0..00000000
--- a/lnd/channeldb/migration_01_to_11/options.go
+++ /dev/null
@@ -1,41 +0,0 @@
-package migration_01_to_11
-
-const (
- // DefaultRejectCacheSize is the default number of rejectCacheEntries to
- // cache for use in the rejection cache of incoming gossip traffic. This
- // produces a cache size of around 1MB.
- DefaultRejectCacheSize = 50000
-
- // DefaultChannelCacheSize is the default number of ChannelEdges cached
- // in order to reply to gossip queries. This produces a cache size of
- // around 40MB.
- DefaultChannelCacheSize = 20000
-)
-
-// Options holds parameters for tuning and customizing a channeldb.DB.
-type Options struct {
- // RejectCacheSize is the maximum number of rejectCacheEntries to hold
- // in the rejection cache.
- RejectCacheSize int
-
- // ChannelCacheSize is the maximum number of ChannelEdges to hold in the
- // channel cache.
- ChannelCacheSize int
-
- // NoFreelistSync, if true, prevents the database from syncing its
- // freelist to disk, resulting in improved performance at the expense of
- // increased startup time.
- NoFreelistSync bool
-}
-
-// DefaultOptions returns an Options populated with default values.
-func DefaultOptions() Options {
- return Options{
- RejectCacheSize: DefaultRejectCacheSize,
- ChannelCacheSize: DefaultChannelCacheSize,
- NoFreelistSync: true,
- }
-}
-
-// OptionModifier is a function signature for modifying the default Options.
-type OptionModifier func(*Options)
diff --git a/lnd/channeldb/migration_01_to_11/payment_control.go b/lnd/channeldb/migration_01_to_11/payment_control.go
deleted file mode 100644
index 0525aaa1..00000000
--- a/lnd/channeldb/migration_01_to_11/payment_control.go
+++ /dev/null
@@ -1,21 +0,0 @@
-package migration_01_to_11
-
-import "github.com/pkt-cash/pktd/lnd/channeldb/kvdb"
-
-// fetchPaymentStatus fetches the payment status of the payment. If the payment
-// isn't found, it will default to "StatusUnknown".
-func fetchPaymentStatus(bucket kvdb.RBucket) PaymentStatus {
- if bucket.Get(paymentSettleInfoKey) != nil {
- return StatusSucceeded
- }
-
- if bucket.Get(paymentFailInfoKey) != nil {
- return StatusFailed
- }
-
- if bucket.Get(paymentCreationInfoKey) != nil {
- return StatusInFlight
- }
-
- return StatusUnknown
-}
diff --git a/lnd/channeldb/migration_01_to_11/payments.go b/lnd/channeldb/migration_01_to_11/payments.go
deleted file mode 100644
index 39b5c0ca..00000000
--- a/lnd/channeldb/migration_01_to_11/payments.go
+++ /dev/null
@@ -1,623 +0,0 @@
-package migration_01_to_11
-
-import (
- "bytes"
- "encoding/binary"
- "io"
- "sort"
- "time"
-
- "github.com/pkt-cash/pktd/btcec"
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/btcutil/util"
- "github.com/pkt-cash/pktd/lnd/channeldb/kvdb"
- "github.com/pkt-cash/pktd/lnd/lntypes"
- "github.com/pkt-cash/pktd/lnd/lnwire"
- "github.com/pkt-cash/pktd/lnd/tlv"
- "github.com/pkt-cash/pktd/wire"
-)
-
-var (
- // paymentsRootBucket is the name of the top-level bucket within the
- // database that stores all data related to payments. Within this
- // bucket, each payment hash its own sub-bucket keyed by its payment
- // hash.
- //
- // Bucket hierarchy:
- //
- // root-bucket
- // |
- // |--
- // | |--sequence-key:
- // | |--creation-info-key:
- // | |--attempt-info-key:
- // | |--settle-info-key:
- // | |--fail-info-key:
- // | |
- // | |--duplicate-bucket (only for old, completed payments)
- // | |
- // | |--
- // | | |--sequence-key:
- // | | |--creation-info-key:
- // | | |--attempt-info-key:
- // | | |--settle-info-key:
- // | | |--fail-info-key:
- // | |
- // | |--
- // | | |
- // | ... ...
- // |
- // |--
- // | |
- // | ...
- // ...
- //
- paymentsRootBucket = []byte("payments-root-bucket")
-
- // paymentDublicateBucket is the name of a optional sub-bucket within
- // the payment hash bucket, that is used to hold duplicate payments to
- // a payment hash. This is needed to support information from earlier
- // versions of lnd, where it was possible to pay to a payment hash more
- // than once.
- paymentDuplicateBucket = []byte("payment-duplicate-bucket")
-
- // paymentSequenceKey is a key used in the payment's sub-bucket to
- // store the sequence number of the payment.
- paymentSequenceKey = []byte("payment-sequence-key")
-
- // paymentCreationInfoKey is a key used in the payment's sub-bucket to
- // store the creation info of the payment.
- paymentCreationInfoKey = []byte("payment-creation-info")
-
- // paymentAttemptInfoKey is a key used in the payment's sub-bucket to
- // store the info about the latest attempt that was done for the
- // payment in question.
- paymentAttemptInfoKey = []byte("payment-attempt-info")
-
- // paymentSettleInfoKey is a key used in the payment's sub-bucket to
- // store the settle info of the payment.
- paymentSettleInfoKey = []byte("payment-settle-info")
-
- // paymentFailInfoKey is a key used in the payment's sub-bucket to
- // store information about the reason a payment failed.
- paymentFailInfoKey = []byte("payment-fail-info")
-)
-
-// FailureReason encodes the reason a payment ultimately failed.
-type FailureReason byte
-
-const (
- // FailureReasonTimeout indicates that the payment did timeout before a
- // successful payment attempt was made.
- FailureReasonTimeout FailureReason = 0
-
- // FailureReasonNoRoute indicates no successful route to the
- // destination was found during path finding.
- FailureReasonNoRoute FailureReason = 1
-
- // FailureReasonError indicates that an unexpected error happened during
- // payment.
- FailureReasonError FailureReason = 2
-
- // FailureReasonIncorrectPaymentDetails indicates that either the hash
- // is unknown or the final cltv delta or amount is incorrect.
- FailureReasonIncorrectPaymentDetails FailureReason = 3
-
- // TODO(halseth): cancel state.
-
- // TODO(joostjager): Add failure reasons for:
- // LocalLiquidityInsufficient, RemoteCapacityInsufficient.
-)
-
-// String returns a human readable FailureReason
-func (r FailureReason) String() string {
- switch r {
- case FailureReasonTimeout:
- return "timeout"
- case FailureReasonNoRoute:
- return "no_route"
- case FailureReasonError:
- return "error"
- case FailureReasonIncorrectPaymentDetails:
- return "incorrect_payment_details"
- }
-
- return "unknown"
-}
-
-// PaymentStatus represent current status of payment
-type PaymentStatus byte
-
-const (
- // StatusUnknown is the status where a payment has never been initiated
- // and hence is unknown.
- StatusUnknown PaymentStatus = 0
-
- // StatusInFlight is the status where a payment has been initiated, but
- // a response has not been received.
- StatusInFlight PaymentStatus = 1
-
- // StatusSucceeded is the status where a payment has been initiated and
- // the payment was completed successfully.
- StatusSucceeded PaymentStatus = 2
-
- // StatusFailed is the status where a payment has been initiated and a
- // failure result has come back.
- StatusFailed PaymentStatus = 3
-)
-
-// Bytes returns status as slice of bytes.
-func (ps PaymentStatus) Bytes() []byte {
- return []byte{byte(ps)}
-}
-
-// FromBytes sets status from slice of bytes.
-func (ps *PaymentStatus) FromBytes(status []byte) er.R {
- if len(status) != 1 {
- return er.New("payment status is empty")
- }
-
- switch PaymentStatus(status[0]) {
- case StatusUnknown, StatusInFlight, StatusSucceeded, StatusFailed:
- *ps = PaymentStatus(status[0])
- default:
- return er.New("unknown payment status")
- }
-
- return nil
-}
-
-// String returns readable representation of payment status.
-func (ps PaymentStatus) String() string {
- switch ps {
- case StatusUnknown:
- return "Unknown"
- case StatusInFlight:
- return "In Flight"
- case StatusSucceeded:
- return "Succeeded"
- case StatusFailed:
- return "Failed"
- default:
- return "Unknown"
- }
-}
-
-// PaymentCreationInfo is the information necessary to have ready when
-// initiating a payment, moving it into state InFlight.
-type PaymentCreationInfo struct {
- // PaymentHash is the hash this payment is paying to.
- PaymentHash lntypes.Hash
-
- // Value is the amount we are paying.
- Value lnwire.MilliSatoshi
-
- // CreatingDate is the time when this payment was initiated.
- CreationDate time.Time
-
- // PaymentRequest is the full payment request, if any.
- PaymentRequest []byte
-}
-
-// PaymentAttemptInfo contains information about a specific payment attempt for
-// a given payment. This information is used by the router to handle any errors
-// coming back after an attempt is made, and to query the switch about the
-// status of a payment. For settled payment this will be the information for
-// the succeeding payment attempt.
-type PaymentAttemptInfo struct {
- // PaymentID is the unique ID used for this attempt.
- PaymentID uint64
-
- // SessionKey is the ephemeral key used for this payment attempt.
- SessionKey *btcec.PrivateKey
-
- // Route is the route attempted to send the HTLC.
- Route Route
-}
-
-// Payment is a wrapper around a payment's PaymentCreationInfo,
-// PaymentAttemptInfo, and preimage. All payments will have the
-// PaymentCreationInfo set, the PaymentAttemptInfo will be set only if at least
-// one payment attempt has been made, while only completed payments will have a
-// non-zero payment preimage.
-type Payment struct {
- // sequenceNum is a unique identifier used to sort the payments in
- // order of creation.
- sequenceNum uint64
-
- // Status is the current PaymentStatus of this payment.
- Status PaymentStatus
-
- // Info holds all static information about this payment, and is
- // populated when the payment is initiated.
- Info *PaymentCreationInfo
-
- // Attempt is the information about the last payment attempt made.
- //
- // NOTE: Can be nil if no attempt is yet made.
- Attempt *PaymentAttemptInfo
-
- // PaymentPreimage is the preimage of a successful payment. This serves
- // as a proof of payment. It will only be non-nil for settled payments.
- //
- // NOTE: Can be nil if payment is not settled.
- PaymentPreimage *lntypes.Preimage
-
- // Failure is a failure reason code indicating the reason the payment
- // failed. It is only non-nil for failed payments.
- //
- // NOTE: Can be nil if payment is not failed.
- Failure *FailureReason
-}
-
-// FetchPayments returns all sent payments found in the DB.
-func (db *DB) FetchPayments() ([]*Payment, er.R) {
- var payments []*Payment
-
- err := kvdb.View(db, func(tx kvdb.RTx) er.R {
- paymentsBucket := tx.ReadBucket(paymentsRootBucket)
- if paymentsBucket == nil {
- return nil
- }
-
- return paymentsBucket.ForEach(func(k, v []byte) er.R {
- bucket := paymentsBucket.NestedReadBucket(k)
- if bucket == nil {
- // We only expect sub-buckets to be found in
- // this top-level bucket.
- return er.Errorf("non bucket element in " +
- "payments bucket")
- }
-
- p, err := fetchPayment(bucket)
- if err != nil {
- return err
- }
-
- payments = append(payments, p)
-
- // For older versions of lnd, duplicate payments to a
- // payment has was possible. These will be found in a
- // sub-bucket indexed by their sequence number if
- // available.
- dup := bucket.NestedReadBucket(paymentDuplicateBucket)
- if dup == nil {
- return nil
- }
-
- return dup.ForEach(func(k, v []byte) er.R {
- subBucket := dup.NestedReadBucket(k)
- if subBucket == nil {
- // We one bucket for each duplicate to
- // be found.
- return er.Errorf("non bucket element" +
- "in duplicate bucket")
- }
-
- p, err := fetchPayment(subBucket)
- if err != nil {
- return err
- }
-
- payments = append(payments, p)
- return nil
- })
- })
- }, func() {
- payments = nil
- })
- if err != nil {
- return nil, err
- }
-
- // Before returning, sort the payments by their sequence number.
- sort.Slice(payments, func(i, j int) bool {
- return payments[i].sequenceNum < payments[j].sequenceNum
- })
-
- return payments, nil
-}
-
-func fetchPayment(bucket kvdb.RBucket) (*Payment, er.R) {
- var (
- err er.R
- p = &Payment{}
- )
-
- seqBytes := bucket.Get(paymentSequenceKey)
- if seqBytes == nil {
- return nil, er.Errorf("sequence number not found")
- }
-
- p.sequenceNum = binary.BigEndian.Uint64(seqBytes)
-
- // Get the payment status.
- p.Status = fetchPaymentStatus(bucket)
-
- // Get the PaymentCreationInfo.
- b := bucket.Get(paymentCreationInfoKey)
- if b == nil {
- return nil, er.Errorf("creation info not found")
- }
-
- r := bytes.NewReader(b)
- p.Info, err = deserializePaymentCreationInfo(r)
- if err != nil {
- return nil, err
-
- }
-
- // Get the PaymentAttemptInfo. This can be unset.
- b = bucket.Get(paymentAttemptInfoKey)
- if b != nil {
- r = bytes.NewReader(b)
- p.Attempt, err = deserializePaymentAttemptInfo(r)
- if err != nil {
- return nil, err
- }
- }
-
- // Get the payment preimage. This is only found for
- // completed payments.
- b = bucket.Get(paymentSettleInfoKey)
- if b != nil {
- var preimg lntypes.Preimage
- copy(preimg[:], b[:])
- p.PaymentPreimage = &preimg
- }
-
- // Get failure reason if available.
- b = bucket.Get(paymentFailInfoKey)
- if b != nil {
- reason := FailureReason(b[0])
- p.Failure = &reason
- }
-
- return p, nil
-}
-
-func serializePaymentCreationInfo(w io.Writer, c *PaymentCreationInfo) er.R {
- var scratch [8]byte
-
- if _, err := util.Write(w, c.PaymentHash[:]); err != nil {
- return err
- }
-
- byteOrder.PutUint64(scratch[:], uint64(c.Value))
- if _, err := util.Write(w, scratch[:]); err != nil {
- return err
- }
-
- byteOrder.PutUint64(scratch[:], uint64(c.CreationDate.Unix()))
- if _, err := util.Write(w, scratch[:]); err != nil {
- return err
- }
-
- byteOrder.PutUint32(scratch[:4], uint32(len(c.PaymentRequest)))
- if _, err := util.Write(w, scratch[:4]); err != nil {
- return err
- }
-
- if _, err := util.Write(w, c.PaymentRequest[:]); err != nil {
- return err
- }
-
- return nil
-}
-
-func deserializePaymentCreationInfo(r io.Reader) (*PaymentCreationInfo, er.R) {
- var scratch [8]byte
-
- c := &PaymentCreationInfo{}
-
- if _, err := util.ReadFull(r, c.PaymentHash[:]); err != nil {
- return nil, err
- }
-
- if _, err := util.ReadFull(r, scratch[:]); err != nil {
- return nil, err
- }
- c.Value = lnwire.MilliSatoshi(byteOrder.Uint64(scratch[:]))
-
- if _, err := util.ReadFull(r, scratch[:]); err != nil {
- return nil, err
- }
- c.CreationDate = time.Unix(int64(byteOrder.Uint64(scratch[:])), 0)
-
- if _, err := util.ReadFull(r, scratch[:4]); err != nil {
- return nil, err
- }
-
- reqLen := uint32(byteOrder.Uint32(scratch[:4]))
- payReq := make([]byte, reqLen)
- if reqLen > 0 {
- if _, err := util.ReadFull(r, payReq[:]); err != nil {
- return nil, err
- }
- }
- c.PaymentRequest = payReq
-
- return c, nil
-}
-
-func serializePaymentAttemptInfo(w io.Writer, a *PaymentAttemptInfo) er.R {
- if err := WriteElements(w, a.PaymentID, a.SessionKey); err != nil {
- return err
- }
-
- if err := SerializeRoute(w, a.Route); err != nil {
- return err
- }
-
- return nil
-}
-
-func deserializePaymentAttemptInfo(r io.Reader) (*PaymentAttemptInfo, er.R) {
- a := &PaymentAttemptInfo{}
- err := ReadElements(r, &a.PaymentID, &a.SessionKey)
- if err != nil {
- return nil, err
- }
- a.Route, err = DeserializeRoute(r)
- if err != nil {
- return nil, err
- }
- return a, nil
-}
-
-func serializeHop(w io.Writer, h *Hop) er.R {
- if err := WriteElements(w,
- h.PubKeyBytes[:], h.ChannelID, h.OutgoingTimeLock,
- h.AmtToForward,
- ); err != nil {
- return err
- }
-
- if err := util.WriteBin(w, byteOrder, h.LegacyPayload); err != nil {
- return err
- }
-
- // For legacy payloads, we don't need to write any TLV records, so
- // we'll write a zero indicating the our serialized TLV map has no
- // records.
- if h.LegacyPayload {
- return WriteElements(w, uint32(0))
- }
-
- // Otherwise, we'll transform our slice of records into a map of the
- // raw bytes, then serialize them in-line with a length (number of
- // elements) prefix.
- mapRecords, err := tlv.RecordsToMap(h.TLVRecords)
- if err != nil {
- return err
- }
-
- numRecords := uint32(len(mapRecords))
- if err := WriteElements(w, numRecords); err != nil {
- return err
- }
-
- for recordType, rawBytes := range mapRecords {
- if err := WriteElements(w, recordType); err != nil {
- return err
- }
-
- if err := wire.WriteVarBytes(w, 0, rawBytes); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-// maxOnionPayloadSize is the largest Sphinx payload possible, so we don't need
-// to read/write a TLV stream larger than this.
-const maxOnionPayloadSize = 1300
-
-func deserializeHop(r io.Reader) (*Hop, er.R) {
- h := &Hop{}
-
- var pub []byte
- if err := ReadElements(r, &pub); err != nil {
- return nil, err
- }
- copy(h.PubKeyBytes[:], pub)
-
- if err := ReadElements(r,
- &h.ChannelID, &h.OutgoingTimeLock, &h.AmtToForward,
- ); err != nil {
- return nil, err
- }
-
- // TODO(roasbeef): change field to allow LegacyPayload false to be the
- // legacy default?
- err := util.ReadBin(r, byteOrder, &h.LegacyPayload)
- if err != nil {
- return nil, err
- }
-
- var numElements uint32
- if err := ReadElements(r, &numElements); err != nil {
- return nil, err
- }
-
- // If there're no elements, then we can return early.
- if numElements == 0 {
- return h, nil
- }
-
- tlvMap := make(map[uint64][]byte)
- for i := uint32(0); i < numElements; i++ {
- var tlvType uint64
- if err := ReadElements(r, &tlvType); err != nil {
- return nil, err
- }
-
- rawRecordBytes, err := wire.ReadVarBytes(
- r, 0, maxOnionPayloadSize, "tlv",
- )
- if err != nil {
- return nil, err
- }
-
- tlvMap[tlvType] = rawRecordBytes
- }
-
- h.TLVRecords = tlv.MapToRecords(tlvMap)
-
- return h, nil
-}
-
-// SerializeRoute serializes a route.
-func SerializeRoute(w io.Writer, r Route) er.R {
- if err := WriteElements(w,
- r.TotalTimeLock, r.TotalAmount, r.SourcePubKey[:],
- ); err != nil {
- return err
- }
-
- if err := WriteElements(w, uint32(len(r.Hops))); err != nil {
- return err
- }
-
- for _, h := range r.Hops {
- if err := serializeHop(w, h); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-// DeserializeRoute deserializes a route.
-func DeserializeRoute(r io.Reader) (Route, er.R) {
- rt := Route{}
- if err := ReadElements(r,
- &rt.TotalTimeLock, &rt.TotalAmount,
- ); err != nil {
- return rt, err
- }
-
- var pub []byte
- if err := ReadElements(r, &pub); err != nil {
- return rt, err
- }
- copy(rt.SourcePubKey[:], pub)
-
- var numHops uint32
- if err := ReadElements(r, &numHops); err != nil {
- return rt, err
- }
-
- var hops []*Hop
- for i := uint32(0); i < numHops; i++ {
- hop, err := deserializeHop(r)
- if err != nil {
- return rt, err
- }
- hops = append(hops, hop)
- }
- rt.Hops = hops
-
- return rt, nil
-}
diff --git a/lnd/channeldb/migration_01_to_11/payments_test.go b/lnd/channeldb/migration_01_to_11/payments_test.go
deleted file mode 100644
index 3e7bfe5f..00000000
--- a/lnd/channeldb/migration_01_to_11/payments_test.go
+++ /dev/null
@@ -1,108 +0,0 @@
-package migration_01_to_11
-
-import (
- "bytes"
- "math/rand"
- "time"
-
- "github.com/pkt-cash/pktd/btcec"
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/lnd/lnwire"
-)
-
-var (
- priv, _ = btcec.NewPrivateKey(btcec.S256())
- pub = priv.PubKey()
-)
-
-func makeFakePayment() *outgoingPayment {
- fakeInvoice := &Invoice{
- // Use single second precision to avoid false positive test
- // failures due to the monotonic time component.
- CreationDate: time.Unix(time.Now().Unix(), 0),
- Memo: []byte("fake memo"),
- Receipt: []byte("fake receipt"),
- PaymentRequest: []byte(""),
- }
-
- copy(fakeInvoice.Terms.PaymentPreimage[:], rev[:])
- fakeInvoice.Terms.Value = lnwire.NewMSatFromSatoshis(10000)
-
- fakePath := make([][33]byte, 3)
- for i := 0; i < 3; i++ {
- copy(fakePath[i][:], bytes.Repeat([]byte{byte(i)}, 33))
- }
-
- fakePayment := &outgoingPayment{
- Invoice: *fakeInvoice,
- Fee: 101,
- Path: fakePath,
- TimeLockLength: 1000,
- }
- copy(fakePayment.PaymentPreimage[:], rev[:])
- return fakePayment
-}
-
-// randomBytes creates random []byte with length in range [minLen, maxLen)
-func randomBytes(minLen, maxLen int) ([]byte, er.R) {
- randBuf := make([]byte, minLen+rand.Intn(maxLen-minLen))
-
- if _, err := rand.Read(randBuf); err != nil {
- return nil, er.Errorf("Internal error. "+
- "Cannot generate random string: %v", err)
- }
-
- return randBuf, nil
-}
-
-func makeRandomFakePayment() (*outgoingPayment, er.R) {
- var err er.R
- fakeInvoice := &Invoice{
- // Use single second precision to avoid false positive test
- // failures due to the monotonic time component.
- CreationDate: time.Unix(time.Now().Unix(), 0),
- }
-
- fakeInvoice.Memo, err = randomBytes(1, 50)
- if err != nil {
- return nil, err
- }
-
- fakeInvoice.Receipt, err = randomBytes(1, 50)
- if err != nil {
- return nil, err
- }
-
- fakeInvoice.PaymentRequest, err = randomBytes(1, 50)
- if err != nil {
- return nil, err
- }
-
- preImg, err := randomBytes(32, 33)
- if err != nil {
- return nil, err
- }
- copy(fakeInvoice.Terms.PaymentPreimage[:], preImg)
-
- fakeInvoice.Terms.Value = lnwire.MilliSatoshi(rand.Intn(10000))
-
- fakePathLen := 1 + rand.Intn(5)
- fakePath := make([][33]byte, fakePathLen)
- for i := 0; i < fakePathLen; i++ {
- b, err := randomBytes(33, 34)
- if err != nil {
- return nil, err
- }
- copy(fakePath[i][:], b)
- }
-
- fakePayment := &outgoingPayment{
- Invoice: *fakeInvoice,
- Fee: lnwire.MilliSatoshi(rand.Intn(1001)),
- Path: fakePath,
- TimeLockLength: uint32(rand.Intn(10000)),
- }
- copy(fakePayment.PaymentPreimage[:], fakeInvoice.Terms.PaymentPreimage[:])
-
- return fakePayment, nil
-}
diff --git a/lnd/channeldb/migration_01_to_11/route.go b/lnd/channeldb/migration_01_to_11/route.go
deleted file mode 100644
index 58253d19..00000000
--- a/lnd/channeldb/migration_01_to_11/route.go
+++ /dev/null
@@ -1,331 +0,0 @@
-package migration_01_to_11
-
-import (
- "bytes"
- "encoding/binary"
- "fmt"
- "io"
- "strconv"
- "strings"
-
- "github.com/pkt-cash/pktd/btcec"
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/btcutil/util"
- sphinx "github.com/pkt-cash/pktd/lightning-onion"
- "github.com/pkt-cash/pktd/lnd/lnwire"
- "github.com/pkt-cash/pktd/lnd/record"
- "github.com/pkt-cash/pktd/lnd/tlv"
-)
-
-// VertexSize is the size of the array to store a vertex.
-const VertexSize = 33
-
-// ErrNoRouteHopsProvided is returned when a caller attempts to construct a new
-// sphinx packet, but provides an empty set of hops for each route.
-var ErrNoRouteHopsProvided = Err.CodeWithDetail("ErrNoRouteHopsProvided", "empty route hops provided")
-
-// Vertex is a simple alias for the serialization of a compressed Bitcoin
-// public key.
-type Vertex [VertexSize]byte
-
-// NewVertex returns a new Vertex given a public key.
-func NewVertex(pub *btcec.PublicKey) Vertex {
- var v Vertex
- copy(v[:], pub.SerializeCompressed())
- return v
-}
-
-// NewVertexFromBytes returns a new Vertex based on a serialized pubkey in a
-// byte slice.
-func NewVertexFromBytes(b []byte) (Vertex, er.R) {
- vertexLen := len(b)
- if vertexLen != VertexSize {
- return Vertex{}, er.Errorf("invalid vertex length of %v, "+
- "want %v", vertexLen, VertexSize)
- }
-
- var v Vertex
- copy(v[:], b)
- return v, nil
-}
-
-// NewVertexFromStr returns a new Vertex given its hex-encoded string format.
-func NewVertexFromStr(v string) (Vertex, er.R) {
- // Return error if hex string is of incorrect length.
- if len(v) != VertexSize*2 {
- return Vertex{}, er.Errorf("invalid vertex string length of "+
- "%v, want %v", len(v), VertexSize*2)
- }
-
- vertex, err := util.DecodeHex(v)
- if err != nil {
- return Vertex{}, err
- }
-
- return NewVertexFromBytes(vertex)
-}
-
-// String returns a human readable version of the Vertex which is the
-// hex-encoding of the serialized compressed public key.
-func (v Vertex) String() string {
- return fmt.Sprintf("%x", v[:])
-}
-
-// Hop represents an intermediate or final node of the route. This naming
-// is in line with the definition given in BOLT #4: Onion Routing Protocol.
-// The struct houses the channel along which this hop can be reached and
-// the values necessary to create the HTLC that needs to be sent to the
-// next hop. It is also used to encode the per-hop payload included within
-// the Sphinx packet.
-type Hop struct {
- // PubKeyBytes is the raw bytes of the public key of the target node.
- PubKeyBytes Vertex
-
- // ChannelID is the unique channel ID for the channel. The first 3
- // bytes are the block height, the next 3 the index within the block,
- // and the last 2 bytes are the output index for the channel.
- ChannelID uint64
-
- // OutgoingTimeLock is the timelock value that should be used when
- // crafting the _outgoing_ HTLC from this hop.
- OutgoingTimeLock uint32
-
- // AmtToForward is the amount that this hop will forward to the next
- // hop. This value is less than the value that the incoming HTLC
- // carries as a fee will be subtracted by the hop.
- AmtToForward lnwire.MilliSatoshi
-
- // TLVRecords if non-nil are a set of additional TLV records that
- // should be included in the forwarding instructions for this node.
- TLVRecords []tlv.Record
-
- // LegacyPayload if true, then this signals that this node doesn't
- // understand the new TLV payload, so we must instead use the legacy
- // payload.
- LegacyPayload bool
-}
-
-// PackHopPayload writes to the passed io.Writer, the series of byes that can
-// be placed directly into the per-hop payload (EOB) for this hop. This will
-// include the required routing fields, as well as serializing any of the
-// passed optional TLVRecords. nextChanID is the unique channel ID that
-// references the _outgoing_ channel ID that follows this hop. This field
-// follows the same semantics as the NextAddress field in the onion: it should
-// be set to zero to indicate the terminal hop.
-func (h *Hop) PackHopPayload(w io.Writer, nextChanID uint64) er.R {
- // If this is a legacy payload, then we'll exit here as this method
- // shouldn't be called.
- if h.LegacyPayload == true {
- return er.Errorf("cannot pack hop payloads for legacy " +
- "payloads")
- }
-
- // Otherwise, we'll need to make a new stream that includes our
- // required routing fields, as well as these optional values.
- var records []tlv.Record
-
- // Every hop must have an amount to forward and CLTV expiry.
- amt := uint64(h.AmtToForward)
- records = append(records,
- record.NewAmtToFwdRecord(&amt),
- record.NewLockTimeRecord(&h.OutgoingTimeLock),
- )
-
- // BOLT 04 says the next_hop_id should be omitted for the final hop,
- // but present for all others.
- //
- // TODO(conner): test using hop.Exit once available
- if nextChanID != 0 {
- records = append(records,
- record.NewNextHopIDRecord(&nextChanID),
- )
- }
-
- // Append any custom types destined for this hop.
- records = append(records, h.TLVRecords...)
-
- // To ensure we produce a canonical stream, we'll sort the records
- // before encoding them as a stream in the hop payload.
- tlv.SortRecords(records)
-
- tlvStream, err := tlv.NewStream(records...)
- if err != nil {
- return err
- }
-
- return tlvStream.Encode(w)
-}
-
-// Route represents a path through the channel graph which runs over one or
-// more channels in succession. This struct carries all the information
-// required to craft the Sphinx onion packet, and send the payment along the
-// first hop in the path. A route is only selected as valid if all the channels
-// have sufficient capacity to carry the initial payment amount after fees are
-// accounted for.
-type Route struct {
- // TotalTimeLock is the cumulative (final) time lock across the entire
- // route. This is the CLTV value that should be extended to the first
- // hop in the route. All other hops will decrement the time-lock as
- // advertised, leaving enough time for all hops to wait for or present
- // the payment preimage to complete the payment.
- TotalTimeLock uint32
-
- // TotalAmount is the total amount of funds required to complete a
- // payment over this route. This value includes the cumulative fees at
- // each hop. As a result, the HTLC extended to the first-hop in the
- // route will need to have at least this many satoshis, otherwise the
- // route will fail at an intermediate node due to an insufficient
- // amount of fees.
- TotalAmount lnwire.MilliSatoshi
-
- // SourcePubKey is the pubkey of the node where this route originates
- // from.
- SourcePubKey Vertex
-
- // Hops contains details concerning the specific forwarding details at
- // each hop.
- Hops []*Hop
-}
-
-// HopFee returns the fee charged by the route hop indicated by hopIndex.
-func (r *Route) HopFee(hopIndex int) lnwire.MilliSatoshi {
- var incomingAmt lnwire.MilliSatoshi
- if hopIndex == 0 {
- incomingAmt = r.TotalAmount
- } else {
- incomingAmt = r.Hops[hopIndex-1].AmtToForward
- }
-
- // Fee is calculated as difference between incoming and outgoing amount.
- return incomingAmt - r.Hops[hopIndex].AmtToForward
-}
-
-// TotalFees is the sum of the fees paid at each hop within the final route. In
-// the case of a one-hop payment, this value will be zero as we don't need to
-// pay a fee to ourself.
-func (r *Route) TotalFees() lnwire.MilliSatoshi {
- if len(r.Hops) == 0 {
- return 0
- }
-
- return r.TotalAmount - r.Hops[len(r.Hops)-1].AmtToForward
-}
-
-// NewRouteFromHops creates a new Route structure from the minimally required
-// information to perform the payment. It infers fee amounts and populates the
-// node, chan and prev/next hop maps.
-func NewRouteFromHops(amtToSend lnwire.MilliSatoshi, timeLock uint32,
- sourceVertex Vertex, hops []*Hop) (*Route, er.R) {
-
- if len(hops) == 0 {
- return nil, ErrNoRouteHopsProvided.Default()
- }
-
- // First, we'll create a route struct and populate it with the fields
- // for which the values are provided as arguments of this function.
- // TotalFees is determined based on the difference between the amount
- // that is send from the source and the final amount that is received
- // by the destination.
- route := &Route{
- SourcePubKey: sourceVertex,
- Hops: hops,
- TotalTimeLock: timeLock,
- TotalAmount: amtToSend,
- }
-
- return route, nil
-}
-
-// ToSphinxPath converts a complete route into a sphinx PaymentPath that
-// contains the per-hop paylods used to encoding the HTLC routing data for each
-// hop in the route. This method also accepts an optional EOB payload for the
-// final hop.
-func (r *Route) ToSphinxPath() (*sphinx.PaymentPath, er.R) {
- var path sphinx.PaymentPath
-
- // For each hop encoded within the route, we'll convert the hop struct
- // to an OnionHop with matching per-hop payload within the path as used
- // by the sphinx package.
- for i, hop := range r.Hops {
- pub, err := btcec.ParsePubKey(
- hop.PubKeyBytes[:], btcec.S256(),
- )
- if err != nil {
- return nil, err
- }
-
- // As a base case, the next hop is set to all zeroes in order
- // to indicate that the "last hop" as no further hops after it.
- nextHop := uint64(0)
-
- // If we aren't on the last hop, then we set the "next address"
- // field to be the channel that directly follows it.
- if i != len(r.Hops)-1 {
- nextHop = r.Hops[i+1].ChannelID
- }
-
- var payload sphinx.HopPayload
-
- // If this is the legacy payload, then we can just include the
- // hop data as normal.
- if hop.LegacyPayload {
- // Before we encode this value, we'll pack the next hop
- // into the NextAddress field of the hop info to ensure
- // we point to the right now.
- hopData := sphinx.HopData{
- ForwardAmount: uint64(hop.AmtToForward),
- OutgoingCltv: hop.OutgoingTimeLock,
- }
- binary.BigEndian.PutUint64(
- hopData.NextAddress[:], nextHop,
- )
-
- payload, err = sphinx.NewHopPayload(&hopData, nil)
- if err != nil {
- return nil, err
- }
- } else {
- // For non-legacy payloads, we'll need to pack the
- // routing information, along with any extra TLV
- // information into the new per-hop payload format.
- // We'll also pass in the chan ID of the hop this
- // channel should be forwarded to so we can construct a
- // valid payload.
- var b bytes.Buffer
- err := hop.PackHopPayload(&b, nextHop)
- if err != nil {
- return nil, err
- }
-
- // TODO(roasbeef): make better API for NewHopPayload?
- payload, err = sphinx.NewHopPayload(nil, b.Bytes())
- if err != nil {
- return nil, err
- }
- }
-
- path[i] = sphinx.OnionHop{
- NodePub: *pub,
- HopPayload: payload,
- }
- }
-
- return &path, nil
-}
-
-// String returns a human readable representation of the route.
-func (r *Route) String() string {
- var b strings.Builder
-
- for i, hop := range r.Hops {
- if i > 0 {
- b.WriteString(",")
- }
- b.WriteString(strconv.FormatUint(hop.ChannelID, 10))
- }
-
- return fmt.Sprintf("amt=%v, fees=%v, tl=%v, chans=%v",
- r.TotalAmount-r.TotalFees(), r.TotalFees(), r.TotalTimeLock,
- b.String(),
- )
-}
diff --git a/lnd/channeldb/migration_01_to_11/zpay32/amountunits.go b/lnd/channeldb/migration_01_to_11/zpay32/amountunits.go
deleted file mode 100644
index 2e1caea0..00000000
--- a/lnd/channeldb/migration_01_to_11/zpay32/amountunits.go
+++ /dev/null
@@ -1,158 +0,0 @@
-package zpay32
-
-import (
- "strconv"
-
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/lnd/lnwire"
-)
-
-var (
- // toMSat is a map from a unit to a function that converts an amount
- // of that unit to millisatoshis.
- toMSat = map[byte]func(uint64) (lnwire.MilliSatoshi, er.R){
- 'm': mBtcToMSat,
- 'u': uBtcToMSat,
- 'n': nBtcToMSat,
- 'p': pBtcToMSat,
- }
-
- // fromMSat is a map from a unit to a function that converts an amount
- // in millisatoshis to an amount of that unit.
- fromMSat = map[byte]func(lnwire.MilliSatoshi) (uint64, er.R){
- 'm': mSatToMBtc,
- 'u': mSatToUBtc,
- 'n': mSatToNBtc,
- 'p': mSatToPBtc,
- }
-)
-
-// mBtcToMSat converts the given amount in milliBTC to millisatoshis.
-func mBtcToMSat(m uint64) (lnwire.MilliSatoshi, er.R) {
- return lnwire.MilliSatoshi(m) * 100000000, nil
-}
-
-// uBtcToMSat converts the given amount in microBTC to millisatoshis.
-func uBtcToMSat(u uint64) (lnwire.MilliSatoshi, er.R) {
- return lnwire.MilliSatoshi(u * 100000), nil
-}
-
-// nBtcToMSat converts the given amount in nanoBTC to millisatoshis.
-func nBtcToMSat(n uint64) (lnwire.MilliSatoshi, er.R) {
- return lnwire.MilliSatoshi(n * 100), nil
-}
-
-// pBtcToMSat converts the given amount in picoBTC to millisatoshis.
-func pBtcToMSat(p uint64) (lnwire.MilliSatoshi, er.R) {
- if p < 10 {
- return 0, er.Errorf("minimum amount is 10p")
- }
- if p%10 != 0 {
- return 0, er.Errorf("amount %d pBTC not expressible in msat",
- p)
- }
- return lnwire.MilliSatoshi(p / 10), nil
-}
-
-// mSatToMBtc converts the given amount in millisatoshis to milliBTC.
-func mSatToMBtc(msat lnwire.MilliSatoshi) (uint64, er.R) {
- if msat%100000000 != 0 {
- return 0, er.Errorf("%d msat not expressible "+
- "in mBTC", msat)
- }
- return uint64(msat / 100000000), nil
-}
-
-// mSatToUBtc converts the given amount in millisatoshis to microBTC.
-func mSatToUBtc(msat lnwire.MilliSatoshi) (uint64, er.R) {
- if msat%100000 != 0 {
- return 0, er.Errorf("%d msat not expressible "+
- "in uBTC", msat)
- }
- return uint64(msat / 100000), nil
-}
-
-// mSatToNBtc converts the given amount in millisatoshis to nanoBTC.
-func mSatToNBtc(msat lnwire.MilliSatoshi) (uint64, er.R) {
- if msat%100 != 0 {
- return 0, er.Errorf("%d msat not expressible in nBTC", msat)
- }
- return uint64(msat / 100), nil
-}
-
-// mSatToPBtc converts the given amount in millisatoshis to picoBTC.
-func mSatToPBtc(msat lnwire.MilliSatoshi) (uint64, er.R) {
- return uint64(msat * 10), nil
-}
-
-// decodeAmount returns the amount encoded by the provided string in
-// millisatoshi.
-func decodeAmount(amount string) (lnwire.MilliSatoshi, er.R) {
- if len(amount) < 1 {
- return 0, er.Errorf("amount must be non-empty")
- }
-
- // If last character is a digit, then the amount can just be
- // interpreted as BTC.
- char := amount[len(amount)-1]
- digit := char - '0'
- if digit >= 0 && digit <= 9 {
- btc, err := strconv.ParseUint(amount, 10, 64)
- if err != nil {
- return 0, er.E(err)
- }
- return lnwire.MilliSatoshi(btc) * mSatPerBtc, nil
- }
-
- // If not a digit, it must be part of the known units.
- conv, ok := toMSat[char]
- if !ok {
- return 0, er.Errorf("unknown multiplier %c", char)
- }
-
- // Known unit.
- num := amount[:len(amount)-1]
- if len(num) < 1 {
- return 0, er.Errorf("number must be non-empty")
- }
-
- am, err := strconv.ParseUint(num, 10, 64)
- if err != nil {
- return 0, er.E(err)
- }
-
- return conv(am)
-}
-
-// encodeAmount encodes the provided millisatoshi amount using as few characters
-// as possible.
-func encodeAmount(msat lnwire.MilliSatoshi) (string, er.R) {
- // If possible to express in BTC, that will always be the shortest
- // representation.
- if msat%mSatPerBtc == 0 {
- return strconv.FormatInt(int64(msat/mSatPerBtc), 10), nil
- }
-
- // Should always be expressible in pico BTC.
- pico, err := fromMSat['p'](msat)
- if err != nil {
- return "", er.Errorf("unable to express %d msat as pBTC: %v",
- msat, err)
- }
- shortened := strconv.FormatUint(pico, 10) + "p"
- for unit, conv := range fromMSat {
- am, err := conv(msat)
- if err != nil {
- // Not expressible using this unit.
- continue
- }
-
- // Save the shortest found representation.
- str := strconv.FormatUint(am, 10) + string(unit)
- if len(str) < len(shortened) {
- shortened = str
- }
- }
-
- return shortened, nil
-}
diff --git a/lnd/channeldb/migration_01_to_11/zpay32/bech32.go b/lnd/channeldb/migration_01_to_11/zpay32/bech32.go
deleted file mode 100644
index 209a8423..00000000
--- a/lnd/channeldb/migration_01_to_11/zpay32/bech32.go
+++ /dev/null
@@ -1,170 +0,0 @@
-package zpay32
-
-import (
- "fmt"
- "strings"
-
- "github.com/pkt-cash/pktd/btcutil/er"
-)
-
-const charset = "qpzry9x8gf2tvdw0s3jn54khce6mua7l"
-
-var gen = []int{0x3b6a57b2, 0x26508e6d, 0x1ea119fa, 0x3d4233dd, 0x2a1462b3}
-
-// NOTE: This method it a slight modification of the method bech32.Decode found
-// btcutil, allowing strings to be more than 90 characters.
-
-// decodeBech32 decodes a bech32 encoded string, returning the human-readable
-// part and the data part excluding the checksum.
-// Note: the data will be base32 encoded, that is each element of the returned
-// byte array will encode 5 bits of data. Use the ConvertBits method to convert
-// this to 8-bit representation.
-func decodeBech32(bech string) (string, []byte, er.R) {
- // The maximum allowed length for a bech32 string is 90. It must also
- // be at least 8 characters, since it needs a non-empty HRP, a
- // separator, and a 6 character checksum.
- // NB: The 90 character check specified in BIP173 is skipped here, to
- // allow strings longer than 90 characters.
- if len(bech) < 8 {
- return "", nil, er.Errorf("invalid bech32 string length %d",
- len(bech))
- }
- // Only ASCII characters between 33 and 126 are allowed.
- for i := 0; i < len(bech); i++ {
- if bech[i] < 33 || bech[i] > 126 {
- return "", nil, er.Errorf("invalid character in "+
- "string: '%c'", bech[i])
- }
- }
-
- // The characters must be either all lowercase or all uppercase.
- lower := strings.ToLower(bech)
- upper := strings.ToUpper(bech)
- if bech != lower && bech != upper {
- return "", nil, er.Errorf("string not all lowercase or all " +
- "uppercase")
- }
-
- // We'll work with the lowercase string from now on.
- bech = lower
-
- // The string is invalid if the last '1' is non-existent, it is the
- // first character of the string (no human-readable part) or one of the
- // last 6 characters of the string (since checksum cannot contain '1'),
- // or if the string is more than 90 characters in total.
- one := strings.LastIndexByte(bech, '1')
- if one < 1 || one+7 > len(bech) {
- return "", nil, er.Errorf("invalid index of 1")
- }
-
- // The human-readable part is everything before the last '1'.
- hrp := bech[:one]
- data := bech[one+1:]
-
- // Each character corresponds to the byte with value of the index in
- // 'charset'.
- decoded, err := toBytes(data)
- if err != nil {
- return "", nil, er.Errorf("failed converting data to bytes: "+
- "%v", err)
- }
-
- if !bech32VerifyChecksum(hrp, decoded) {
- moreInfo := ""
- checksum := bech[len(bech)-6:]
- expected, err := toChars(bech32Checksum(hrp,
- decoded[:len(decoded)-6]))
- if err == nil {
- moreInfo = fmt.Sprintf("Expected %v, got %v.",
- expected, checksum)
- }
- return "", nil, er.Errorf("checksum failed. " + moreInfo)
- }
-
- // We exclude the last 6 bytes, which is the checksum.
- return hrp, decoded[:len(decoded)-6], nil
-}
-
-// toBytes converts each character in the string 'chars' to the value of the
-// index of the corresponding character in 'charset'.
-func toBytes(chars string) ([]byte, er.R) {
- decoded := make([]byte, 0, len(chars))
- for i := 0; i < len(chars); i++ {
- index := strings.IndexByte(charset, chars[i])
- if index < 0 {
- return nil, er.Errorf("invalid character not part of "+
- "charset: %v", chars[i])
- }
- decoded = append(decoded, byte(index))
- }
- return decoded, nil
-}
-
-// toChars converts the byte slice 'data' to a string where each byte in 'data'
-// encodes the index of a character in 'charset'.
-func toChars(data []byte) (string, er.R) {
- result := make([]byte, 0, len(data))
- for _, b := range data {
- if int(b) >= len(charset) {
- return "", er.Errorf("invalid data byte: %v", b)
- }
- result = append(result, charset[b])
- }
- return string(result), nil
-}
-
-// For more details on the checksum calculation, please refer to BIP 173.
-func bech32Checksum(hrp string, data []byte) []byte {
- // Convert the bytes to list of integers, as this is needed for the
- // checksum calculation.
- integers := make([]int, len(data))
- for i, b := range data {
- integers[i] = int(b)
- }
- values := append(bech32HrpExpand(hrp), integers...)
- values = append(values, []int{0, 0, 0, 0, 0, 0}...)
- polymod := bech32Polymod(values) ^ 1
- var res []byte
- for i := 0; i < 6; i++ {
- res = append(res, byte((polymod>>uint(5*(5-i)))&31))
- }
- return res
-}
-
-// For more details on the polymod calculation, please refer to BIP 173.
-func bech32Polymod(values []int) int {
- chk := 1
- for _, v := range values {
- b := chk >> 25
- chk = (chk&0x1ffffff)<<5 ^ v
- for i := 0; i < 5; i++ {
- if (b>>uint(i))&1 == 1 {
- chk ^= gen[i]
- }
- }
- }
- return chk
-}
-
-// For more details on HRP expansion, please refer to BIP 173.
-func bech32HrpExpand(hrp string) []int {
- v := make([]int, 0, len(hrp)*2+1)
- for i := 0; i < len(hrp); i++ {
- v = append(v, int(hrp[i]>>5))
- }
- v = append(v, 0)
- for i := 0; i < len(hrp); i++ {
- v = append(v, int(hrp[i]&31))
- }
- return v
-}
-
-// For more details on the checksum verification, please refer to BIP 173.
-func bech32VerifyChecksum(hrp string, data []byte) bool {
- integers := make([]int, len(data))
- for i, b := range data {
- integers[i] = int(b)
- }
- concat := append(bech32HrpExpand(hrp), integers...)
- return bech32Polymod(concat) == 1
-}
diff --git a/lnd/channeldb/migration_01_to_11/zpay32/decode.go b/lnd/channeldb/migration_01_to_11/zpay32/decode.go
deleted file mode 100644
index 56627098..00000000
--- a/lnd/channeldb/migration_01_to_11/zpay32/decode.go
+++ /dev/null
@@ -1,496 +0,0 @@
-package zpay32
-
-import (
- "bytes"
- "encoding/binary"
- "strings"
- "time"
-
- "github.com/pkt-cash/pktd/btcec"
- "github.com/pkt-cash/pktd/btcutil"
- "github.com/pkt-cash/pktd/btcutil/bech32"
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/chaincfg"
- "github.com/pkt-cash/pktd/chaincfg/chainhash"
- "github.com/pkt-cash/pktd/lnd/lnwire"
-)
-
-// Decode parses the provided encoded invoice and returns a decoded Invoice if
-// it is valid by BOLT-0011 and matches the provided active network.
-func Decode(invoice string, net *chaincfg.Params) (*Invoice, er.R) {
- decodedInvoice := Invoice{}
-
- // Before bech32 decoding the invoice, make sure that it is not too large.
- // This is done as an anti-DoS measure since bech32 decoding is expensive.
- if len(invoice) > maxInvoiceLength {
- return nil, ErrInvoiceTooLarge.Default()
- }
-
- // Decode the invoice using the modified bech32 decoder.
- hrp, data, err := decodeBech32(invoice)
- if err != nil {
- return nil, err
- }
-
- // We expect the human-readable part to at least have ln + one char
- // encoding the network.
- if len(hrp) < 3 {
- return nil, er.Errorf("hrp too short")
- }
-
- // First two characters of HRP should be "ln".
- if hrp[:2] != "ln" {
- return nil, er.Errorf("prefix should be \"ln\"")
- }
-
- // The next characters should be a valid prefix for a segwit BIP173
- // address that match the active network.
- if !strings.HasPrefix(hrp[2:], net.Bech32HRPSegwit) {
- return nil, er.Errorf(
- "invoice not for current active network '%s'", net.Name)
- }
- decodedInvoice.Net = net
-
- // Optionally, if there's anything left of the HRP after ln + the segwit
- // prefix, we try to decode this as the payment amount.
- var netPrefixLength = len(net.Bech32HRPSegwit) + 2
- if len(hrp) > netPrefixLength {
- amount, err := decodeAmount(hrp[netPrefixLength:])
- if err != nil {
- return nil, err
- }
- decodedInvoice.MilliSat = &amount
- }
-
- // Everything except the last 520 bits of the data encodes the invoice's
- // timestamp and tagged fields.
- if len(data) < signatureBase32Len {
- return nil, er.New("short invoice")
- }
- invoiceData := data[:len(data)-signatureBase32Len]
-
- // Parse the timestamp and tagged fields, and fill the Invoice struct.
- if err := parseData(&decodedInvoice, invoiceData, net); err != nil {
- return nil, err
- }
-
- // The last 520 bits (104 groups) make up the signature.
- sigBase32 := data[len(data)-signatureBase32Len:]
- sigBase256, err := bech32.ConvertBits(sigBase32, 5, 8, true)
- if err != nil {
- return nil, err
- }
- var sig lnwire.Sig
- copy(sig[:], sigBase256[:64])
- recoveryID := sigBase256[64]
-
- // The signature is over the hrp + the data the invoice, encoded in
- // base 256.
- taggedDataBytes, err := bech32.ConvertBits(invoiceData, 5, 8, true)
- if err != nil {
- return nil, err
- }
-
- toSign := append([]byte(hrp), taggedDataBytes...)
-
- // We expect the signature to be over the single SHA-256 hash of that
- // data.
- hash := chainhash.HashB(toSign)
-
- // If the destination pubkey was provided as a tagged field, use that
- // to verify the signature, if not do public key recovery.
- if decodedInvoice.Destination != nil {
- signature, err := sig.ToSignature()
- if err != nil {
- return nil, er.Errorf("unable to deserialize "+
- "signature: %v", err)
- }
- if !signature.Verify(hash, decodedInvoice.Destination) {
- return nil, er.Errorf("invalid invoice signature")
- }
- } else {
- headerByte := recoveryID + 27 + 4
- compactSign := append([]byte{headerByte}, sig[:]...)
- pubkey, _, err := btcec.RecoverCompact(btcec.S256(),
- compactSign, hash)
- if err != nil {
- return nil, err
- }
- decodedInvoice.Destination = pubkey
- }
-
- // If no feature vector was decoded, populate an empty one.
- if decodedInvoice.Features == nil {
- decodedInvoice.Features = lnwire.NewFeatureVector(
- nil, lnwire.Features,
- )
- }
-
- // Now that we have created the invoice, make sure it has the required
- // fields set.
- if err := validateInvoice(&decodedInvoice); err != nil {
- return nil, err
- }
-
- return &decodedInvoice, nil
-}
-
-// parseData parses the data part of the invoice. It expects base32 data
-// returned from the bech32.Decode method, except signature.
-func parseData(invoice *Invoice, data []byte, net *chaincfg.Params) er.R {
- // It must contain the timestamp, encoded using 35 bits (7 groups).
- if len(data) < timestampBase32Len {
- return er.Errorf("data too short: %d", len(data))
- }
-
- t, err := parseTimestamp(data[:timestampBase32Len])
- if err != nil {
- return err
- }
- invoice.Timestamp = time.Unix(int64(t), 0)
-
- // The rest are tagged parts.
- tagData := data[7:]
- return parseTaggedFields(invoice, tagData, net)
-}
-
-// parseTimestamp converts a 35-bit timestamp (encoded in base32) to uint64.
-func parseTimestamp(data []byte) (uint64, er.R) {
- if len(data) != timestampBase32Len {
- return 0, er.Errorf("timestamp must be 35 bits, was %d",
- len(data)*5)
- }
-
- return base32ToUint64(data)
-}
-
-// parseTaggedFields takes the base32 encoded tagged fields of the invoice, and
-// fills the Invoice struct accordingly.
-func parseTaggedFields(invoice *Invoice, fields []byte, net *chaincfg.Params) er.R {
- index := 0
- for len(fields)-index > 0 {
- // If there are less than 3 groups to read, there cannot be more
- // interesting information, as we need the type (1 group) and
- // length (2 groups).
- //
- // This means the last tagged field is broken.
- if len(fields)-index < 3 {
- return ErrBrokenTaggedField.Default()
- }
-
- typ := fields[index]
- dataLength, err := parseFieldDataLength(fields[index+1 : index+3])
- if err != nil {
- return err
- }
-
- // If we don't have enough field data left to read this length,
- // return error.
- if len(fields) < index+3+int(dataLength) {
- return ErrInvalidFieldLength.Default()
- }
- base32Data := fields[index+3 : index+3+int(dataLength)]
-
- // Advance the index in preparation for the next iteration.
- index += 3 + int(dataLength)
-
- switch typ {
- case fieldTypeP:
- if invoice.PaymentHash != nil {
- // We skip the field if we have already seen a
- // supported one.
- continue
- }
-
- invoice.PaymentHash, err = parse32Bytes(base32Data)
- case fieldTypeS:
- if invoice.PaymentAddr != nil {
- // We skip the field if we have already seen a
- // supported one.
- continue
- }
-
- invoice.PaymentAddr, err = parse32Bytes(base32Data)
- case fieldTypeD:
- if invoice.Description != nil {
- // We skip the field if we have already seen a
- // supported one.
- continue
- }
-
- invoice.Description, err = parseDescription(base32Data)
- case fieldTypeN:
- if invoice.Destination != nil {
- // We skip the field if we have already seen a
- // supported one.
- continue
- }
-
- invoice.Destination, err = parseDestination(base32Data)
- case fieldTypeH:
- if invoice.DescriptionHash != nil {
- // We skip the field if we have already seen a
- // supported one.
- continue
- }
-
- invoice.DescriptionHash, err = parse32Bytes(base32Data)
- case fieldTypeX:
- if invoice.expiry != nil {
- // We skip the field if we have already seen a
- // supported one.
- continue
- }
-
- invoice.expiry, err = parseExpiry(base32Data)
- case fieldTypeC:
- if invoice.minFinalCLTVExpiry != nil {
- // We skip the field if we have already seen a
- // supported one.
- continue
- }
-
- invoice.minFinalCLTVExpiry, err = parseMinFinalCLTVExpiry(base32Data)
- case fieldTypeF:
- if invoice.FallbackAddr != nil {
- // We skip the field if we have already seen a
- // supported one.
- continue
- }
-
- invoice.FallbackAddr, err = parseFallbackAddr(base32Data, net)
- case fieldTypeR:
- // An `r` field can be included in an invoice multiple
- // times, so we won't skip it if we have already seen
- // one.
- routeHint, err := parseRouteHint(base32Data)
- if err != nil {
- return err
- }
-
- invoice.RouteHints = append(invoice.RouteHints, routeHint)
- case fieldType9:
- if invoice.Features != nil {
- // We skip the field if we have already seen a
- // supported one.
- continue
- }
-
- invoice.Features, err = parseFeatures(base32Data)
- default:
- // Ignore unknown type.
- }
-
- // Check if there was an error from parsing any of the tagged
- // fields and return it.
- if err != nil {
- return err
- }
- }
-
- return nil
-}
-
-// parseFieldDataLength converts the two byte slice into a uint16.
-func parseFieldDataLength(data []byte) (uint16, er.R) {
- if len(data) != 2 {
- return 0, er.Errorf("data length must be 2 bytes, was %d",
- len(data))
- }
-
- return uint16(data[0])<<5 | uint16(data[1]), nil
-}
-
-// parse32Bytes converts a 256-bit value (encoded in base32) to *[32]byte. This
-// can be used for payment hashes, description hashes, payment addresses, etc.
-func parse32Bytes(data []byte) (*[32]byte, er.R) {
- var paymentHash [32]byte
-
- // As BOLT-11 states, a reader must skip over the 32-byte fields if
- // it does not have a length of 52, so avoid returning an error.
- if len(data) != hashBase32Len {
- return nil, nil
- }
-
- hash, err := bech32.ConvertBits(data, 5, 8, false)
- if err != nil {
- return nil, err
- }
-
- copy(paymentHash[:], hash)
-
- return &paymentHash, nil
-}
-
-// parseDescription converts the data (encoded in base32) into a string to use
-// as the description.
-func parseDescription(data []byte) (*string, er.R) {
- base256Data, err := bech32.ConvertBits(data, 5, 8, false)
- if err != nil {
- return nil, err
- }
-
- description := string(base256Data)
-
- return &description, nil
-}
-
-// parseDestination converts the data (encoded in base32) into a 33-byte public
-// key of the payee node.
-func parseDestination(data []byte) (*btcec.PublicKey, er.R) {
- // As BOLT-11 states, a reader must skip over the destination field
- // if it does not have a length of 53, so avoid returning an error.
- if len(data) != pubKeyBase32Len {
- return nil, nil
- }
-
- base256Data, err := bech32.ConvertBits(data, 5, 8, false)
- if err != nil {
- return nil, err
- }
-
- return btcec.ParsePubKey(base256Data, btcec.S256())
-}
-
-// parseExpiry converts the data (encoded in base32) into the expiry time.
-func parseExpiry(data []byte) (*time.Duration, er.R) {
- expiry, err := base32ToUint64(data)
- if err != nil {
- return nil, err
- }
-
- duration := time.Duration(expiry) * time.Second
-
- return &duration, nil
-}
-
-// parseMinFinalCLTVExpiry converts the data (encoded in base32) into a uint64
-// to use as the minFinalCLTVExpiry.
-func parseMinFinalCLTVExpiry(data []byte) (*uint64, er.R) {
- expiry, err := base32ToUint64(data)
- if err != nil {
- return nil, err
- }
-
- return &expiry, nil
-}
-
-// parseFallbackAddr converts the data (encoded in base32) into a fallback
-// on-chain address.
-func parseFallbackAddr(data []byte, net *chaincfg.Params) (btcutil.Address, er.R) {
- // Checks if the data is empty or contains a version without an address.
- if len(data) < 2 {
- return nil, er.Errorf("empty fallback address field")
- }
-
- var addr btcutil.Address
-
- version := data[0]
- switch version {
- case 0:
- witness, err := bech32.ConvertBits(data[1:], 5, 8, false)
- if err != nil {
- return nil, err
- }
-
- switch len(witness) {
- case 20:
- addr, err = btcutil.NewAddressWitnessPubKeyHash(witness, net)
- case 32:
- addr, err = btcutil.NewAddressWitnessScriptHash(witness, net)
- default:
- return nil, er.Errorf("unknown witness program length %d",
- len(witness))
- }
-
- if err != nil {
- return nil, err
- }
- case 17:
- pubKeyHash, err := bech32.ConvertBits(data[1:], 5, 8, false)
- if err != nil {
- return nil, err
- }
-
- addr, err = btcutil.NewAddressPubKeyHash(pubKeyHash, net)
- if err != nil {
- return nil, err
- }
- case 18:
- scriptHash, err := bech32.ConvertBits(data[1:], 5, 8, false)
- if err != nil {
- return nil, err
- }
-
- addr, err = btcutil.NewAddressScriptHashFromHash(scriptHash, net)
- if err != nil {
- return nil, err
- }
- default:
- // Ignore unknown version.
- }
-
- return addr, nil
-}
-
-// parseRouteHint converts the data (encoded in base32) into an array containing
-// one or more routing hop hints that represent a single route hint.
-func parseRouteHint(data []byte) ([]HopHint, er.R) {
- base256Data, err := bech32.ConvertBits(data, 5, 8, false)
- if err != nil {
- return nil, err
- }
-
- // Check that base256Data is a multiple of hopHintLen.
- if len(base256Data)%hopHintLen != 0 {
- return nil, er.Errorf("expected length multiple of %d bytes, "+
- "got %d", hopHintLen, len(base256Data))
- }
-
- var routeHint []HopHint
-
- for len(base256Data) > 0 {
- hopHint := HopHint{}
- hopHint.NodeID, err = btcec.ParsePubKey(base256Data[:33], btcec.S256())
- if err != nil {
- return nil, err
- }
- hopHint.ChannelID = binary.BigEndian.Uint64(base256Data[33:41])
- hopHint.FeeBaseMSat = binary.BigEndian.Uint32(base256Data[41:45])
- hopHint.FeeProportionalMillionths = binary.BigEndian.Uint32(base256Data[45:49])
- hopHint.CLTVExpiryDelta = binary.BigEndian.Uint16(base256Data[49:51])
-
- routeHint = append(routeHint, hopHint)
-
- base256Data = base256Data[51:]
- }
-
- return routeHint, nil
-}
-
-// parseFeatures decodes any feature bits directly from the base32
-// representation.
-func parseFeatures(data []byte) (*lnwire.FeatureVector, er.R) {
- rawFeatures := lnwire.NewRawFeatureVector()
- err := rawFeatures.DecodeBase32(bytes.NewReader(data), len(data))
- if err != nil {
- return nil, err
- }
-
- return lnwire.NewFeatureVector(rawFeatures, lnwire.Features), nil
-}
-
-// base32ToUint64 converts a base32 encoded number to uint64.
-func base32ToUint64(data []byte) (uint64, er.R) {
- // Maximum that fits in uint64 is ceil(64 / 5) = 12 groups.
- if len(data) > 13 {
- return 0, er.Errorf("cannot parse data of length %d as uint64",
- len(data))
- }
-
- val := uint64(0)
- for i := 0; i < len(data); i++ {
- val = val<<5 | uint64(data[i])
- }
- return val, nil
-}
diff --git a/lnd/channeldb/migration_01_to_11/zpay32/hophint.go b/lnd/channeldb/migration_01_to_11/zpay32/hophint.go
deleted file mode 100644
index e2c8d858..00000000
--- a/lnd/channeldb/migration_01_to_11/zpay32/hophint.go
+++ /dev/null
@@ -1,43 +0,0 @@
-package zpay32
-
-import "github.com/pkt-cash/pktd/btcec"
-
-const (
- // DefaultFinalCLTVDelta is the default value to be used as the final
- // CLTV delta for a route if one is unspecified.
- DefaultFinalCLTVDelta = 9
-)
-
-// HopHint is a routing hint that contains the minimum information of a channel
-// required for an intermediate hop in a route to forward the payment to the
-// next. This should be ideally used for private channels, since they are not
-// publicly advertised to the network for routing.
-type HopHint struct {
- // NodeID is the public key of the node at the start of the channel.
- NodeID *btcec.PublicKey
-
- // ChannelID is the unique identifier of the channel.
- ChannelID uint64
-
- // FeeBaseMSat is the base fee of the channel in millisatoshis.
- FeeBaseMSat uint32
-
- // FeeProportionalMillionths is the fee rate, in millionths of a
- // satoshi, for every satoshi sent through the channel.
- FeeProportionalMillionths uint32
-
- // CLTVExpiryDelta is the time-lock delta of the channel.
- CLTVExpiryDelta uint16
-}
-
-// Copy returns a deep copy of the hop hint.
-func (h HopHint) Copy() HopHint {
- nodeID := *h.NodeID
- return HopHint{
- NodeID: &nodeID,
- ChannelID: h.ChannelID,
- FeeBaseMSat: h.FeeBaseMSat,
- FeeProportionalMillionths: h.FeeProportionalMillionths,
- CLTVExpiryDelta: h.CLTVExpiryDelta,
- }
-}
diff --git a/lnd/channeldb/migration_01_to_11/zpay32/invoice.go b/lnd/channeldb/migration_01_to_11/zpay32/invoice.go
deleted file mode 100644
index 2ff684a1..00000000
--- a/lnd/channeldb/migration_01_to_11/zpay32/invoice.go
+++ /dev/null
@@ -1,374 +0,0 @@
-package zpay32
-
-import (
- "time"
-
- "github.com/pkt-cash/pktd/btcec"
- "github.com/pkt-cash/pktd/btcutil"
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/chaincfg"
- "github.com/pkt-cash/pktd/lnd/lnwire"
-)
-
-const (
- // mSatPerBtc is the number of millisatoshis in 1 BTC.
- mSatPerBtc = 100000000000
-
- // signatureBase32Len is the number of 5-bit groups needed to encode
- // the 512 bit signature + 8 bit recovery ID.
- signatureBase32Len = 104
-
- // timestampBase32Len is the number of 5-bit groups needed to encode
- // the 35-bit timestamp.
- timestampBase32Len = 7
-
- // hashBase32Len is the number of 5-bit groups needed to encode a
- // 256-bit hash. Note that the last group will be padded with zeroes.
- hashBase32Len = 52
-
- // pubKeyBase32Len is the number of 5-bit groups needed to encode a
- // 33-byte compressed pubkey. Note that the last group will be padded
- // with zeroes.
- pubKeyBase32Len = 53
-
- // hopHintLen is the number of bytes needed to encode the hop hint of a
- // single private route.
- hopHintLen = 51
-
- // The following byte values correspond to the supported field types.
- // The field name is the character representing that 5-bit value in the
- // bech32 string.
-
- // fieldTypeP is the field containing the payment hash.
- fieldTypeP = 1
-
- // fieldTypeD contains a short description of the payment.
- fieldTypeD = 13
-
- // fieldTypeN contains the pubkey of the target node.
- fieldTypeN = 19
-
- // fieldTypeH contains the hash of a description of the payment.
- fieldTypeH = 23
-
- // fieldTypeX contains the expiry in seconds of the invoice.
- fieldTypeX = 6
-
- // fieldTypeF contains a fallback on-chain address.
- fieldTypeF = 9
-
- // fieldTypeR contains extra routing information.
- fieldTypeR = 3
-
- // fieldTypeC contains an optional requested final CLTV delta.
- fieldTypeC = 24
-
- // fieldType9 contains one or more bytes for signaling features
- // supported or required by the receiver.
- fieldType9 = 5
-
- // fieldTypeS contains a 32-byte payment address, which is a nonce
- // included in the final hop's payload to prevent intermediaries from
- // probing the recipient.
- fieldTypeS = 16
-
- // maxInvoiceLength is the maximum total length an invoice can have.
- // This is chosen to be the maximum number of bytes that can fit into a
- // single QR code: https://en.wikipedia.org/wiki/QR_code#Storage
- maxInvoiceLength = 7089
-
- // DefaultInvoiceExpiry is the default expiry duration from the creation
- // timestamp if expiry is set to zero.
- DefaultInvoiceExpiry = time.Hour
-)
-
-var (
- Err = er.NewErrorType("lnd.zpay32")
- // ErrInvoiceTooLarge is returned when an invoice exceeds
- // maxInvoiceLength.
- ErrInvoiceTooLarge = Err.CodeWithDetail("ErrInvoiceTooLarge", "invoice is too large")
-
- // ErrInvalidFieldLength is returned when a tagged field was specified
- // with a length larger than the left over bytes of the data field.
- ErrInvalidFieldLength = Err.CodeWithDetail("ErrInvalidFieldLength", "invalid field length")
-
- // ErrBrokenTaggedField is returned when the last tagged field is
- // incorrectly formatted and doesn't have enough bytes to be read.
- ErrBrokenTaggedField = Err.CodeWithDetail("ErrBrokenTaggedField", "last tagged field is broken")
-)
-
-// MessageSigner is passed to the Encode method to provide a signature
-// corresponding to the node's pubkey.
-type MessageSigner struct {
- // SignCompact signs the passed hash with the node's privkey. The
- // returned signature should be 65 bytes, where the last 64 are the
- // compact signature, and the first one is a header byte. This is the
- // format returned by btcec.SignCompact.
- SignCompact func(hash []byte) ([]byte, er.R)
-}
-
-// Invoice represents a decoded invoice, or to-be-encoded invoice. Some of the
-// fields are optional, and will only be non-nil if the invoice this was parsed
-// from contains that field. When encoding, only the non-nil fields will be
-// added to the encoded invoice.
-type Invoice struct {
- // Net specifies what network this Lightning invoice is meant for.
- Net *chaincfg.Params
-
- // MilliSat specifies the amount of this invoice in millisatoshi.
- // Optional.
- MilliSat *lnwire.MilliSatoshi
-
- // Timestamp specifies the time this invoice was created.
- // Mandatory
- Timestamp time.Time
-
- // PaymentHash is the payment hash to be used for a payment to this
- // invoice.
- PaymentHash *[32]byte
-
- // PaymentAddr is the payment address to be used by payments to prevent
- // probing of the destination.
- PaymentAddr *[32]byte
-
- // Destination is the public key of the target node. This will always
- // be set after decoding, and can optionally be set before encoding to
- // include the pubkey as an 'n' field. If this is not set before
- // encoding then the destination pubkey won't be added as an 'n' field,
- // and the pubkey will be extracted from the signature during decoding.
- Destination *btcec.PublicKey
-
- // minFinalCLTVExpiry is the value that the creator of the invoice
- // expects to be used for the CLTV expiry of the HTLC extended to it in
- // the last hop.
- //
- // NOTE: This value is optional, and should be set to nil if the
- // invoice creator doesn't have a strong requirement on the CLTV expiry
- // of the final HTLC extended to it.
- //
- // This field is un-exported and can only be read by the
- // MinFinalCLTVExpiry() method. By forcing callers to read via this
- // method, we can easily enforce the default if not specified.
- minFinalCLTVExpiry *uint64
-
- // Description is a short description of the purpose of this invoice.
- // Optional. Non-nil iff DescriptionHash is nil.
- Description *string
-
- // DescriptionHash is the SHA256 hash of a description of the purpose of
- // this invoice.
- // Optional. Non-nil iff Description is nil.
- DescriptionHash *[32]byte
-
- // expiry specifies the timespan this invoice will be valid.
- // Optional. If not set, a default expiry of 60 min will be implied.
- //
- // This field is unexported and can be read by the Expiry() method. This
- // method makes sure the default expiry time is returned in case the
- // field is not set.
- expiry *time.Duration
-
- // FallbackAddr is an on-chain address that can be used for payment in
- // case the Lightning payment fails.
- // Optional.
- FallbackAddr btcutil.Address
-
- // RouteHints represents one or more different route hints. Each route
- // hint can be individually used to reach the destination. These usually
- // represent private routes.
- //
- // NOTE: This is optional.
- RouteHints [][]HopHint
-
- // Features represents an optional field used to signal optional or
- // required support for features by the receiver.
- Features *lnwire.FeatureVector
-}
-
-// Amount is a functional option that allows callers of NewInvoice to set the
-// amount in millisatoshis that the Invoice should encode.
-func Amount(milliSat lnwire.MilliSatoshi) func(*Invoice) {
- return func(i *Invoice) {
- i.MilliSat = &milliSat
- }
-}
-
-// Destination is a functional option that allows callers of NewInvoice to
-// explicitly set the pubkey of the Invoice's destination node.
-func Destination(destination *btcec.PublicKey) func(*Invoice) {
- return func(i *Invoice) {
- i.Destination = destination
- }
-}
-
-// Description is a functional option that allows callers of NewInvoice to set
-// the payment description of the created Invoice.
-//
-// NOTE: Must be used if and only if DescriptionHash is not used.
-func Description(description string) func(*Invoice) {
- return func(i *Invoice) {
- i.Description = &description
- }
-}
-
-// CLTVExpiry is an optional value which allows the receiver of the payment to
-// specify the delta between the current height and the HTLC extended to the
-// receiver.
-func CLTVExpiry(delta uint64) func(*Invoice) {
- return func(i *Invoice) {
- i.minFinalCLTVExpiry = &delta
- }
-}
-
-// DescriptionHash is a functional option that allows callers of NewInvoice to
-// set the payment description hash of the created Invoice.
-//
-// NOTE: Must be used if and only if Description is not used.
-func DescriptionHash(descriptionHash [32]byte) func(*Invoice) {
- return func(i *Invoice) {
- i.DescriptionHash = &descriptionHash
- }
-}
-
-// Expiry is a functional option that allows callers of NewInvoice to set the
-// expiry of the created Invoice. If not set, a default expiry of 60 min will
-// be implied.
-func Expiry(expiry time.Duration) func(*Invoice) {
- return func(i *Invoice) {
- i.expiry = &expiry
- }
-}
-
-// FallbackAddr is a functional option that allows callers of NewInvoice to set
-// the Invoice's fallback on-chain address that can be used for payment in case
-// the Lightning payment fails
-func FallbackAddr(fallbackAddr btcutil.Address) func(*Invoice) {
- return func(i *Invoice) {
- i.FallbackAddr = fallbackAddr
- }
-}
-
-// RouteHint is a functional option that allows callers of NewInvoice to add
-// one or more hop hints that represent a private route to the destination.
-func RouteHint(routeHint []HopHint) func(*Invoice) {
- return func(i *Invoice) {
- i.RouteHints = append(i.RouteHints, routeHint)
- }
-}
-
-// Features is a functional option that allows callers of NewInvoice to set the
-// desired feature bits that are advertised on the invoice. If this option is
-// not used, an empty feature vector will automatically be populated.
-func Features(features *lnwire.FeatureVector) func(*Invoice) {
- return func(i *Invoice) {
- i.Features = features
- }
-}
-
-// PaymentAddr is a functional option that allows callers of NewInvoice to set
-// the desired payment address tht is advertised on the invoice.
-func PaymentAddr(addr [32]byte) func(*Invoice) {
- return func(i *Invoice) {
- i.PaymentAddr = &addr
- }
-}
-
-// NewInvoice creates a new Invoice object. The last parameter is a set of
-// variadic arguments for setting optional fields of the invoice.
-//
-// NOTE: Either Description or DescriptionHash must be provided for the Invoice
-// to be considered valid.
-func NewInvoice(net *chaincfg.Params, paymentHash [32]byte,
- timestamp time.Time, options ...func(*Invoice)) (*Invoice, er.R) {
-
- invoice := &Invoice{
- Net: net,
- PaymentHash: &paymentHash,
- Timestamp: timestamp,
- }
-
- for _, option := range options {
- option(invoice)
- }
-
- // If no features were set, we'll populate an empty feature vector.
- if invoice.Features == nil {
- invoice.Features = lnwire.NewFeatureVector(
- nil, lnwire.Features,
- )
- }
-
- if err := validateInvoice(invoice); err != nil {
- return nil, err
- }
-
- return invoice, nil
-}
-
-// Expiry returns the expiry time for this invoice. If expiry time is not set
-// explicitly, the default 3600 second expiry will be returned.
-func (invoice *Invoice) Expiry() time.Duration {
- if invoice.expiry != nil {
- return *invoice.expiry
- }
-
- // If no expiry is set for this invoice, default is 3600 seconds.
- return DefaultInvoiceExpiry
-}
-
-// MinFinalCLTVExpiry returns the minimum final CLTV expiry delta as specified
-// by the creator of the invoice. This value specifies the delta between the
-// current height and the expiry height of the HTLC extended in the last hop.
-func (invoice *Invoice) MinFinalCLTVExpiry() uint64 {
- if invoice.minFinalCLTVExpiry != nil {
- return *invoice.minFinalCLTVExpiry
- }
-
- return DefaultFinalCLTVDelta
-}
-
-// validateInvoice does a sanity check of the provided Invoice, making sure it
-// has all the necessary fields set for it to be considered valid by BOLT-0011.
-func validateInvoice(invoice *Invoice) er.R {
- // The net must be set.
- if invoice.Net == nil {
- return er.Errorf("net params not set")
- }
-
- // The invoice must contain a payment hash.
- if invoice.PaymentHash == nil {
- return er.Errorf("no payment hash found")
- }
-
- // Either Description or DescriptionHash must be set, not both.
- if invoice.Description != nil && invoice.DescriptionHash != nil {
- return er.Errorf("both description and description hash set")
- }
- if invoice.Description == nil && invoice.DescriptionHash == nil {
- return er.Errorf("neither description nor description hash set")
- }
-
- // Check that we support the field lengths.
- if len(invoice.PaymentHash) != 32 {
- return er.Errorf("unsupported payment hash length: %d",
- len(invoice.PaymentHash))
- }
-
- if invoice.DescriptionHash != nil && len(invoice.DescriptionHash) != 32 {
- return er.Errorf("unsupported description hash length: %d",
- len(invoice.DescriptionHash))
- }
-
- if invoice.Destination != nil &&
- len(invoice.Destination.SerializeCompressed()) != 33 {
- return er.Errorf("unsupported pubkey length: %d",
- len(invoice.Destination.SerializeCompressed()))
- }
-
- // Ensure that all invoices have feature vectors.
- if invoice.Features == nil {
- return er.Errorf("missing feature vector")
- }
-
- return nil
-}
diff --git a/lnd/channeldb/migtest/migtest.go b/lnd/channeldb/migtest/migtest.go
deleted file mode 100644
index 5caebc72..00000000
--- a/lnd/channeldb/migtest/migtest.go
+++ /dev/null
@@ -1,93 +0,0 @@
-package migtest
-
-import (
- "io/ioutil"
- "os"
- "testing"
-
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/lnd/channeldb/kvdb"
-)
-
-// MakeDB creates a new instance of the ChannelDB for testing purposes. A
-// callback which cleans up the created temporary directories is also returned
-// and intended to be executed after the test completes.
-func MakeDB() (kvdb.Backend, func(), er.R) {
- // Create temporary database for mission control.
- file, errr := ioutil.TempFile("", "*.db")
- if errr != nil {
- return nil, nil, er.E(errr)
- }
-
- dbPath := file.Name()
- db, err := kvdb.Open(kvdb.BoltBackendName, dbPath, true)
- if err != nil {
- return nil, nil, err
- }
-
- cleanUp := func() {
- db.Close()
- os.RemoveAll(dbPath)
- }
-
- return db, cleanUp, nil
-}
-
-// ApplyMigration is a helper test function that encapsulates the general steps
-// which are needed to properly check the result of applying migration function.
-func ApplyMigration(t *testing.T,
- beforeMigration, afterMigration, migrationFunc func(tx kvdb.RwTx) er.R,
- shouldFail bool) {
-
- cdb, cleanUp, err := MakeDB()
- defer cleanUp()
- if err != nil {
- t.Fatal(err)
- }
-
- // beforeMigration usually used for populating the database
- // with test data.
- err = kvdb.Update(cdb, beforeMigration, func() {})
- if err != nil {
- t.Fatal(err)
- }
-
- defer func() {
- if r := recover(); r != nil {
- err = newError(r)
- }
-
- if err == nil && shouldFail {
- t.Fatal("error wasn't received on migration stage")
- } else if err != nil && !shouldFail {
- t.Fatalf("error was received on migration stage: %v", err)
- }
-
- // afterMigration usually used for checking the database state and
- // throwing the error if something went wrong.
- err = kvdb.Update(cdb, afterMigration, func() {})
- if err != nil {
- t.Fatal(err)
- }
- }()
-
- // Apply migration.
- err = kvdb.Update(cdb, migrationFunc, func() {})
- if err != nil {
- t.Logf("migration error: %v", err)
- }
-}
-
-func newError(e interface{}) er.R {
- var err er.R
- switch e := e.(type) {
- case er.R:
- err = e
- case error:
- err = er.E(e)
- default:
- err = er.Errorf("%v", e)
- }
-
- return err
-}
diff --git a/lnd/channeldb/migtest/raw_db.go b/lnd/channeldb/migtest/raw_db.go
deleted file mode 100644
index 449e85aa..00000000
--- a/lnd/channeldb/migtest/raw_db.go
+++ /dev/null
@@ -1,188 +0,0 @@
-package migtest
-
-import (
- "bytes"
- "encoding/hex"
- "fmt"
- "strings"
-
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/btcutil/util"
- "github.com/pkt-cash/pktd/lnd/channeldb/kvdb"
-)
-
-// DumpDB dumps go code describing the contents of the database to stdout. This
-// function is only intended for use during development.
-//
-// Example output:
-//
-// map[string]interface{}{
-// hex("1234"): map[string]interface{}{
-// "human-readable": hex("102030"),
-// hex("1111"): hex("5783492373"),
-// },
-// }
-func DumpDB(tx kvdb.RTx, rootKey []byte) er.R {
- bucket := tx.ReadBucket(rootKey)
- if bucket == nil {
- return er.Errorf("bucket %v not found", string(rootKey))
- }
-
- return dumpBucket(bucket)
-}
-
-func dumpBucket(bucket kvdb.RBucket) er.R {
- fmt.Printf("map[string]interface{} {\n")
- err := bucket.ForEach(func(k, v []byte) er.R {
- key := toString(k)
- fmt.Printf("%v: ", key)
-
- subBucket := bucket.NestedReadBucket(k)
- if subBucket != nil {
- err := dumpBucket(subBucket)
- if err != nil {
- return err
- }
- } else {
- fmt.Print(toHex(v))
- }
- fmt.Printf(",\n")
-
- return nil
- })
- if err != nil {
- return err
- }
- fmt.Printf("}")
-
- return nil
-}
-
-// RestoreDB primes the database with the given data set.
-func RestoreDB(tx kvdb.RwTx, rootKey []byte, data map[string]interface{}) er.R {
- bucket, err := tx.CreateTopLevelBucket(rootKey)
- if err != nil {
- return err
- }
-
- return restoreDB(bucket, data)
-}
-
-func restoreDB(bucket kvdb.RwBucket, data map[string]interface{}) er.R {
- for k, v := range data {
- key := []byte(k)
-
- switch value := v.(type) {
-
- // Key contains value.
- case string:
- err := bucket.Put(key, []byte(value))
- if err != nil {
- return err
- }
-
- // Key contains a sub-bucket.
- case map[string]interface{}:
- subBucket, err := bucket.CreateBucket(key)
- if err != nil {
- return err
- }
-
- if err := restoreDB(subBucket, value); err != nil {
- return err
- }
-
- default:
- return er.New("invalid type")
- }
- }
-
- return nil
-}
-
-// VerifyDB verifies the database against the given data set.
-func VerifyDB(tx kvdb.RTx, rootKey []byte, data map[string]interface{}) er.R {
- bucket := tx.ReadBucket(rootKey)
- if bucket == nil {
- return er.Errorf("bucket %v not found", string(rootKey))
- }
-
- return verifyDB(bucket, data)
-}
-
-func verifyDB(bucket kvdb.RBucket, data map[string]interface{}) er.R {
- for k, v := range data {
- key := []byte(k)
-
- switch value := v.(type) {
-
- // Key contains value.
- case string:
- expectedValue := []byte(value)
- dbValue := bucket.Get(key)
-
- if !bytes.Equal(dbValue, expectedValue) {
- return er.New("value mismatch")
- }
-
- // Key contains a sub-bucket.
- case map[string]interface{}:
- subBucket := bucket.NestedReadBucket(key)
- if subBucket == nil {
- return er.Errorf("bucket %v not found", k)
- }
-
- err := verifyDB(subBucket, value)
- if err != nil {
- return err
- }
-
- default:
- return er.New("invalid type")
- }
- }
-
- keyCount := 0
- err := bucket.ForEach(func(k, v []byte) er.R {
- keyCount++
- return nil
- })
- if err != nil {
- return err
- }
- if keyCount != len(data) {
- return er.New("unexpected keys in database")
- }
-
- return nil
-}
-
-func toHex(v []byte) string {
- if len(v) == 0 {
- return "nil"
- }
-
- return "hex(\"" + hex.EncodeToString(v) + "\")"
-}
-
-func toString(v []byte) string {
- readableChars := "abcdefghijklmnopqrstuvwxyz0123456789-"
-
- for _, c := range v {
- if !strings.Contains(readableChars, string(c)) {
- return toHex(v)
- }
- }
-
- return "\"" + string(v) + "\""
-}
-
-// Hex is a test helper function to convert readable hex arrays to raw byte
-// strings.
-func Hex(value string) string {
- b, err := util.DecodeHex(value)
- if err != nil {
- panic(err)
- }
- return string(b)
-}
diff --git a/lnd/channeldb/mp_payment.go b/lnd/channeldb/mp_payment.go
deleted file mode 100644
index 64e5af49..00000000
--- a/lnd/channeldb/mp_payment.go
+++ /dev/null
@@ -1,313 +0,0 @@
-package channeldb
-
-import (
- "bytes"
- "io"
- "time"
-
- "github.com/pkt-cash/pktd/btcec"
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/btcutil/util"
- "github.com/pkt-cash/pktd/lnd/lntypes"
- "github.com/pkt-cash/pktd/lnd/lnwire"
- "github.com/pkt-cash/pktd/lnd/routing/route"
- "github.com/pkt-cash/pktd/wire"
-)
-
-// HTLCAttemptInfo contains static information about a specific HTLC attempt
-// for a payment. This information is used by the router to handle any errors
-// coming back after an attempt is made, and to query the switch about the
-// status of the attempt.
-type HTLCAttemptInfo struct {
- // AttemptID is the unique ID used for this attempt.
- AttemptID uint64
-
- // SessionKey is the ephemeral key used for this attempt.
- SessionKey *btcec.PrivateKey
-
- // Route is the route attempted to send the HTLC.
- Route route.Route
-
- // AttemptTime is the time at which this HTLC was attempted.
- AttemptTime time.Time
-}
-
-// HTLCAttempt contains information about a specific HTLC attempt for a given
-// payment. It contains the HTLCAttemptInfo used to send the HTLC, as well
-// as a timestamp and any known outcome of the attempt.
-type HTLCAttempt struct {
- HTLCAttemptInfo
-
- // Settle is the preimage of a successful payment. This serves as a
- // proof of payment. It will only be non-nil for settled payments.
- //
- // NOTE: Can be nil if payment is not settled.
- Settle *HTLCSettleInfo
-
- // Fail is a failure reason code indicating the reason the payment
- // failed. It is only non-nil for failed payments.
- //
- // NOTE: Can be nil if payment is not failed.
- Failure *HTLCFailInfo
-}
-
-// HTLCSettleInfo encapsulates the information that augments an HTLCAttempt in
-// the event that the HTLC is successful.
-type HTLCSettleInfo struct {
- // Preimage is the preimage of a successful HTLC. This serves as a proof
- // of payment.
- Preimage lntypes.Preimage
-
- // SettleTime is the time at which this HTLC was settled.
- SettleTime time.Time
-}
-
-// HTLCFailReason is the reason an htlc failed.
-type HTLCFailReason byte
-
-const (
- // HTLCFailUnknown is recorded for htlcs that failed with an unknown
- // reason.
- HTLCFailUnknown HTLCFailReason = 0
-
- // HTLCFailUnknown is recorded for htlcs that had a failure message that
- // couldn't be decrypted.
- HTLCFailUnreadable HTLCFailReason = 1
-
- // HTLCFailInternal is recorded for htlcs that failed because of an
- // internal error.
- HTLCFailInternal HTLCFailReason = 2
-
- // HTLCFailMessage is recorded for htlcs that failed with a network
- // failure message.
- HTLCFailMessage HTLCFailReason = 3
-)
-
-// HTLCFailInfo encapsulates the information that augments an HTLCAttempt in the
-// event that the HTLC fails.
-type HTLCFailInfo struct {
- // FailTime is the time at which this HTLC was failed.
- FailTime time.Time
-
- // Message is the wire message that failed this HTLC. This field will be
- // populated when the failure reason is HTLCFailMessage.
- Message lnwire.FailureMessage
-
- // Reason is the failure reason for this HTLC.
- Reason HTLCFailReason
-
- // The position in the path of the intermediate or final node that
- // generated the failure message. Position zero is the sender node. This
- // field will be populated when the failure reason is either
- // HTLCFailMessage or HTLCFailUnknown.
- FailureSourceIndex uint32
-}
-
-// MPPayment is a wrapper around a payment's PaymentCreationInfo and
-// HTLCAttempts. All payments will have the PaymentCreationInfo set, any
-// HTLCs made in attempts to be completed will populated in the HTLCs slice.
-// Each populated HTLCAttempt represents an attempted HTLC, each of which may
-// have the associated Settle or Fail struct populated if the HTLC is no longer
-// in-flight.
-type MPPayment struct {
- // SequenceNum is a unique identifier used to sort the payments in
- // order of creation.
- SequenceNum uint64
-
- // Info holds all static information about this payment, and is
- // populated when the payment is initiated.
- Info *PaymentCreationInfo
-
- // HTLCs holds the information about individual HTLCs that we send in
- // order to make the payment.
- HTLCs []HTLCAttempt
-
- // FailureReason is the failure reason code indicating the reason the
- // payment failed.
- //
- // NOTE: Will only be set once the daemon has given up on the payment
- // altogether.
- FailureReason *FailureReason
-
- // Status is the current PaymentStatus of this payment.
- Status PaymentStatus
-}
-
-// TerminalInfo returns any HTLC settle info recorded. If no settle info is
-// recorded, any payment level failure will be returned. If neither a settle
-// nor a failure is recorded, both return values will be nil.
-func (m *MPPayment) TerminalInfo() (*HTLCSettleInfo, *FailureReason) {
- for _, h := range m.HTLCs {
- if h.Settle != nil {
- return h.Settle, nil
- }
- }
-
- return nil, m.FailureReason
-}
-
-// SentAmt returns the sum of sent amount and fees for HTLCs that are either
-// settled or still in flight.
-func (m *MPPayment) SentAmt() (lnwire.MilliSatoshi, lnwire.MilliSatoshi) {
- var sent, fees lnwire.MilliSatoshi
- for _, h := range m.HTLCs {
- if h.Failure != nil {
- continue
- }
-
- // The attempt was not failed, meaning the amount was
- // potentially sent to the receiver.
- sent += h.Route.ReceiverAmt()
- fees += h.Route.TotalFees()
- }
-
- return sent, fees
-}
-
-// InFlightHTLCs returns the HTLCs that are still in-flight, meaning they have
-// not been settled or failed.
-func (m *MPPayment) InFlightHTLCs() []HTLCAttempt {
- var inflights []HTLCAttempt
- for _, h := range m.HTLCs {
- if h.Settle != nil || h.Failure != nil {
- continue
- }
-
- inflights = append(inflights, h)
- }
-
- return inflights
-}
-
-// GetAttempt returns the specified htlc attempt on the payment.
-func (m *MPPayment) GetAttempt(id uint64) (*HTLCAttempt, er.R) {
- for _, htlc := range m.HTLCs {
- htlc := htlc
- if htlc.AttemptID == id {
- return &htlc, nil
- }
- }
-
- return nil, er.New("htlc attempt not found on payment")
-}
-
-// serializeHTLCSettleInfo serializes the details of a settled htlc.
-func serializeHTLCSettleInfo(w io.Writer, s *HTLCSettleInfo) er.R {
- if _, err := util.Write(w, s.Preimage[:]); err != nil {
- return err
- }
-
- if err := serializeTime(w, s.SettleTime); err != nil {
- return err
- }
-
- return nil
-}
-
-// deserializeHTLCSettleInfo deserializes the details of a settled htlc.
-func deserializeHTLCSettleInfo(r io.Reader) (*HTLCSettleInfo, er.R) {
- s := &HTLCSettleInfo{}
- if _, err := util.ReadFull(r, s.Preimage[:]); err != nil {
- return nil, err
- }
-
- var err er.R
- s.SettleTime, err = deserializeTime(r)
- if err != nil {
- return nil, err
- }
-
- return s, nil
-}
-
-// serializeHTLCFailInfo serializes the details of a failed htlc including the
-// wire failure.
-func serializeHTLCFailInfo(w io.Writer, f *HTLCFailInfo) er.R {
- if err := serializeTime(w, f.FailTime); err != nil {
- return err
- }
-
- // Write failure. If there is no failure message, write an empty
- // byte slice.
- var messageBytes bytes.Buffer
- if f.Message != nil {
- err := lnwire.EncodeFailureMessage(&messageBytes, f.Message, 0)
- if err != nil {
- return err
- }
- }
- if err := wire.WriteVarBytes(w, 0, messageBytes.Bytes()); err != nil {
- return err
- }
-
- return WriteElements(w, byte(f.Reason), f.FailureSourceIndex)
-}
-
-// deserializeHTLCFailInfo deserializes the details of a failed htlc including
-// the wire failure.
-func deserializeHTLCFailInfo(r io.Reader) (*HTLCFailInfo, er.R) {
- f := &HTLCFailInfo{}
- var err er.R
- f.FailTime, err = deserializeTime(r)
- if err != nil {
- return nil, err
- }
-
- // Read failure.
- failureBytes, err := wire.ReadVarBytes(
- r, 0, lnwire.FailureMessageLength, "failure",
- )
- if err != nil {
- return nil, err
- }
- if len(failureBytes) > 0 {
- f.Message, err = lnwire.DecodeFailureMessage(
- bytes.NewReader(failureBytes), 0,
- )
- if err != nil {
- return nil, err
- }
- }
-
- var reason byte
- err = ReadElements(r, &reason, &f.FailureSourceIndex)
- if err != nil {
- return nil, err
- }
- f.Reason = HTLCFailReason(reason)
-
- return f, nil
-}
-
-// deserializeTime deserializes time as unix nanoseconds.
-func deserializeTime(r io.Reader) (time.Time, er.R) {
- var scratch [8]byte
- if _, err := util.ReadFull(r, scratch[:]); err != nil {
- return time.Time{}, err
- }
-
- // Convert to time.Time. Interpret unix nano time zero as a zero
- // time.Time value.
- unixNano := byteOrder.Uint64(scratch[:])
- if unixNano == 0 {
- return time.Time{}, nil
- }
-
- return time.Unix(0, int64(unixNano)), nil
-}
-
-// serializeTime serializes time as unix nanoseconds.
-func serializeTime(w io.Writer, t time.Time) er.R {
- var scratch [8]byte
-
- // Convert to unix nano seconds, but only if time is non-zero. Calling
- // UnixNano() on a zero time yields an undefined result.
- var unixNano int64
- if !t.IsZero() {
- unixNano = t.UnixNano()
- }
-
- byteOrder.PutUint64(scratch[:], uint64(unixNano))
- _, err := util.Write(w, scratch[:])
- return err
-}
diff --git a/lnd/channeldb/nodes.go b/lnd/channeldb/nodes.go
deleted file mode 100644
index 7d3acfc8..00000000
--- a/lnd/channeldb/nodes.go
+++ /dev/null
@@ -1,322 +0,0 @@
-package channeldb
-
-import (
- "bytes"
- "io"
- "net"
- "time"
-
- "github.com/pkt-cash/pktd/btcec"
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/btcutil/util"
- "github.com/pkt-cash/pktd/lnd/channeldb/kvdb"
- "github.com/pkt-cash/pktd/wire/protocol"
-)
-
-var (
- // nodeInfoBucket stores metadata pertaining to nodes that we've had
- // direct channel-based correspondence with. This bucket allows one to
- // query for all open channels pertaining to the node by exploring each
- // node's sub-bucket within the openChanBucket.
- nodeInfoBucket = []byte("nib")
-)
-
-// LinkNode stores metadata related to node's that we have/had a direct
-// channel open with. Information such as the Bitcoin network the node
-// advertised, and its identity public key are also stored. Additionally, this
-// struct and the bucket its stored within have store data similar to that of
-// Bitcoin's addrmanager. The TCP address information stored within the struct
-// can be used to establish persistent connections will all channel
-// counterparties on daemon startup.
-//
-// TODO(roasbeef): also add current OnionKey plus rotation schedule?
-// TODO(roasbeef): add bitfield for supported services
-// * possibly add a wire.NetAddress type, type
-type LinkNode struct {
- // Network indicates the Bitcoin network that the LinkNode advertises
- // for incoming channel creation.
- Network protocol.BitcoinNet
-
- // IdentityPub is the node's current identity public key. Any
- // channel/topology related information received by this node MUST be
- // signed by this public key.
- IdentityPub *btcec.PublicKey
-
- // LastSeen tracks the last time this node was seen within the network.
- // A node should be marked as seen if the daemon either is able to
- // establish an outgoing connection to the node or receives a new
- // incoming connection from the node. This timestamp (stored in unix
- // epoch) may be used within a heuristic which aims to determine when a
- // channel should be unilaterally closed due to inactivity.
- //
- // TODO(roasbeef): replace with block hash/height?
- // * possibly add a time-value metric into the heuristic?
- LastSeen time.Time
-
- // Addresses is a list of IP address in which either we were able to
- // reach the node over in the past, OR we received an incoming
- // authenticated connection for the stored identity public key.
- Addresses []net.Addr
-
- db *DB
-}
-
-// NewLinkNode creates a new LinkNode from the provided parameters, which is
-// backed by an instance of channeldb.
-func (db *DB) NewLinkNode(bitNet protocol.BitcoinNet, pub *btcec.PublicKey,
- addrs ...net.Addr) *LinkNode {
-
- return &LinkNode{
- Network: bitNet,
- IdentityPub: pub,
- LastSeen: time.Now(),
- Addresses: addrs,
- db: db,
- }
-}
-
-// UpdateLastSeen updates the last time this node was directly encountered on
-// the Lightning Network.
-func (l *LinkNode) UpdateLastSeen(lastSeen time.Time) er.R {
- l.LastSeen = lastSeen
-
- return l.Sync()
-}
-
-// AddAddress appends the specified TCP address to the list of known addresses
-// this node is/was known to be reachable at.
-func (l *LinkNode) AddAddress(addr net.Addr) er.R {
- for _, a := range l.Addresses {
- if a.String() == addr.String() {
- return nil
- }
- }
-
- l.Addresses = append(l.Addresses, addr)
-
- return l.Sync()
-}
-
-// Sync performs a full database sync which writes the current up-to-date data
-// within the struct to the database.
-func (l *LinkNode) Sync() er.R {
-
- // Finally update the database by storing the link node and updating
- // any relevant indexes.
- return kvdb.Update(l.db, func(tx kvdb.RwTx) er.R {
- nodeMetaBucket := tx.ReadWriteBucket(nodeInfoBucket)
- if nodeMetaBucket == nil {
- return ErrLinkNodesNotFound.Default()
- }
-
- return putLinkNode(nodeMetaBucket, l)
- }, func() {})
-}
-
-// putLinkNode serializes then writes the encoded version of the passed link
-// node into the nodeMetaBucket. This function is provided in order to allow
-// the ability to re-use a database transaction across many operations.
-func putLinkNode(nodeMetaBucket kvdb.RwBucket, l *LinkNode) er.R {
- // First serialize the LinkNode into its raw-bytes encoding.
- var b bytes.Buffer
- if err := serializeLinkNode(&b, l); err != nil {
- return err
- }
-
- // Finally insert the link-node into the node metadata bucket keyed
- // according to the its pubkey serialized in compressed form.
- nodePub := l.IdentityPub.SerializeCompressed()
- return nodeMetaBucket.Put(nodePub, b.Bytes())
-}
-
-// DeleteLinkNode removes the link node with the given identity from the
-// database.
-func (db *DB) DeleteLinkNode(identity *btcec.PublicKey) er.R {
- return kvdb.Update(db, func(tx kvdb.RwTx) er.R {
- return db.deleteLinkNode(tx, identity)
- }, func() {})
-}
-
-func (db *DB) deleteLinkNode(tx kvdb.RwTx, identity *btcec.PublicKey) er.R {
- nodeMetaBucket := tx.ReadWriteBucket(nodeInfoBucket)
- if nodeMetaBucket == nil {
- return ErrLinkNodesNotFound.Default()
- }
-
- pubKey := identity.SerializeCompressed()
- return nodeMetaBucket.Delete(pubKey)
-}
-
-// FetchLinkNode attempts to lookup the data for a LinkNode based on a target
-// identity public key. If a particular LinkNode for the passed identity public
-// key cannot be found, then ErrNodeNotFound if returned.
-func (db *DB) FetchLinkNode(identity *btcec.PublicKey) (*LinkNode, er.R) {
- var linkNode *LinkNode
- err := kvdb.View(db, func(tx kvdb.RTx) er.R {
- node, err := fetchLinkNode(tx, identity)
- if err != nil {
- return err
- }
-
- linkNode = node
- return nil
- }, func() {
- linkNode = nil
- })
-
- return linkNode, err
-}
-
-func fetchLinkNode(tx kvdb.RTx, targetPub *btcec.PublicKey) (*LinkNode, er.R) {
- // First fetch the bucket for storing node metadata, bailing out early
- // if it hasn't been created yet.
- nodeMetaBucket := tx.ReadBucket(nodeInfoBucket)
- if nodeMetaBucket == nil {
- return nil, ErrLinkNodesNotFound.Default()
- }
-
- // If a link node for that particular public key cannot be located,
- // then exit early with an ErrNodeNotFound.
- pubKey := targetPub.SerializeCompressed()
- nodeBytes := nodeMetaBucket.Get(pubKey)
- if nodeBytes == nil {
- return nil, ErrNodeNotFound.Default()
- }
-
- // Finally, decode and allocate a fresh LinkNode object to be returned
- // to the caller.
- nodeReader := bytes.NewReader(nodeBytes)
- return deserializeLinkNode(nodeReader)
-}
-
-// TODO(roasbeef): update link node addrs in server upon connection
-
-// FetchAllLinkNodes starts a new database transaction to fetch all nodes with
-// whom we have active channels with.
-func (db *DB) FetchAllLinkNodes() ([]*LinkNode, er.R) {
- var linkNodes []*LinkNode
- err := kvdb.View(db, func(tx kvdb.RTx) er.R {
- nodes, err := db.fetchAllLinkNodes(tx)
- if err != nil {
- return err
- }
-
- linkNodes = nodes
- return nil
- }, func() {
- linkNodes = nil
- })
- if err != nil {
- return nil, err
- }
-
- return linkNodes, nil
-}
-
-// fetchAllLinkNodes uses an existing database transaction to fetch all nodes
-// with whom we have active channels with.
-func (db *DB) fetchAllLinkNodes(tx kvdb.RTx) ([]*LinkNode, er.R) {
- nodeMetaBucket := tx.ReadBucket(nodeInfoBucket)
- if nodeMetaBucket == nil {
- return nil, ErrLinkNodesNotFound.Default()
- }
-
- var linkNodes []*LinkNode
- err := nodeMetaBucket.ForEach(func(k, v []byte) er.R {
- if v == nil {
- return nil
- }
-
- nodeReader := bytes.NewReader(v)
- linkNode, err := deserializeLinkNode(nodeReader)
- if err != nil {
- return err
- }
-
- linkNodes = append(linkNodes, linkNode)
- return nil
- })
- if err != nil {
- return nil, err
- }
-
- return linkNodes, nil
-}
-
-func serializeLinkNode(w io.Writer, l *LinkNode) er.R {
- var buf [8]byte
-
- byteOrder.PutUint32(buf[:4], uint32(l.Network))
- if _, err := util.Write(w, buf[:4]); err != nil {
- return err
- }
-
- serializedID := l.IdentityPub.SerializeCompressed()
- if _, err := util.Write(w, serializedID); err != nil {
- return err
- }
-
- seenUnix := uint64(l.LastSeen.Unix())
- byteOrder.PutUint64(buf[:], seenUnix)
- if _, err := util.Write(w, buf[:]); err != nil {
- return err
- }
-
- numAddrs := uint32(len(l.Addresses))
- byteOrder.PutUint32(buf[:4], numAddrs)
- if _, err := util.Write(w, buf[:4]); err != nil {
- return err
- }
-
- for _, addr := range l.Addresses {
- if err := serializeAddr(w, addr); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func deserializeLinkNode(r io.Reader) (*LinkNode, er.R) {
- var (
- err er.R
- buf [8]byte
- )
-
- node := &LinkNode{}
-
- if _, err := util.ReadFull(r, buf[:4]); err != nil {
- return nil, err
- }
- node.Network = protocol.BitcoinNet(byteOrder.Uint32(buf[:4]))
-
- var pub [33]byte
- if _, err := util.ReadFull(r, pub[:]); err != nil {
- return nil, err
- }
- node.IdentityPub, err = btcec.ParsePubKey(pub[:], btcec.S256())
- if err != nil {
- return nil, err
- }
-
- if _, err := util.ReadFull(r, buf[:]); err != nil {
- return nil, err
- }
- node.LastSeen = time.Unix(int64(byteOrder.Uint64(buf[:])), 0)
-
- if _, err := util.ReadFull(r, buf[:4]); err != nil {
- return nil, err
- }
- numAddrs := byteOrder.Uint32(buf[:4])
-
- node.Addresses = make([]net.Addr, numAddrs)
- for i := uint32(0); i < numAddrs; i++ {
- addr, err := deserializeAddr(r)
- if err != nil {
- return nil, err
- }
- node.Addresses[i] = addr
- }
-
- return node, nil
-}
diff --git a/lnd/channeldb/nodes_test.go b/lnd/channeldb/nodes_test.go
deleted file mode 100644
index 156c61b4..00000000
--- a/lnd/channeldb/nodes_test.go
+++ /dev/null
@@ -1,140 +0,0 @@
-package channeldb
-
-import (
- "bytes"
- "net"
- "testing"
- "time"
-
- "github.com/pkt-cash/pktd/btcec"
- "github.com/pkt-cash/pktd/wire/protocol"
-)
-
-func TestLinkNodeEncodeDecode(t *testing.T) {
- t.Parallel()
-
- cdb, cleanUp, err := MakeTestDB()
- if err != nil {
- t.Fatalf("unable to make test database: %v", err)
- }
- defer cleanUp()
-
- // First we'll create some initial data to use for populating our test
- // LinkNode instances.
- _, pub1 := btcec.PrivKeyFromBytes(btcec.S256(), key[:])
- _, pub2 := btcec.PrivKeyFromBytes(btcec.S256(), rev[:])
- addr1, errr := net.ResolveTCPAddr("tcp", "10.0.0.1:9000")
- if errr != nil {
- t.Fatalf("unable to create test addr: %v", errr)
- }
- addr2, errr := net.ResolveTCPAddr("tcp", "10.0.0.2:9000")
- if errr != nil {
- t.Fatalf("unable to create test addr: %v", errr)
- }
-
- // Create two fresh link node instances with the above dummy data, then
- // fully sync both instances to disk.
- node1 := cdb.NewLinkNode(protocol.MainNet, pub1, addr1)
- node2 := cdb.NewLinkNode(protocol.TestNet3, pub2, addr2)
- if err := node1.Sync(); err != nil {
- t.Fatalf("unable to sync node: %v", err)
- }
- if err := node2.Sync(); err != nil {
- t.Fatalf("unable to sync node: %v", err)
- }
-
- // Fetch all current link nodes from the database, they should exactly
- // match the two created above.
- originalNodes := []*LinkNode{node2, node1}
- linkNodes, err := cdb.FetchAllLinkNodes()
- if err != nil {
- t.Fatalf("unable to fetch nodes: %v", err)
- }
- for i, node := range linkNodes {
- if originalNodes[i].Network != node.Network {
- t.Fatalf("node networks don't match: expected %v, got %v",
- originalNodes[i].Network, node.Network)
- }
-
- originalPubkey := originalNodes[i].IdentityPub.SerializeCompressed()
- dbPubkey := node.IdentityPub.SerializeCompressed()
- if !bytes.Equal(originalPubkey, dbPubkey) {
- t.Fatalf("node pubkeys don't match: expected %x, got %x",
- originalPubkey, dbPubkey)
- }
- if originalNodes[i].LastSeen.Unix() != node.LastSeen.Unix() {
- t.Fatalf("last seen timestamps don't match: expected %v got %v",
- originalNodes[i].LastSeen.Unix(), node.LastSeen.Unix())
- }
- if originalNodes[i].Addresses[0].String() != node.Addresses[0].String() {
- t.Fatalf("addresses don't match: expected %v, got %v",
- originalNodes[i].Addresses, node.Addresses)
- }
- }
-
- // Next, we'll exercise the methods to append additional IP
- // addresses, and also to update the last seen time.
- if err := node1.UpdateLastSeen(time.Now()); err != nil {
- t.Fatalf("unable to update last seen: %v", err)
- }
- if err := node1.AddAddress(addr2); err != nil {
- t.Fatalf("unable to update addr: %v", err)
- }
-
- // Fetch the same node from the database according to its public key.
- node1DB, err := cdb.FetchLinkNode(pub1)
- if err != nil {
- t.Fatalf("unable to find node: %v", err)
- }
-
- // Both the last seen timestamp and the list of reachable addresses for
- // the node should be updated.
- if node1DB.LastSeen.Unix() != node1.LastSeen.Unix() {
- t.Fatalf("last seen timestamps don't match: expected %v got %v",
- node1.LastSeen.Unix(), node1DB.LastSeen.Unix())
- }
- if len(node1DB.Addresses) != 2 {
- t.Fatalf("wrong length for node1 addresses: expected %v, got %v",
- 2, len(node1DB.Addresses))
- }
- if node1DB.Addresses[0].String() != addr1.String() {
- t.Fatalf("wrong address for node: expected %v, got %v",
- addr1.String(), node1DB.Addresses[0].String())
- }
- if node1DB.Addresses[1].String() != addr2.String() {
- t.Fatalf("wrong address for node: expected %v, got %v",
- addr2.String(), node1DB.Addresses[1].String())
- }
-}
-
-func TestDeleteLinkNode(t *testing.T) {
- t.Parallel()
-
- cdb, cleanUp, err := MakeTestDB()
- if err != nil {
- t.Fatalf("unable to make test database: %v", err)
- }
- defer cleanUp()
-
- _, pubKey := btcec.PrivKeyFromBytes(btcec.S256(), key[:])
- addr := &net.TCPAddr{
- IP: net.ParseIP("127.0.0.1"),
- Port: 1337,
- }
- linkNode := cdb.NewLinkNode(protocol.TestNet3, pubKey, addr)
- if err := linkNode.Sync(); err != nil {
- t.Fatalf("unable to write link node to db: %v", err)
- }
-
- if _, err := cdb.FetchLinkNode(pubKey); err != nil {
- t.Fatalf("unable to find link node: %v", err)
- }
-
- if err := cdb.DeleteLinkNode(pubKey); err != nil {
- t.Fatalf("unable to delete link node from db: %v", err)
- }
-
- if _, err := cdb.FetchLinkNode(pubKey); err == nil {
- t.Fatal("should not have found link node in db, but did")
- }
-}
diff --git a/lnd/channeldb/options.go b/lnd/channeldb/options.go
deleted file mode 100644
index 3103dad7..00000000
--- a/lnd/channeldb/options.go
+++ /dev/null
@@ -1,108 +0,0 @@
-package channeldb
-
-import (
- "time"
-
- "github.com/pkt-cash/pktd/lnd/channeldb/kvdb"
- "github.com/pkt-cash/pktd/lnd/clock"
-)
-
-const (
- // DefaultRejectCacheSize is the default number of rejectCacheEntries to
- // cache for use in the rejection cache of incoming gossip traffic. This
- // produces a cache size of around 1MB.
- DefaultRejectCacheSize = 50000
-
- // DefaultChannelCacheSize is the default number of ChannelEdges cached
- // in order to reply to gossip queries. This produces a cache size of
- // around 40MB.
- DefaultChannelCacheSize = 20000
-)
-
-// Options holds parameters for tuning and customizing a channeldb.DB.
-type Options struct {
- kvdb.BoltBackendConfig
-
- // RejectCacheSize is the maximum number of rejectCacheEntries to hold
- // in the rejection cache.
- RejectCacheSize int
-
- // ChannelCacheSize is the maximum number of ChannelEdges to hold in the
- // channel cache.
- ChannelCacheSize int
-
- // clock is the time source used by the database.
- clock clock.Clock
-
- // dryRun will fail to commit a successful migration when opening the
- // database if set to true.
- dryRun bool
-}
-
-// DefaultOptions returns an Options populated with default values.
-func DefaultOptions() Options {
- return Options{
- BoltBackendConfig: kvdb.BoltBackendConfig{
- NoFreelistSync: true,
- AutoCompact: false,
- AutoCompactMinAge: kvdb.DefaultBoltAutoCompactMinAge,
- },
- RejectCacheSize: DefaultRejectCacheSize,
- ChannelCacheSize: DefaultChannelCacheSize,
- clock: clock.NewDefaultClock(),
- }
-}
-
-// OptionModifier is a function signature for modifying the default Options.
-type OptionModifier func(*Options)
-
-// OptionSetRejectCacheSize sets the RejectCacheSize to n.
-func OptionSetRejectCacheSize(n int) OptionModifier {
- return func(o *Options) {
- o.RejectCacheSize = n
- }
-}
-
-// OptionSetChannelCacheSize sets the ChannelCacheSize to n.
-func OptionSetChannelCacheSize(n int) OptionModifier {
- return func(o *Options) {
- o.ChannelCacheSize = n
- }
-}
-
-// OptionSetSyncFreelist allows the database to sync its freelist.
-func OptionSetSyncFreelist(b bool) OptionModifier {
- return func(o *Options) {
- o.NoFreelistSync = !b
- }
-}
-
-// OptionAutoCompact turns on automatic database compaction on startup.
-func OptionAutoCompact() OptionModifier {
- return func(o *Options) {
- o.AutoCompact = true
- }
-}
-
-// OptionAutoCompactMinAge sets the minimum age for automatic database
-// compaction.
-func OptionAutoCompactMinAge(minAge time.Duration) OptionModifier {
- return func(o *Options) {
- o.AutoCompactMinAge = minAge
- }
-}
-
-// OptionClock sets a non-default clock dependency.
-func OptionClock(clock clock.Clock) OptionModifier {
- return func(o *Options) {
- o.clock = clock
- }
-}
-
-// OptionDryRunMigration controls whether or not to intentially fail to commit a
-// successful migration that occurs when opening the database.
-func OptionDryRunMigration(dryRun bool) OptionModifier {
- return func(o *Options) {
- o.dryRun = dryRun
- }
-}
diff --git a/lnd/channeldb/paginate.go b/lnd/channeldb/paginate.go
deleted file mode 100644
index 1646160d..00000000
--- a/lnd/channeldb/paginate.go
+++ /dev/null
@@ -1,143 +0,0 @@
-package channeldb
-
-import (
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/lnd/channeldb/kvdb"
-)
-
-type paginator struct {
- // cursor is the cursor which we are using to iterate through a bucket.
- cursor kvdb.RCursor
-
- // reversed indicates whether we are paginating forwards or backwards.
- reversed bool
-
- // indexOffset is the index from which we will begin querying.
- indexOffset uint64
-
- // totalItems is the total number of items we allow in our response.
- totalItems uint64
-}
-
-// newPaginator returns a struct which can be used to query an indexed bucket
-// in pages.
-func newPaginator(c kvdb.RCursor, reversed bool,
- indexOffset, totalItems uint64) paginator {
-
- return paginator{
- cursor: c,
- reversed: reversed,
- indexOffset: indexOffset,
- totalItems: totalItems,
- }
-}
-
-// keyValueForIndex seeks our cursor to a given index and returns the key and
-// value at that position.
-func (p paginator) keyValueForIndex(index uint64) ([]byte, []byte) {
- var keyIndex [8]byte
- byteOrder.PutUint64(keyIndex[:], index)
- return p.cursor.Seek(keyIndex[:])
-}
-
-// lastIndex returns the last value in our index, if our index is empty it
-// returns 0.
-func (p paginator) lastIndex() uint64 {
- keyIndex, _ := p.cursor.Last()
- if keyIndex == nil {
- return 0
- }
-
- return byteOrder.Uint64(keyIndex)
-}
-
-// nextKey is a helper closure to determine what key we should use next when
-// we are iterating, depending on whether we are iterating forwards or in
-// reverse.
-func (p paginator) nextKey() ([]byte, []byte) {
- if p.reversed {
- return p.cursor.Prev()
- }
- return p.cursor.Next()
-}
-
-// cursorStart gets the index key and value for the first item we are looking
-// up, taking into account that we may be paginating in reverse. The index
-// offset provided is *excusive* so we will start with the item after the offset
-// for forwards queries, and the item before the index for backwards queries.
-func (p paginator) cursorStart() ([]byte, []byte) {
- indexKey, indexValue := p.keyValueForIndex(p.indexOffset + 1)
-
- // If the query is specifying reverse iteration, then we must
- // handle a few offset cases.
- if p.reversed {
- switch {
-
- // This indicates the default case, where no offset was
- // specified. In that case we just start from the last
- // entry.
- case p.indexOffset == 0:
- indexKey, indexValue = p.cursor.Last()
-
- // This indicates the offset being set to the very
- // first entry. Since there are no entries before
- // this offset, and the direction is reversed, we can
- // return without adding any invoices to the response.
- case p.indexOffset == 1:
- return nil, nil
-
- // If we have been given an index offset that is beyond our last
- // index value, we just return the last indexed value in our set
- // since we are querying in reverse. We do not cover the case
- // where our index offset equals our last index value, because
- // index offset is exclusive, so we would want to start at the
- // value before our last index.
- case p.indexOffset > p.lastIndex():
- return p.cursor.Last()
-
- // Otherwise we have an index offset which is within our set of
- // indexed keys, and we want to start at the item before our
- // offset. We seek to our index offset, then return the element
- // before it. We do this rather than p.indexOffset-1 to account
- // for indexes that have gaps.
- default:
- p.keyValueForIndex(p.indexOffset)
- indexKey, indexValue = p.cursor.Prev()
- }
- }
-
- return indexKey, indexValue
-}
-
-// query gets the start point for our index offset and iterates through keys
-// in our index until we reach the total number of items required for the query
-// or we run out of cursor values. This function takes a fetchAndAppend function
-// which is responsible for looking up the entry at that index, adding the entry
-// to its set of return items (if desired) and return a boolean which indicates
-// whether the item was added. This is required to allow the paginator to
-// determine when the response has the maximum number of required items.
-func (p paginator) query(fetchAndAppend func(k, v []byte) (bool, er.R)) er.R {
- indexKey, indexValue := p.cursorStart()
-
- var totalItems int
- for ; indexKey != nil; indexKey, indexValue = p.nextKey() {
- // If our current return payload exceeds the max number
- // of invoices, then we'll exit now.
- if uint64(totalItems) >= p.totalItems {
- break
- }
-
- added, err := fetchAndAppend(indexKey, indexValue)
- if err != nil {
- return err
- }
-
- // If we added an item to our set in the latest fetch and append
- // we increment our total count.
- if added {
- totalItems++
- }
- }
-
- return nil
-}
diff --git a/lnd/channeldb/payment_control.go b/lnd/channeldb/payment_control.go
deleted file mode 100644
index 30f2f376..00000000
--- a/lnd/channeldb/payment_control.go
+++ /dev/null
@@ -1,727 +0,0 @@
-package channeldb
-
-import (
- "bytes"
- "encoding/binary"
- "io"
-
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/lnd/channeldb/kvdb"
- "github.com/pkt-cash/pktd/lnd/lntypes"
-)
-
-var (
- // ErrAlreadyPaid signals we have already paid this payment hash.
- ErrAlreadyPaid = Err.CodeWithDetail("ErrAlreadyPaid", "invoice is already paid")
-
- // ErrPaymentInFlight signals that payment for this payment hash is
- // already "in flight" on the network.
- ErrPaymentInFlight = Err.CodeWithDetail("ErrPaymentInFlight", "payment is in transition")
-
- // ErrPaymentNotInitiated is returned if the payment wasn't initiated.
- ErrPaymentNotInitiated = Err.CodeWithDetail("ErrPaymentNotInitiated", "payment isn't initiated")
-
- // ErrPaymentAlreadySucceeded is returned in the event we attempt to
- // change the status of a payment already succeeded.
- ErrPaymentAlreadySucceeded = Err.CodeWithDetail("ErrPaymentAlreadySucceeded", "payment is already succeeded")
-
- // ErrPaymentAlreadyFailed is returned in the event we attempt to alter
- // a failed payment.
- ErrPaymentAlreadyFailed = Err.CodeWithDetail("ErrPaymentAlreadyFailed", "payment has already failed")
-
- // ErrUnknownPaymentStatus is returned when we do not recognize the
- // existing state of a payment.
- ErrUnknownPaymentStatus = Err.CodeWithDetail("ErrUnknownPaymentStatus", "unknown payment status")
-
- // ErrPaymentTerminal is returned if we attempt to alter a payment that
- // already has reached a terminal condition.
- ErrPaymentTerminal = Err.CodeWithDetail("ErrPaymentTerminal", "payment has reached terminal condition")
-
- // ErrAttemptAlreadySettled is returned if we try to alter an already
- // settled HTLC attempt.
- ErrAttemptAlreadySettled = Err.CodeWithDetail("ErrAttemptAlreadySettled", "attempt already settled")
-
- // ErrAttemptAlreadyFailed is returned if we try to alter an already
- // failed HTLC attempt.
- ErrAttemptAlreadyFailed = Err.CodeWithDetail("ErrAttemptAlreadyFailed", "attempt already failed")
-
- // ErrValueMismatch is returned if we try to register a non-MPP attempt
- // with an amount that doesn't match the payment amount.
- ErrValueMismatch = Err.CodeWithDetail("ErrValueMismatch",
- "attempted value doesn't match payment amount")
-
- // ErrValueExceedsAmt is returned if we try to register an attempt that
- // would take the total sent amount above the payment amount.
- ErrValueExceedsAmt = Err.CodeWithDetail("ErrValueExceedsAmt",
- "attempted value exceeds payment amount")
-
- // ErrNonMPPayment is returned if we try to register an MPP attempt for
- // a payment that already has a non-MPP attempt regitered.
- ErrNonMPPayment = Err.CodeWithDetail("ErrNonMPPayment", "payment has non-MPP attempts")
-
- // ErrMPPayment is returned if we try to register a non-MPP attempt for
- // a payment that already has an MPP attempt regitered.
- ErrMPPayment = Err.CodeWithDetail("ErrMPPayment", "payment has MPP attempts")
-
- // ErrMPPPaymentAddrMismatch is returned if we try to register an MPP
- // shard where the payment address doesn't match existing shards.
- ErrMPPPaymentAddrMismatch = Err.CodeWithDetail("ErrMPPPaymentAddrMismatch", "payment address mismatch")
-
- // ErrMPPTotalAmountMismatch is returned if we try to register an MPP
- // shard where the total amount doesn't match existing shards.
- ErrMPPTotalAmountMismatch = Err.CodeWithDetail("ErrMPPTotalAmountMismatch", "mp payment total amount mismatch")
-
- // errNoAttemptInfo is returned when no attempt info is stored yet.
- errNoAttemptInfo = Err.CodeWithDetail("errNoAttemptInfo", "unable to find attempt info for "+
- "inflight payment")
-
- // errNoSequenceNrIndex is returned when an attempt to lookup a payment
- // index is made for a sequence number that is not indexed.
- errNoSequenceNrIndex = Err.CodeWithDetail("errNoSequenceNrIndex", "payment sequence number index "+
- "does not exist")
-)
-
-// PaymentControl implements persistence for payments and payment attempts.
-type PaymentControl struct {
- db *DB
-}
-
-// NewPaymentControl creates a new instance of the PaymentControl.
-func NewPaymentControl(db *DB) *PaymentControl {
- return &PaymentControl{
- db: db,
- }
-}
-
-// InitPayment checks or records the given PaymentCreationInfo with the DB,
-// making sure it does not already exist as an in-flight payment. When this
-// method returns successfully, the payment is guranteeed to be in the InFlight
-// state.
-func (p *PaymentControl) InitPayment(paymentHash lntypes.Hash,
- info *PaymentCreationInfo) er.R {
-
- var b bytes.Buffer
- if err := serializePaymentCreationInfo(&b, info); err != nil {
- return err
- }
- infoBytes := b.Bytes()
-
- var updateErr er.R
- err := kvdb.Batch(p.db.Backend, func(tx kvdb.RwTx) er.R {
- // Reset the update error, to avoid carrying over an error
- // from a previous execution of the batched db transaction.
- updateErr = nil
-
- bucket, err := createPaymentBucket(tx, paymentHash)
- if err != nil {
- return err
- }
-
- // Get the existing status of this payment, if any.
- paymentStatus, err := fetchPaymentStatus(bucket)
- if err != nil {
- return err
- }
-
- switch paymentStatus {
-
- // We allow retrying failed payments.
- case StatusFailed:
-
- // This is a new payment that is being initialized for the
- // first time.
- case StatusUnknown:
-
- // We already have an InFlight payment on the network. We will
- // disallow any new payments.
- case StatusInFlight:
- updateErr = ErrPaymentInFlight.Default()
- return nil
-
- // We've already succeeded a payment to this payment hash,
- // forbid the switch from sending another.
- case StatusSucceeded:
- updateErr = ErrAlreadyPaid.Default()
- return nil
-
- default:
- updateErr = ErrUnknownPaymentStatus.Default()
- return nil
- }
-
- // Obtain a new sequence number for this payment. This is used
- // to sort the payments in order of creation, and also acts as
- // a unique identifier for each payment.
- sequenceNum, err := nextPaymentSequence(tx)
- if err != nil {
- return err
- }
-
- // Before we set our new sequence number, we check whether this
- // payment has a previously set sequence number and remove its
- // index entry if it exists. This happens in the case where we
- // have a previously attempted payment which was left in a state
- // where we can retry.
- seqBytes := bucket.Get(paymentSequenceKey)
- if seqBytes != nil {
- indexBucket := tx.ReadWriteBucket(paymentsIndexBucket)
- if err := indexBucket.Delete(seqBytes); err != nil {
- return err
- }
- }
-
- // Once we have obtained a sequence number, we add an entry
- // to our index bucket which will map the sequence number to
- // our payment hash.
- err = createPaymentIndexEntry(tx, sequenceNum, info.PaymentHash)
- if err != nil {
- return err
- }
-
- err = bucket.Put(paymentSequenceKey, sequenceNum)
- if err != nil {
- return err
- }
-
- // Add the payment info to the bucket, which contains the
- // static information for this payment
- err = bucket.Put(paymentCreationInfoKey, infoBytes)
- if err != nil {
- return err
- }
-
- // We'll delete any lingering HTLCs to start with, in case we
- // are initializing a payment that was attempted earlier, but
- // left in a state where we could retry.
- if err := bucket.DeleteNestedBucket(paymentHtlcsBucket); err != nil && !kvdb.ErrBucketNotFound.Is(err) {
- return err
- }
-
- // Also delete any lingering failure info now that we are
- // re-attempting.
- return bucket.Delete(paymentFailInfoKey)
- })
- if err != nil {
- return err
- }
-
- return updateErr
-}
-
-// paymentIndexTypeHash is a payment index type which indicates that we have
-// created an index of payment sequence number to payment hash.
-type paymentIndexType uint8
-
-// paymentIndexTypeHash is a payment index type which indicates that we have
-// created an index of payment sequence number to payment hash.
-const paymentIndexTypeHash paymentIndexType = 0
-
-// createPaymentIndexEntry creates a payment hash typed index for a payment. The
-// index produced contains a payment index type (which can be used in future to
-// signal different payment index types) and the payment hash.
-func createPaymentIndexEntry(tx kvdb.RwTx, sequenceNumber []byte,
- hash lntypes.Hash) er.R {
-
- var b bytes.Buffer
- if err := WriteElements(&b, paymentIndexTypeHash, hash[:]); err != nil {
- return err
- }
-
- indexes := tx.ReadWriteBucket(paymentsIndexBucket)
- return indexes.Put(sequenceNumber, b.Bytes())
-}
-
-// deserializePaymentIndex deserializes a payment index entry. This function
-// currently only supports deserialization of payment hash indexes, and will
-// fail for other types.
-func deserializePaymentIndex(r io.Reader) (lntypes.Hash, er.R) {
- var (
- indexType paymentIndexType
- paymentHash []byte
- )
-
- if err := ReadElements(r, &indexType, &paymentHash); err != nil {
- return lntypes.Hash{}, err
- }
-
- // While we only have on payment index type, we do not need to use our
- // index type to deserialize the index. However, we sanity check that
- // this type is as expected, since we had to read it out anyway.
- if indexType != paymentIndexTypeHash {
- return lntypes.Hash{}, er.Errorf("unknown payment index "+
- "type: %v", indexType)
- }
-
- hash, err := lntypes.MakeHash(paymentHash)
- if err != nil {
- return lntypes.Hash{}, err
- }
-
- return hash, nil
-}
-
-// RegisterAttempt atomically records the provided HTLCAttemptInfo to the
-// DB.
-func (p *PaymentControl) RegisterAttempt(paymentHash lntypes.Hash,
- attempt *HTLCAttemptInfo) (*MPPayment, er.R) {
-
- // Serialize the information before opening the db transaction.
- var a bytes.Buffer
- err := serializeHTLCAttemptInfo(&a, attempt)
- if err != nil {
- return nil, err
- }
- htlcInfoBytes := a.Bytes()
-
- htlcIDBytes := make([]byte, 8)
- binary.BigEndian.PutUint64(htlcIDBytes, attempt.AttemptID)
-
- var payment *MPPayment
- err = kvdb.Batch(p.db.Backend, func(tx kvdb.RwTx) er.R {
- bucket, err := fetchPaymentBucketUpdate(tx, paymentHash)
- if err != nil {
- return err
- }
-
- p, err := fetchPayment(bucket)
- if err != nil {
- return err
- }
-
- // Ensure the payment is in-flight.
- if err := ensureInFlight(p); err != nil {
- return err
- }
-
- // We cannot register a new attempt if the payment already has
- // reached a terminal condition:
- settle, fail := p.TerminalInfo()
- if settle != nil || fail != nil {
- return ErrPaymentTerminal.Default()
- }
-
- // Make sure any existing shards match the new one with regards
- // to MPP options.
- mpp := attempt.Route.FinalHop().MPP
- for _, h := range p.InFlightHTLCs() {
- hMpp := h.Route.FinalHop().MPP
-
- switch {
-
- // We tried to register a non-MPP attempt for a MPP
- // payment.
- case mpp == nil && hMpp != nil:
- return ErrMPPayment.Default()
-
- // We tried to register a MPP shard for a non-MPP
- // payment.
- case mpp != nil && hMpp == nil:
- return ErrNonMPPayment.Default()
-
- // Non-MPP payment, nothing more to validate.
- case mpp == nil:
- continue
- }
-
- // Check that MPP options match.
- if mpp.PaymentAddr() != hMpp.PaymentAddr() {
- return ErrMPPPaymentAddrMismatch.Default()
- }
-
- if mpp.TotalMsat() != hMpp.TotalMsat() {
- return ErrMPPTotalAmountMismatch.Default()
- }
- }
-
- // If this is a non-MPP attempt, it must match the total amount
- // exactly.
- amt := attempt.Route.ReceiverAmt()
- if mpp == nil && amt != p.Info.Value {
- return ErrValueMismatch.Default()
- }
-
- // Ensure we aren't sending more than the total payment amount.
- sentAmt, _ := p.SentAmt()
- if sentAmt+amt > p.Info.Value {
- return ErrValueExceedsAmt.Default()
- }
-
- htlcsBucket, err := bucket.CreateBucketIfNotExists(
- paymentHtlcsBucket,
- )
- if err != nil {
- return err
- }
-
- // Create bucket for this attempt. Fail if the bucket already
- // exists.
- htlcBucket, err := htlcsBucket.CreateBucket(htlcIDBytes)
- if err != nil {
- return err
- }
-
- err = htlcBucket.Put(htlcAttemptInfoKey, htlcInfoBytes)
- if err != nil {
- return err
- }
-
- // Retrieve attempt info for the notification.
- payment, err = fetchPayment(bucket)
- return err
- })
- if err != nil {
- return nil, err
- }
-
- return payment, err
-}
-
-// SettleAttempt marks the given attempt settled with the preimage. If this is
-// a multi shard payment, this might implicitly mean that the full payment
-// succeeded.
-//
-// After invoking this method, InitPayment should always return an error to
-// prevent us from making duplicate payments to the same payment hash. The
-// provided preimage is atomically saved to the DB for record keeping.
-func (p *PaymentControl) SettleAttempt(hash lntypes.Hash,
- attemptID uint64, settleInfo *HTLCSettleInfo) (*MPPayment, er.R) {
-
- var b bytes.Buffer
- if err := serializeHTLCSettleInfo(&b, settleInfo); err != nil {
- return nil, err
- }
- settleBytes := b.Bytes()
-
- return p.updateHtlcKey(hash, attemptID, htlcSettleInfoKey, settleBytes)
-}
-
-// FailAttempt marks the given payment attempt failed.
-func (p *PaymentControl) FailAttempt(hash lntypes.Hash,
- attemptID uint64, failInfo *HTLCFailInfo) (*MPPayment, er.R) {
-
- var b bytes.Buffer
- if err := serializeHTLCFailInfo(&b, failInfo); err != nil {
- return nil, err
- }
- failBytes := b.Bytes()
-
- return p.updateHtlcKey(hash, attemptID, htlcFailInfoKey, failBytes)
-}
-
-// updateHtlcKey updates a database key for the specified htlc.
-func (p *PaymentControl) updateHtlcKey(paymentHash lntypes.Hash,
- attemptID uint64, key, value []byte) (*MPPayment, er.R) {
-
- htlcIDBytes := make([]byte, 8)
- binary.BigEndian.PutUint64(htlcIDBytes, attemptID)
-
- var payment *MPPayment
- err := kvdb.Batch(p.db.Backend, func(tx kvdb.RwTx) er.R {
- payment = nil
-
- bucket, err := fetchPaymentBucketUpdate(tx, paymentHash)
- if err != nil {
- return err
- }
-
- p, err := fetchPayment(bucket)
- if err != nil {
- return err
- }
-
- // We can only update keys of in-flight payments. We allow
- // updating keys even if the payment has reached a terminal
- // condition, since the HTLC outcomes must still be updated.
- if err := ensureInFlight(p); err != nil {
- return err
- }
-
- htlcsBucket := bucket.NestedReadWriteBucket(paymentHtlcsBucket)
- if htlcsBucket == nil {
- return er.Errorf("htlcs bucket not found")
- }
-
- htlcBucket := htlcsBucket.NestedReadWriteBucket(htlcIDBytes)
- if htlcBucket == nil {
- return er.Errorf("HTLC with ID %v not registered",
- attemptID)
- }
-
- // Make sure the shard is not already failed or settled.
- if htlcBucket.Get(htlcFailInfoKey) != nil {
- return ErrAttemptAlreadyFailed.Default()
- }
-
- if htlcBucket.Get(htlcSettleInfoKey) != nil {
- return ErrAttemptAlreadySettled.Default()
- }
-
- // Add or update the key for this htlc.
- err = htlcBucket.Put(key, value)
- if err != nil {
- return err
- }
-
- // Retrieve attempt info for the notification.
- payment, err = fetchPayment(bucket)
- return err
- })
- if err != nil {
- return nil, err
- }
-
- return payment, err
-}
-
-// Fail transitions a payment into the Failed state, and records the reason the
-// payment failed. After invoking this method, InitPayment should return nil on
-// its next call for this payment hash, allowing the switch to make a
-// subsequent payment.
-func (p *PaymentControl) Fail(paymentHash lntypes.Hash,
- reason FailureReason) (*MPPayment, er.R) {
-
- var (
- updateErr er.R
- payment *MPPayment
- )
- err := kvdb.Batch(p.db.Backend, func(tx kvdb.RwTx) er.R {
- // Reset the update error, to avoid carrying over an error
- // from a previous execution of the batched db transaction.
- updateErr = nil
- payment = nil
-
- bucket, err := fetchPaymentBucketUpdate(tx, paymentHash)
- if ErrPaymentNotInitiated.Is(err) {
- updateErr = ErrPaymentNotInitiated.Default()
- return nil
- } else if err != nil {
- return err
- }
-
- // We mark the payent as failed as long as it is known. This
- // lets the last attempt to fail with a terminal write its
- // failure to the PaymentControl without synchronizing with
- // other attempts.
- paymentStatus, err := fetchPaymentStatus(bucket)
- if err != nil {
- return err
- }
-
- if paymentStatus == StatusUnknown {
- updateErr = ErrPaymentNotInitiated.Default()
- return nil
- }
-
- // Put the failure reason in the bucket for record keeping.
- v := []byte{byte(reason)}
- err = bucket.Put(paymentFailInfoKey, v)
- if err != nil {
- return err
- }
-
- // Retrieve attempt info for the notification, if available.
- payment, err = fetchPayment(bucket)
- if err != nil {
- return err
- }
-
- return nil
- })
- if err != nil {
- return nil, err
- }
-
- return payment, updateErr
-}
-
-// FetchPayment returns information about a payment from the database.
-func (p *PaymentControl) FetchPayment(paymentHash lntypes.Hash) (
- *MPPayment, er.R) {
-
- var payment *MPPayment
- err := kvdb.View(p.db, func(tx kvdb.RTx) er.R {
- bucket, err := fetchPaymentBucket(tx, paymentHash)
- if err != nil {
- return err
- }
-
- payment, err = fetchPayment(bucket)
-
- return err
- }, func() {
- payment = nil
- })
- if err != nil {
- return nil, err
- }
-
- return payment, nil
-}
-
-// createPaymentBucket creates or fetches the sub-bucket assigned to this
-// payment hash.
-func createPaymentBucket(tx kvdb.RwTx, paymentHash lntypes.Hash) (
- kvdb.RwBucket, er.R) {
-
- payments, err := tx.CreateTopLevelBucket(paymentsRootBucket)
- if err != nil {
- return nil, err
- }
-
- return payments.CreateBucketIfNotExists(paymentHash[:])
-}
-
-// fetchPaymentBucket fetches the sub-bucket assigned to this payment hash. If
-// the bucket does not exist, it returns ErrPaymentNotInitiated.
-func fetchPaymentBucket(tx kvdb.RTx, paymentHash lntypes.Hash) (
- kvdb.RBucket, er.R) {
-
- payments := tx.ReadBucket(paymentsRootBucket)
- if payments == nil {
- return nil, ErrPaymentNotInitiated.Default()
- }
-
- bucket := payments.NestedReadBucket(paymentHash[:])
- if bucket == nil {
- return nil, ErrPaymentNotInitiated.Default()
- }
-
- return bucket, nil
-
-}
-
-// fetchPaymentBucketUpdate is identical to fetchPaymentBucket, but it returns a
-// bucket that can be written to.
-func fetchPaymentBucketUpdate(tx kvdb.RwTx, paymentHash lntypes.Hash) (
- kvdb.RwBucket, er.R) {
-
- payments := tx.ReadWriteBucket(paymentsRootBucket)
- if payments == nil {
- return nil, ErrPaymentNotInitiated.Default()
- }
-
- bucket := payments.NestedReadWriteBucket(paymentHash[:])
- if bucket == nil {
- return nil, ErrPaymentNotInitiated.Default()
- }
-
- return bucket, nil
-}
-
-// nextPaymentSequence returns the next sequence number to store for a new
-// payment.
-func nextPaymentSequence(tx kvdb.RwTx) ([]byte, er.R) {
- payments, err := tx.CreateTopLevelBucket(paymentsRootBucket)
- if err != nil {
- return nil, err
- }
-
- seq, errr := payments.NextSequence()
- if errr != nil {
- return nil, errr
- }
-
- b := make([]byte, 8)
- binary.BigEndian.PutUint64(b, seq)
- return b, nil
-}
-
-// fetchPaymentStatus fetches the payment status of the payment. If the payment
-// isn't found, it will default to "StatusUnknown".
-func fetchPaymentStatus(bucket kvdb.RBucket) (PaymentStatus, er.R) {
- // Creation info should be set for all payments, regardless of state.
- // If not, it is unknown.
- if bucket.Get(paymentCreationInfoKey) == nil {
- return StatusUnknown, nil
- }
-
- payment, err := fetchPayment(bucket)
- if err != nil {
- return 0, err
- }
-
- return payment.Status, nil
-}
-
-// ensureInFlight checks whether the payment found in the given bucket has
-// status InFlight, and returns an error otherwise. This should be used to
-// ensure we only mark in-flight payments as succeeded or failed.
-func ensureInFlight(payment *MPPayment) er.R {
- paymentStatus := payment.Status
-
- switch {
-
- // The payment was indeed InFlight.
- case paymentStatus == StatusInFlight:
- return nil
-
- // Our records show the payment as unknown, meaning it never
- // should have left the switch.
- case paymentStatus == StatusUnknown:
- return ErrPaymentNotInitiated.Default()
-
- // The payment succeeded previously.
- case paymentStatus == StatusSucceeded:
- return ErrPaymentAlreadySucceeded.Default()
-
- // The payment was already failed.
- case paymentStatus == StatusFailed:
- return ErrPaymentAlreadyFailed.Default()
-
- default:
- return ErrUnknownPaymentStatus.Default()
- }
-}
-
-// InFlightPayment is a wrapper around the info for a payment that has status
-// InFlight.
-type InFlightPayment struct {
- // Info is the PaymentCreationInfo of the in-flight payment.
- Info *PaymentCreationInfo
-}
-
-// FetchInFlightPayments returns all payments with status InFlight.
-func (p *PaymentControl) FetchInFlightPayments() ([]*InFlightPayment, er.R) {
- var inFlights []*InFlightPayment
- err := kvdb.View(p.db, func(tx kvdb.RTx) er.R {
- payments := tx.ReadBucket(paymentsRootBucket)
- if payments == nil {
- return nil
- }
-
- return payments.ForEach(func(k, _ []byte) er.R {
- bucket := payments.NestedReadBucket(k)
- if bucket == nil {
- return er.Errorf("non bucket element")
- }
-
- // If the status is not InFlight, we can return early.
- paymentStatus, err := fetchPaymentStatus(bucket)
- if err != nil {
- return err
- }
-
- if paymentStatus != StatusInFlight {
- return nil
- }
-
- inFlight := &InFlightPayment{}
-
- // Get the CreationInfo.
- inFlight.Info, err = fetchCreationInfo(bucket)
- if err != nil {
- return err
- }
-
- inFlights = append(inFlights, inFlight)
- return nil
- })
- }, func() {
- inFlights = nil
- })
- if err != nil {
- return nil, err
- }
-
- return inFlights, nil
-}
diff --git a/lnd/channeldb/payment_control_test.go b/lnd/channeldb/payment_control_test.go
deleted file mode 100644
index d1f16567..00000000
--- a/lnd/channeldb/payment_control_test.go
+++ /dev/null
@@ -1,1022 +0,0 @@
-package channeldb
-
-import (
- "bytes"
- "crypto/rand"
- "crypto/sha256"
- "fmt"
- "reflect"
- "testing"
- "time"
-
- "github.com/davecgh/go-spew/spew"
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/btcutil/util"
- "github.com/pkt-cash/pktd/lnd/channeldb/kvdb"
- "github.com/pkt-cash/pktd/lnd/lntypes"
- "github.com/pkt-cash/pktd/lnd/record"
- "github.com/pkt-cash/pktd/pktwallet/walletdb"
- "github.com/stretchr/testify/assert"
- "github.com/stretchr/testify/require"
-)
-
-func genPreimage() ([32]byte, er.R) {
- var preimage [32]byte
- if _, err := util.ReadFull(rand.Reader, preimage[:]); err != nil {
- return preimage, err
- }
- return preimage, nil
-}
-
-func genInfo() (*PaymentCreationInfo, *HTLCAttemptInfo,
- lntypes.Preimage, er.R) {
-
- preimage, err := genPreimage()
- if err != nil {
- return nil, nil, preimage, er.Errorf("unable to "+
- "generate preimage: %v", err)
- }
-
- rhash := sha256.Sum256(preimage[:])
- return &PaymentCreationInfo{
- PaymentHash: rhash,
- Value: testRoute.ReceiverAmt(),
- CreationTime: time.Unix(time.Now().Unix(), 0),
- PaymentRequest: []byte("hola"),
- },
- &HTLCAttemptInfo{
- AttemptID: 0,
- SessionKey: priv,
- Route: *testRoute.Copy(),
- }, preimage, nil
-}
-
-// TestPaymentControlSwitchFail checks that payment status returns to Failed
-// status after failing, and that InitPayment allows another HTLC for the
-// same payment hash.
-func TestPaymentControlSwitchFail(t *testing.T) {
- t.Parallel()
-
- db, cleanup, err := MakeTestDB()
- defer cleanup()
- if err != nil {
- t.Fatalf("unable to init db: %v", err)
- }
-
- pControl := NewPaymentControl(db)
-
- info, attempt, preimg, err := genInfo()
- if err != nil {
- t.Fatalf("unable to generate htlc message: %v", err)
- }
-
- // Sends base htlc message which initiate StatusInFlight.
- err = pControl.InitPayment(info.PaymentHash, info)
- if err != nil {
- t.Fatalf("unable to send htlc message: %v", err)
- }
-
- assertPaymentIndex(t, pControl, info.PaymentHash)
- assertPaymentStatus(t, pControl, info.PaymentHash, StatusInFlight)
- assertPaymentInfo(
- t, pControl, info.PaymentHash, info, nil, nil,
- )
-
- // Fail the payment, which should moved it to Failed.
- failReason := FailureReasonNoRoute
- _, err = pControl.Fail(info.PaymentHash, failReason)
- if err != nil {
- t.Fatalf("unable to fail payment hash: %v", err)
- }
-
- // Verify the status is indeed Failed.
- assertPaymentStatus(t, pControl, info.PaymentHash, StatusFailed)
- assertPaymentInfo(
- t, pControl, info.PaymentHash, info, &failReason, nil,
- )
-
- // Lookup the payment so we can get its old sequence number before it is
- // overwritten.
- payment, err := pControl.FetchPayment(info.PaymentHash)
- util.RequireNoErr(t, err)
-
- // Sends the htlc again, which should succeed since the prior payment
- // failed.
- err = pControl.InitPayment(info.PaymentHash, info)
- if err != nil {
- t.Fatalf("unable to send htlc message: %v", err)
- }
-
- // Check that our index has been updated, and the old index has been
- // removed.
- assertPaymentIndex(t, pControl, info.PaymentHash)
- assertNoIndex(t, pControl, payment.SequenceNum)
-
- assertPaymentStatus(t, pControl, info.PaymentHash, StatusInFlight)
- assertPaymentInfo(
- t, pControl, info.PaymentHash, info, nil, nil,
- )
-
- // Record a new attempt. In this test scenario, the attempt fails.
- // However, this is not communicated to control tower in the current
- // implementation. It only registers the initiation of the attempt.
- _, err = pControl.RegisterAttempt(info.PaymentHash, attempt)
- if err != nil {
- t.Fatalf("unable to register attempt: %v", err)
- }
-
- htlcReason := HTLCFailUnreadable
- _, err = pControl.FailAttempt(
- info.PaymentHash, attempt.AttemptID,
- &HTLCFailInfo{
- Reason: htlcReason,
- },
- )
- if err != nil {
- t.Fatal(err)
- }
- assertPaymentStatus(t, pControl, info.PaymentHash, StatusInFlight)
-
- htlc := &htlcStatus{
- HTLCAttemptInfo: attempt,
- failure: &htlcReason,
- }
-
- assertPaymentInfo(t, pControl, info.PaymentHash, info, nil, htlc)
-
- // Record another attempt.
- attempt.AttemptID = 1
- _, err = pControl.RegisterAttempt(info.PaymentHash, attempt)
- if err != nil {
- t.Fatalf("unable to send htlc message: %v", err)
- }
- assertPaymentStatus(t, pControl, info.PaymentHash, StatusInFlight)
-
- htlc = &htlcStatus{
- HTLCAttemptInfo: attempt,
- }
-
- assertPaymentInfo(
- t, pControl, info.PaymentHash, info, nil, htlc,
- )
-
- // Settle the attempt and verify that status was changed to
- // StatusSucceeded.
- payment, err = pControl.SettleAttempt(
- info.PaymentHash, attempt.AttemptID,
- &HTLCSettleInfo{
- Preimage: preimg,
- },
- )
- if err != nil {
- t.Fatalf("error shouldn't have been received, got: %v", err)
- }
-
- if len(payment.HTLCs) != 2 {
- t.Fatalf("payment should have two htlcs, got: %d",
- len(payment.HTLCs))
- }
-
- err = assertRouteEqual(&payment.HTLCs[0].Route, &attempt.Route)
- if err != nil {
- t.Fatalf("unexpected route returned: %v vs %v: %v",
- spew.Sdump(attempt.Route),
- spew.Sdump(payment.HTLCs[0].Route), err)
- }
-
- assertPaymentStatus(t, pControl, info.PaymentHash, StatusSucceeded)
-
- htlc.settle = &preimg
- assertPaymentInfo(
- t, pControl, info.PaymentHash, info, nil, htlc,
- )
-
- // Attempt a final payment, which should now fail since the prior
- // payment succeed.
- err = pControl.InitPayment(info.PaymentHash, info)
- if !ErrAlreadyPaid.Is(err) {
- t.Fatalf("unable to send htlc message: %v", err)
- }
-}
-
-// TestPaymentControlSwitchDoubleSend checks the ability of payment control to
-// prevent double sending of htlc message, when message is in StatusInFlight.
-func TestPaymentControlSwitchDoubleSend(t *testing.T) {
- t.Parallel()
-
- db, cleanup, err := MakeTestDB()
- defer cleanup()
-
- if err != nil {
- t.Fatalf("unable to init db: %v", err)
- }
-
- pControl := NewPaymentControl(db)
-
- info, attempt, preimg, err := genInfo()
- if err != nil {
- t.Fatalf("unable to generate htlc message: %v", err)
- }
-
- // Sends base htlc message which initiate base status and move it to
- // StatusInFlight and verifies that it was changed.
- err = pControl.InitPayment(info.PaymentHash, info)
- if err != nil {
- t.Fatalf("unable to send htlc message: %v", err)
- }
-
- assertPaymentIndex(t, pControl, info.PaymentHash)
- assertPaymentStatus(t, pControl, info.PaymentHash, StatusInFlight)
- assertPaymentInfo(
- t, pControl, info.PaymentHash, info, nil, nil,
- )
-
- // Try to initiate double sending of htlc message with the same
- // payment hash, should result in error indicating that payment has
- // already been sent.
- err = pControl.InitPayment(info.PaymentHash, info)
- if !ErrPaymentInFlight.Is(err) {
- t.Fatalf("payment control wrong behaviour: " +
- "double sending must trigger ErrPaymentInFlight error")
- }
-
- // Record an attempt.
- _, err = pControl.RegisterAttempt(info.PaymentHash, attempt)
- if err != nil {
- t.Fatalf("unable to send htlc message: %v", err)
- }
- assertPaymentStatus(t, pControl, info.PaymentHash, StatusInFlight)
-
- htlc := &htlcStatus{
- HTLCAttemptInfo: attempt,
- }
- assertPaymentInfo(
- t, pControl, info.PaymentHash, info, nil, htlc,
- )
-
- // Sends base htlc message which initiate StatusInFlight.
- err = pControl.InitPayment(info.PaymentHash, info)
- if !ErrPaymentInFlight.Is(err) {
- t.Fatalf("payment control wrong behaviour: " +
- "double sending must trigger ErrPaymentInFlight error")
- }
-
- // After settling, the error should be ErrAlreadyPaid.
- _, err = pControl.SettleAttempt(
- info.PaymentHash, attempt.AttemptID,
- &HTLCSettleInfo{
- Preimage: preimg,
- },
- )
- if err != nil {
- t.Fatalf("error shouldn't have been received, got: %v", err)
- }
- assertPaymentStatus(t, pControl, info.PaymentHash, StatusSucceeded)
-
- htlc.settle = &preimg
- assertPaymentInfo(t, pControl, info.PaymentHash, info, nil, htlc)
-
- err = pControl.InitPayment(info.PaymentHash, info)
- if !ErrAlreadyPaid.Is(err) {
- t.Fatalf("unable to send htlc message: %v", err)
- }
-}
-
-// TestPaymentControlSuccessesWithoutInFlight checks that the payment
-// control will disallow calls to Success when no payment is in flight.
-func TestPaymentControlSuccessesWithoutInFlight(t *testing.T) {
- t.Parallel()
-
- db, cleanup, err := MakeTestDB()
- defer cleanup()
-
- if err != nil {
- t.Fatalf("unable to init db: %v", err)
- }
-
- pControl := NewPaymentControl(db)
-
- info, _, preimg, err := genInfo()
- if err != nil {
- t.Fatalf("unable to generate htlc message: %v", err)
- }
-
- // Attempt to complete the payment should fail.
- _, err = pControl.SettleAttempt(
- info.PaymentHash, 0,
- &HTLCSettleInfo{
- Preimage: preimg,
- },
- )
- if !ErrPaymentNotInitiated.Is(err) {
- t.Fatalf("expected ErrPaymentNotInitiated, got %v", err)
- }
-
- assertPaymentStatus(t, pControl, info.PaymentHash, StatusUnknown)
-}
-
-// TestPaymentControlFailsWithoutInFlight checks that a strict payment
-// control will disallow calls to Fail when no payment is in flight.
-func TestPaymentControlFailsWithoutInFlight(t *testing.T) {
- t.Parallel()
-
- db, cleanup, err := MakeTestDB()
- defer cleanup()
-
- if err != nil {
- t.Fatalf("unable to init db: %v", err)
- }
-
- pControl := NewPaymentControl(db)
-
- info, _, _, err := genInfo()
- if err != nil {
- t.Fatalf("unable to generate htlc message: %v", err)
- }
-
- // Calling Fail should return an error.
- _, err = pControl.Fail(info.PaymentHash, FailureReasonNoRoute)
- if !ErrPaymentNotInitiated.Is(err) {
- t.Fatalf("expected ErrPaymentNotInitiated, got %v", err)
- }
-
- assertPaymentStatus(t, pControl, info.PaymentHash, StatusUnknown)
-}
-
-// TestPaymentControlDeleteNonInFlight checks that calling DeletePayments only
-// deletes payments from the database that are not in-flight.
-func TestPaymentControlDeleteNonInFligt(t *testing.T) {
- t.Parallel()
-
- db, cleanup, err := MakeTestDB()
- defer cleanup()
-
- if err != nil {
- t.Fatalf("unable to init db: %v", err)
- }
-
- // Create a sequence number for duplicate payments that will not collide
- // with the sequence numbers for the payments we create. These values
- // start at 1, so 9999 is a safe bet for this test.
- var duplicateSeqNr = 9999
-
- pControl := NewPaymentControl(db)
-
- payments := []struct {
- failed bool
- success bool
- hasDuplicate bool
- }{
- {
- failed: true,
- success: false,
- hasDuplicate: false,
- },
- {
- failed: false,
- success: true,
- hasDuplicate: false,
- },
- {
- failed: false,
- success: false,
- hasDuplicate: false,
- },
- {
- failed: false,
- success: true,
- hasDuplicate: true,
- },
- }
-
- for _, p := range payments {
- info, attempt, preimg, err := genInfo()
- if err != nil {
- t.Fatalf("unable to generate htlc message: %v", err)
- }
-
- // Sends base htlc message which initiate StatusInFlight.
- err = pControl.InitPayment(info.PaymentHash, info)
- if err != nil {
- t.Fatalf("unable to send htlc message: %v", err)
- }
- _, err = pControl.RegisterAttempt(info.PaymentHash, attempt)
- if err != nil {
- t.Fatalf("unable to send htlc message: %v", err)
- }
-
- htlc := &htlcStatus{
- HTLCAttemptInfo: attempt,
- }
-
- if p.failed {
- // Fail the payment attempt.
- htlcFailure := HTLCFailUnreadable
- _, err := pControl.FailAttempt(
- info.PaymentHash, attempt.AttemptID,
- &HTLCFailInfo{
- Reason: htlcFailure,
- },
- )
- if err != nil {
- t.Fatalf("unable to fail htlc: %v", err)
- }
-
- // Fail the payment, which should moved it to Failed.
- failReason := FailureReasonNoRoute
- _, err = pControl.Fail(info.PaymentHash, failReason)
- if err != nil {
- t.Fatalf("unable to fail payment hash: %v", err)
- }
-
- // Verify the status is indeed Failed.
- assertPaymentStatus(t, pControl, info.PaymentHash, StatusFailed)
-
- htlc.failure = &htlcFailure
- assertPaymentInfo(
- t, pControl, info.PaymentHash, info,
- &failReason, htlc,
- )
- } else if p.success {
- // Verifies that status was changed to StatusSucceeded.
- _, err := pControl.SettleAttempt(
- info.PaymentHash, attempt.AttemptID,
- &HTLCSettleInfo{
- Preimage: preimg,
- },
- )
- if err != nil {
- t.Fatalf("error shouldn't have been received, got: %v", err)
- }
-
- assertPaymentStatus(t, pControl, info.PaymentHash, StatusSucceeded)
-
- htlc.settle = &preimg
- assertPaymentInfo(
- t, pControl, info.PaymentHash, info, nil, htlc,
- )
- } else {
- assertPaymentStatus(t, pControl, info.PaymentHash, StatusInFlight)
- assertPaymentInfo(
- t, pControl, info.PaymentHash, info, nil, htlc,
- )
- }
-
- // If the payment is intended to have a duplicate payment, we
- // add one.
- if p.hasDuplicate {
- appendDuplicatePayment(
- t, pControl.db, info.PaymentHash,
- uint64(duplicateSeqNr),
- )
- duplicateSeqNr++
- }
- }
-
- // Delete payments.
- if err := db.DeletePayments(); err != nil {
- t.Fatal(err)
- }
-
- // This should leave the in-flight payment.
- dbPayments, err := db.FetchPayments()
- if err != nil {
- t.Fatal(err)
- }
-
- if len(dbPayments) != 1 {
- t.Fatalf("expected one payment, got %d", len(dbPayments))
- }
-
- status := dbPayments[0].Status
- if status != StatusInFlight {
- t.Fatalf("expected in-fligth status, got %v", status)
- }
-
- // Finally, check that we only have a single index left in the payment
- // index bucket.
- var indexCount int
- err = kvdb.View(db, func(tx walletdb.ReadTx) er.R {
- index := tx.ReadBucket(paymentsIndexBucket)
-
- return index.ForEach(func(k, v []byte) er.R {
- indexCount++
- return nil
- })
- }, func() { indexCount = 0 })
- util.RequireNoErr(t, err)
-
- require.Equal(t, 1, indexCount)
-}
-
-// TestPaymentControlMultiShard checks the ability of payment control to
-// have multiple in-flight HTLCs for a single payment.
-func TestPaymentControlMultiShard(t *testing.T) {
- t.Parallel()
-
- // We will register three HTLC attempts, and always fail the second
- // one. We'll generate all combinations of settling/failing the first
- // and third HTLC, and assert that the payment status end up as we
- // expect.
- type testCase struct {
- settleFirst bool
- settleLast bool
- }
-
- var tests []testCase
- for _, f := range []bool{true, false} {
- for _, l := range []bool{true, false} {
- tests = append(tests, testCase{f, l})
- }
- }
-
- runSubTest := func(t *testing.T, test testCase) {
- db, cleanup, err := MakeTestDB()
- defer cleanup()
-
- if err != nil {
- t.Fatalf("unable to init db: %v", err)
- }
-
- pControl := NewPaymentControl(db)
-
- info, attempt, preimg, err := genInfo()
- if err != nil {
- t.Fatalf("unable to generate htlc message: %v", err)
- }
-
- // Init the payment, moving it to the StatusInFlight state.
- err = pControl.InitPayment(info.PaymentHash, info)
- if err != nil {
- t.Fatalf("unable to send htlc message: %v", err)
- }
-
- assertPaymentIndex(t, pControl, info.PaymentHash)
- assertPaymentStatus(t, pControl, info.PaymentHash, StatusInFlight)
- assertPaymentInfo(
- t, pControl, info.PaymentHash, info, nil, nil,
- )
-
- // Create three unique attempts we'll use for the test, and
- // register them with the payment control. We set each
- // attempts's value to one third of the payment amount, and
- // populate the MPP options.
- shardAmt := info.Value / 3
- attempt.Route.FinalHop().AmtToForward = shardAmt
- attempt.Route.FinalHop().MPP = record.NewMPP(
- info.Value, [32]byte{1},
- )
-
- var attempts []*HTLCAttemptInfo
- for i := uint64(0); i < 3; i++ {
- a := *attempt
- a.AttemptID = i
- attempts = append(attempts, &a)
-
- _, err = pControl.RegisterAttempt(info.PaymentHash, &a)
- if err != nil {
- t.Fatalf("unable to send htlc message: %v", err)
- }
- assertPaymentStatus(
- t, pControl, info.PaymentHash, StatusInFlight,
- )
-
- htlc := &htlcStatus{
- HTLCAttemptInfo: &a,
- }
- assertPaymentInfo(
- t, pControl, info.PaymentHash, info, nil, htlc,
- )
- }
-
- // For a fourth attempt, check that attempting to
- // register it will fail since the total sent amount
- // will be too large.
- b := *attempt
- b.AttemptID = 3
- _, err = pControl.RegisterAttempt(info.PaymentHash, &b)
- if !ErrValueExceedsAmt.Is(err) {
- t.Fatalf("expected ErrValueExceedsAmt, got: %v",
- err)
- }
-
- // Fail the second attempt.
- a := attempts[1]
- htlcFail := HTLCFailUnreadable
- _, err = pControl.FailAttempt(
- info.PaymentHash, a.AttemptID,
- &HTLCFailInfo{
- Reason: htlcFail,
- },
- )
- if err != nil {
- t.Fatal(err)
- }
-
- htlc := &htlcStatus{
- HTLCAttemptInfo: a,
- failure: &htlcFail,
- }
- assertPaymentInfo(
- t, pControl, info.PaymentHash, info, nil, htlc,
- )
-
- // Payment should still be in-flight.
- assertPaymentStatus(t, pControl, info.PaymentHash, StatusInFlight)
-
- // Depending on the test case, settle or fail the first attempt.
- a = attempts[0]
- htlc = &htlcStatus{
- HTLCAttemptInfo: a,
- }
-
- var firstFailReason *FailureReason
- if test.settleFirst {
- _, err := pControl.SettleAttempt(
- info.PaymentHash, a.AttemptID,
- &HTLCSettleInfo{
- Preimage: preimg,
- },
- )
- if err != nil {
- t.Fatalf("error shouldn't have been "+
- "received, got: %v", err)
- }
-
- // Assert that the HTLC has had the preimage recorded.
- htlc.settle = &preimg
- assertPaymentInfo(
- t, pControl, info.PaymentHash, info, nil, htlc,
- )
- } else {
- _, err := pControl.FailAttempt(
- info.PaymentHash, a.AttemptID,
- &HTLCFailInfo{
- Reason: htlcFail,
- },
- )
- if err != nil {
- t.Fatalf("error shouldn't have been "+
- "received, got: %v", err)
- }
-
- // Assert the failure was recorded.
- htlc.failure = &htlcFail
- assertPaymentInfo(
- t, pControl, info.PaymentHash, info, nil, htlc,
- )
-
- // We also record a payment level fail, to move it into
- // a terminal state.
- failReason := FailureReasonNoRoute
- _, err = pControl.Fail(info.PaymentHash, failReason)
- if err != nil {
- t.Fatalf("unable to fail payment hash: %v", err)
- }
-
- // Record the reason we failed the payment, such that
- // we can assert this later in the test.
- firstFailReason = &failReason
- }
-
- // The payment should still be considered in-flight, since there
- // is still an active HTLC.
- assertPaymentStatus(t, pControl, info.PaymentHash, StatusInFlight)
-
- // Try to register yet another attempt. This should fail now
- // that the payment has reached a terminal condition.
- b = *attempt
- b.AttemptID = 3
- _, err = pControl.RegisterAttempt(info.PaymentHash, &b)
- if !ErrPaymentTerminal.Is(err) {
- t.Fatalf("expected ErrPaymentTerminal, got: %v", err)
- }
-
- assertPaymentStatus(t, pControl, info.PaymentHash, StatusInFlight)
-
- // Settle or fail the remaining attempt based on the testcase.
- a = attempts[2]
- htlc = &htlcStatus{
- HTLCAttemptInfo: a,
- }
- if test.settleLast {
- // Settle the last outstanding attempt.
- _, err = pControl.SettleAttempt(
- info.PaymentHash, a.AttemptID,
- &HTLCSettleInfo{
- Preimage: preimg,
- },
- )
- if err != nil {
- t.Fatalf("error shouldn't have been "+
- "received, got: %v", err)
- }
-
- htlc.settle = &preimg
- assertPaymentInfo(
- t, pControl, info.PaymentHash, info,
- firstFailReason, htlc,
- )
- } else {
- // Fail the attempt.
- _, err := pControl.FailAttempt(
- info.PaymentHash, a.AttemptID,
- &HTLCFailInfo{
- Reason: htlcFail,
- },
- )
- if err != nil {
- t.Fatalf("error shouldn't have been "+
- "received, got: %v", err)
- }
-
- // Assert the failure was recorded.
- htlc.failure = &htlcFail
- assertPaymentInfo(
- t, pControl, info.PaymentHash, info,
- firstFailReason, htlc,
- )
-
- // Check that we can override any perevious terminal
- // failure. This is to allow multiple concurrent shard
- // write a terminal failure to the database without
- // syncing.
- failReason := FailureReasonPaymentDetails
- _, err = pControl.Fail(info.PaymentHash, failReason)
- if err != nil {
- t.Fatalf("unable to fail payment hash: %v", err)
- }
- }
-
- // If any of the two attempts settled, the payment should end
- // up in the Succeeded state. If both failed the payment should
- // also be Failed at this poinnt.
- finalStatus := StatusFailed
- expRegErr := ErrPaymentAlreadyFailed
- if test.settleFirst || test.settleLast {
- finalStatus = StatusSucceeded
- expRegErr = ErrPaymentAlreadySucceeded
- }
-
- assertPaymentStatus(t, pControl, info.PaymentHash, finalStatus)
-
- // Finally assert we cannot register more attempts.
- _, err = pControl.RegisterAttempt(info.PaymentHash, &b)
- if !expRegErr.Is(err) {
- t.Fatalf("expected error %v, got: %v", expRegErr, err)
- }
- }
-
- for _, test := range tests {
- test := test
- subTest := fmt.Sprintf("first=%v, second=%v",
- test.settleFirst, test.settleLast)
-
- t.Run(subTest, func(t *testing.T) {
- runSubTest(t, test)
- })
- }
-}
-
-func TestPaymentControlMPPRecordValidation(t *testing.T) {
- t.Parallel()
-
- db, cleanup, err := MakeTestDB()
- defer cleanup()
-
- if err != nil {
- t.Fatalf("unable to init db: %v", err)
- }
-
- pControl := NewPaymentControl(db)
-
- info, attempt, _, err := genInfo()
- if err != nil {
- t.Fatalf("unable to generate htlc message: %v", err)
- }
-
- // Init the payment.
- err = pControl.InitPayment(info.PaymentHash, info)
- if err != nil {
- t.Fatalf("unable to send htlc message: %v", err)
- }
-
- // Create three unique attempts we'll use for the test, and
- // register them with the payment control. We set each
- // attempts's value to one third of the payment amount, and
- // populate the MPP options.
- shardAmt := info.Value / 3
- attempt.Route.FinalHop().AmtToForward = shardAmt
- attempt.Route.FinalHop().MPP = record.NewMPP(
- info.Value, [32]byte{1},
- )
-
- _, err = pControl.RegisterAttempt(info.PaymentHash, attempt)
- if err != nil {
- t.Fatalf("unable to send htlc message: %v", err)
- }
-
- // Now try to register a non-MPP attempt, which should fail.
- b := *attempt
- b.AttemptID = 1
- b.Route.FinalHop().MPP = nil
- _, err = pControl.RegisterAttempt(info.PaymentHash, &b)
- if !ErrMPPayment.Is(err) {
- t.Fatalf("expected ErrMPPayment, got: %v", err)
- }
-
- // Try to register attempt one with a different payment address.
- b.Route.FinalHop().MPP = record.NewMPP(
- info.Value, [32]byte{2},
- )
- _, err = pControl.RegisterAttempt(info.PaymentHash, &b)
- if !ErrMPPPaymentAddrMismatch.Is(err) {
- t.Fatalf("expected ErrMPPPaymentAddrMismatch, got: %v", err)
- }
-
- // Try registering one with a different total amount.
- b.Route.FinalHop().MPP = record.NewMPP(
- info.Value/2, [32]byte{1},
- )
- _, err = pControl.RegisterAttempt(info.PaymentHash, &b)
- if !ErrMPPTotalAmountMismatch.Is(err) {
- t.Fatalf("expected ErrMPPTotalAmountMismatch, got: %v", err)
- }
-
- // Create and init a new payment. This time we'll check that we cannot
- // register an MPP attempt if we already registered a non-MPP one.
- info, attempt, _, err = genInfo()
- if err != nil {
- t.Fatalf("unable to generate htlc message: %v", err)
- }
-
- err = pControl.InitPayment(info.PaymentHash, info)
- if err != nil {
- t.Fatalf("unable to send htlc message: %v", err)
- }
-
- attempt.Route.FinalHop().MPP = nil
- _, err = pControl.RegisterAttempt(info.PaymentHash, attempt)
- if err != nil {
- t.Fatalf("unable to send htlc message: %v", err)
- }
-
- // Attempt to register an MPP attempt, which should fail.
- b = *attempt
- b.AttemptID = 1
- b.Route.FinalHop().MPP = record.NewMPP(
- info.Value, [32]byte{1},
- )
-
- _, err = pControl.RegisterAttempt(info.PaymentHash, &b)
- if !ErrNonMPPayment.Is(err) {
- t.Fatalf("expected ErrNonMPPayment, got: %v", err)
- }
-}
-
-// assertPaymentStatus retrieves the status of the payment referred to by hash
-// and compares it with the expected state.
-func assertPaymentStatus(t *testing.T, p *PaymentControl,
- hash lntypes.Hash, expStatus PaymentStatus) {
-
- t.Helper()
-
- payment, err := p.FetchPayment(hash)
- if expStatus == StatusUnknown && ErrPaymentNotInitiated.Is(err) {
- return
- }
- if err != nil {
- t.Fatal(err)
- }
-
- if payment.Status != expStatus {
- t.Fatalf("payment status mismatch: expected %v, got %v",
- expStatus, payment.Status)
- }
-}
-
-type htlcStatus struct {
- *HTLCAttemptInfo
- settle *lntypes.Preimage
- failure *HTLCFailReason
-}
-
-// assertPaymentInfo retrieves the payment referred to by hash and verifies the
-// expected values.
-func assertPaymentInfo(t *testing.T, p *PaymentControl, hash lntypes.Hash,
- c *PaymentCreationInfo, f *FailureReason, a *htlcStatus) {
-
- t.Helper()
-
- payment, err := p.FetchPayment(hash)
- if err != nil {
- t.Fatal(err)
- }
-
- if !reflect.DeepEqual(payment.Info, c) {
- t.Fatalf("PaymentCreationInfos don't match: %v vs %v",
- spew.Sdump(payment.Info), spew.Sdump(c))
- }
-
- if f != nil {
- if *payment.FailureReason != *f {
- t.Fatal("unexpected failure reason")
- }
- } else {
- if payment.FailureReason != nil {
- t.Fatal("unexpected failure reason")
- }
- }
-
- if a == nil {
- if len(payment.HTLCs) > 0 {
- t.Fatal("expected no htlcs")
- }
- return
- }
-
- htlc := payment.HTLCs[a.AttemptID]
- if err := assertRouteEqual(&htlc.Route, &a.Route); err != nil {
- t.Fatal("routes do not match")
- }
-
- if htlc.AttemptID != a.AttemptID {
- t.Fatalf("unnexpected attempt ID %v, expected %v",
- htlc.AttemptID, a.AttemptID)
- }
-
- if a.failure != nil {
- if htlc.Failure == nil {
- t.Fatalf("expected HTLC to be failed")
- }
-
- if htlc.Failure.Reason != *a.failure {
- t.Fatalf("expected HTLC failure %v, had %v",
- *a.failure, htlc.Failure.Reason)
- }
- } else if htlc.Failure != nil {
- t.Fatalf("expected no HTLC failure")
- }
-
- if a.settle != nil {
- if htlc.Settle.Preimage != *a.settle {
- t.Fatalf("Preimages don't match: %x vs %x",
- htlc.Settle.Preimage, a.settle)
- }
- } else if htlc.Settle != nil {
- t.Fatal("expected no settle info")
- }
-}
-
-// fetchPaymentIndexEntry gets the payment hash for the sequence number provided
-// from our payment indexes bucket.
-func fetchPaymentIndexEntry(_ *testing.T, p *PaymentControl,
- sequenceNumber uint64) (*lntypes.Hash, er.R) {
-
- var hash lntypes.Hash
-
- if err := kvdb.View(p.db, func(tx walletdb.ReadTx) er.R {
- indexBucket := tx.ReadBucket(paymentsIndexBucket)
- key := make([]byte, 8)
- byteOrder.PutUint64(key, sequenceNumber)
-
- indexValue := indexBucket.Get(key)
- if indexValue == nil {
- return errNoSequenceNrIndex.Default()
- }
-
- r := bytes.NewReader(indexValue)
-
- var err er.R
- hash, err = deserializePaymentIndex(r)
- return err
- }, func() {
- hash = lntypes.Hash{}
- }); err != nil {
- return nil, err
- }
-
- return &hash, nil
-}
-
-// assertPaymentIndex looks up the index for a payment in the db and checks
-// that its payment hash matches the expected hash passed in.
-func assertPaymentIndex(t *testing.T, p *PaymentControl,
- expectedHash lntypes.Hash) {
-
- // Lookup the payment so that we have its sequence number and check
- // that is has correctly been indexed in the payment indexes bucket.
- pmt, err := p.FetchPayment(expectedHash)
- util.RequireNoErr(t, err)
-
- hash, err := fetchPaymentIndexEntry(t, p, pmt.SequenceNum)
- util.RequireNoErr(t, err)
- assert.Equal(t, expectedHash, *hash)
-}
-
-// assertNoIndex checks that an index for the sequence number provided does not
-// exist.
-func assertNoIndex(t *testing.T, p *PaymentControl, seqNr uint64) {
- _, err := fetchPaymentIndexEntry(t, p, seqNr)
- require.True(t, errNoSequenceNrIndex.Is(err))
-}
diff --git a/lnd/channeldb/payments.go b/lnd/channeldb/payments.go
deleted file mode 100644
index 378f9f35..00000000
--- a/lnd/channeldb/payments.go
+++ /dev/null
@@ -1,1084 +0,0 @@
-package channeldb
-
-import (
- "bytes"
- "encoding/binary"
- "io"
- "sort"
- "time"
-
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/btcutil/util"
- "github.com/pkt-cash/pktd/lnd/channeldb/kvdb"
- "github.com/pkt-cash/pktd/lnd/lntypes"
- "github.com/pkt-cash/pktd/lnd/lnwire"
- "github.com/pkt-cash/pktd/lnd/record"
- "github.com/pkt-cash/pktd/lnd/routing/route"
- "github.com/pkt-cash/pktd/lnd/tlv"
- "github.com/pkt-cash/pktd/wire"
-)
-
-var (
- // paymentsRootBucket is the name of the top-level bucket within the
- // database that stores all data related to payments. Within this
- // bucket, each payment hash its own sub-bucket keyed by its payment
- // hash.
- //
- // Bucket hierarchy:
- //
- // root-bucket
- // |
- // |--
- // | |--sequence-key:
- // | |--creation-info-key:
- // | |--fail-info-key: <(optional) fail info>
- // | |
- // | |--payment-htlcs-bucket (shard-bucket)
- // | | |
- // | | |--
- // | | | |--htlc-attempt-info-key:
- // | | | |--htlc-settle-info-key: <(optional) settle info>
- // | | | |--htlc-fail-info-key: <(optional) fail info>
- // | | |
- // | | |--
- // | | | |
- // | | ... ...
- // | |
- // | |
- // | |--duplicate-bucket (only for old, completed payments)
- // | |
- // | |--
- // | | |--sequence-key:
- // | | |--creation-info-key:
- // | | |--attempt-info-key:
- // | | |--settle-info-key:
- // | | |--fail-info-key:
- // | |
- // | |--
- // | | |
- // | ... ...
- // |
- // |--
- // | |
- // | ...
- // ...
- //
- paymentsRootBucket = []byte("payments-root-bucket")
-
- // paymentSequenceKey is a key used in the payment's sub-bucket to
- // store the sequence number of the payment.
- paymentSequenceKey = []byte("payment-sequence-key")
-
- // paymentCreationInfoKey is a key used in the payment's sub-bucket to
- // store the creation info of the payment.
- paymentCreationInfoKey = []byte("payment-creation-info")
-
- // paymentHtlcsBucket is a bucket where we'll store the information
- // about the HTLCs that were attempted for a payment.
- paymentHtlcsBucket = []byte("payment-htlcs-bucket")
-
- // htlcAttemptInfoKey is a key used in a HTLC's sub-bucket to store the
- // info about the attempt that was done for the HTLC in question.
- htlcAttemptInfoKey = []byte("htlc-attempt-info")
-
- // htlcSettleInfoKey is a key used in a HTLC's sub-bucket to store the
- // settle info, if any.
- htlcSettleInfoKey = []byte("htlc-settle-info")
-
- // htlcFailInfoKey is a key used in a HTLC's sub-bucket to store
- // failure information, if any.
- htlcFailInfoKey = []byte("htlc-fail-info")
-
- // paymentFailInfoKey is a key used in the payment's sub-bucket to
- // store information about the reason a payment failed.
- paymentFailInfoKey = []byte("payment-fail-info")
-
- // paymentsIndexBucket is the name of the top-level bucket within the
- // database that stores an index of payment sequence numbers to its
- // payment hash.
- // payments-sequence-index-bucket
- // |--:
- // |--...
- // |--:
- paymentsIndexBucket = []byte("payments-index-bucket")
-)
-
-var (
- // ErrNoSequenceNumber is returned if we lookup a payment which does
- // not have a sequence number.
- ErrNoSequenceNumber = Err.CodeWithDetail("ErrNoSequenceNumber", "sequence number not found")
-
- // ErrDuplicateNotFound is returned when we lookup a payment by its
- // index and cannot find a payment with a matching sequence number.
- ErrDuplicateNotFound = Err.CodeWithDetail("ErrDuplicateNotFound", "duplicate payment not found")
-
- // ErrNoDuplicateBucket is returned when we expect to find duplicates
- // when looking up a payment from its index, but the payment does not
- // have any.
- ErrNoDuplicateBucket = Err.CodeWithDetail("ErrNoDuplicateBucket", "expected duplicate bucket")
-
- // ErrNoDuplicateNestedBucket is returned if we do not find duplicate
- // payments in their own sub-bucket.
- ErrNoDuplicateNestedBucket = Err.CodeWithDetail("ErrNoDuplicateNestedBucket", "nested duplicate bucket not "+
- "found")
-)
-
-// FailureReason encodes the reason a payment ultimately failed.
-type FailureReason byte
-
-const (
- // FailureReasonTimeout indicates that the payment did timeout before a
- // successful payment attempt was made.
- FailureReasonTimeout FailureReason = 0
-
- // FailureReasonNoRoute indicates no successful route to the
- // destination was found during path finding.
- FailureReasonNoRoute FailureReason = 1
-
- // FailureReasonError indicates that an unexpected error happened during
- // payment.
- FailureReasonError FailureReason = 2
-
- // FailureReasonPaymentDetails indicates that either the hash is unknown
- // or the final cltv delta or amount is incorrect.
- FailureReasonPaymentDetails FailureReason = 3
-
- // FailureReasonInsufficientBalance indicates that we didn't have enough
- // balance to complete the payment.
- FailureReasonInsufficientBalance FailureReason = 4
-
- // TODO(halseth): cancel state.
-
- // TODO(joostjager): Add failure reasons for:
- // LocalLiquidityInsufficient, RemoteCapacityInsufficient.
-)
-
-// Error returns a human readable error string for the FailureReason.
-func (r FailureReason) Error() string {
- return r.String()
-}
-
-// String returns a human readable FailureReason.
-func (r FailureReason) String() string {
- switch r {
- case FailureReasonTimeout:
- return "timeout"
- case FailureReasonNoRoute:
- return "no_route"
- case FailureReasonError:
- return "error"
- case FailureReasonPaymentDetails:
- return "incorrect_payment_details"
- case FailureReasonInsufficientBalance:
- return "insufficient_balance"
- }
-
- return "unknown"
-}
-
-// PaymentStatus represent current status of payment
-type PaymentStatus byte
-
-const (
- // StatusUnknown is the status where a payment has never been initiated
- // and hence is unknown.
- StatusUnknown PaymentStatus = 0
-
- // StatusInFlight is the status where a payment has been initiated, but
- // a response has not been received.
- StatusInFlight PaymentStatus = 1
-
- // StatusSucceeded is the status where a payment has been initiated and
- // the payment was completed successfully.
- StatusSucceeded PaymentStatus = 2
-
- // StatusFailed is the status where a payment has been initiated and a
- // failure result has come back.
- StatusFailed PaymentStatus = 3
-)
-
-// String returns readable representation of payment status.
-func (ps PaymentStatus) String() string {
- switch ps {
- case StatusUnknown:
- return "Unknown"
- case StatusInFlight:
- return "In Flight"
- case StatusSucceeded:
- return "Succeeded"
- case StatusFailed:
- return "Failed"
- default:
- return "Unknown"
- }
-}
-
-// PaymentCreationInfo is the information necessary to have ready when
-// initiating a payment, moving it into state InFlight.
-type PaymentCreationInfo struct {
- // PaymentHash is the hash this payment is paying to.
- PaymentHash lntypes.Hash
-
- // Value is the amount we are paying.
- Value lnwire.MilliSatoshi
-
- // CreationTime is the time when this payment was initiated.
- CreationTime time.Time
-
- // PaymentRequest is the full payment request, if any.
- PaymentRequest []byte
-}
-
-// FetchPayments returns all sent payments found in the DB.
-//
-// nolint: dupl
-func (db *DB) FetchPayments() ([]*MPPayment, er.R) {
- var payments []*MPPayment
-
- err := kvdb.View(db, func(tx kvdb.RTx) er.R {
- paymentsBucket := tx.ReadBucket(paymentsRootBucket)
- if paymentsBucket == nil {
- return nil
- }
-
- return paymentsBucket.ForEach(func(k, v []byte) er.R {
- bucket := paymentsBucket.NestedReadBucket(k)
- if bucket == nil {
- // We only expect sub-buckets to be found in
- // this top-level bucket.
- return er.Errorf("non bucket element in " +
- "payments bucket")
- }
-
- p, err := fetchPayment(bucket)
- if err != nil {
- return err
- }
-
- payments = append(payments, p)
-
- // For older versions of lnd, duplicate payments to a
- // payment has was possible. These will be found in a
- // sub-bucket indexed by their sequence number if
- // available.
- duplicatePayments, err := fetchDuplicatePayments(bucket)
- if err != nil {
- return err
- }
-
- payments = append(payments, duplicatePayments...)
- return nil
- })
- }, func() {
- payments = nil
- })
- if err != nil {
- return nil, err
- }
-
- // Before returning, sort the payments by their sequence number.
- sort.Slice(payments, func(i, j int) bool {
- return payments[i].SequenceNum < payments[j].SequenceNum
- })
-
- return payments, nil
-}
-
-func fetchCreationInfo(bucket kvdb.RBucket) (*PaymentCreationInfo, er.R) {
- b := bucket.Get(paymentCreationInfoKey)
- if b == nil {
- return nil, er.Errorf("creation info not found")
- }
-
- r := bytes.NewReader(b)
- return deserializePaymentCreationInfo(r)
-}
-
-func fetchPayment(bucket kvdb.RBucket) (*MPPayment, er.R) {
- seqBytes := bucket.Get(paymentSequenceKey)
- if seqBytes == nil {
- return nil, er.Errorf("sequence number not found")
- }
-
- sequenceNum := binary.BigEndian.Uint64(seqBytes)
-
- // Get the PaymentCreationInfo.
- creationInfo, err := fetchCreationInfo(bucket)
- if err != nil {
- return nil, err
-
- }
-
- var htlcs []HTLCAttempt
- htlcsBucket := bucket.NestedReadBucket(paymentHtlcsBucket)
- if htlcsBucket != nil {
- // Get the payment attempts. This can be empty.
- htlcs, err = fetchHtlcAttempts(htlcsBucket)
- if err != nil {
- return nil, err
- }
- }
-
- // Get failure reason if available.
- var failureReason *FailureReason
- b := bucket.Get(paymentFailInfoKey)
- if b != nil {
- reason := FailureReason(b[0])
- failureReason = &reason
- }
-
- // Go through all HTLCs for this payment, noting whether we have any
- // settled HTLC, and any still in-flight.
- var inflight, settled bool
- for _, h := range htlcs {
- if h.Failure != nil {
- continue
- }
-
- if h.Settle != nil {
- settled = true
- continue
- }
-
- // If any of the HTLCs are not failed nor settled, we
- // still have inflight HTLCs.
- inflight = true
- }
-
- // Use the DB state to determine the status of the payment.
- var paymentStatus PaymentStatus
-
- switch {
-
- // If any of the the HTLCs did succeed and there are no HTLCs in
- // flight, the payment succeeded.
- case !inflight && settled:
- paymentStatus = StatusSucceeded
-
- // If we have no in-flight HTLCs, and the payment failure is set, the
- // payment is considered failed.
- case !inflight && failureReason != nil:
- paymentStatus = StatusFailed
-
- // Otherwise it is still in flight.
- default:
- paymentStatus = StatusInFlight
- }
-
- return &MPPayment{
- SequenceNum: sequenceNum,
- Info: creationInfo,
- HTLCs: htlcs,
- FailureReason: failureReason,
- Status: paymentStatus,
- }, nil
-}
-
-// fetchHtlcAttempts retrives all htlc attempts made for the payment found in
-// the given bucket.
-func fetchHtlcAttempts(bucket kvdb.RBucket) ([]HTLCAttempt, er.R) {
- htlcs := make([]HTLCAttempt, 0)
-
- err := bucket.ForEach(func(k, _ []byte) er.R {
- aid := byteOrder.Uint64(k)
- htlcBucket := bucket.NestedReadBucket(k)
-
- attemptInfo, err := fetchHtlcAttemptInfo(
- htlcBucket,
- )
- if err != nil {
- return err
- }
- attemptInfo.AttemptID = aid
-
- htlc := HTLCAttempt{
- HTLCAttemptInfo: *attemptInfo,
- }
-
- // Settle info might be nil.
- htlc.Settle, err = fetchHtlcSettleInfo(htlcBucket)
- if err != nil {
- return err
- }
-
- // Failure info might be nil.
- htlc.Failure, err = fetchHtlcFailInfo(htlcBucket)
- if err != nil {
- return err
- }
-
- htlcs = append(htlcs, htlc)
- return nil
- })
- if err != nil {
- return nil, err
- }
-
- return htlcs, nil
-}
-
-// fetchHtlcAttemptInfo fetches the payment attempt info for this htlc from the
-// bucket.
-func fetchHtlcAttemptInfo(bucket kvdb.RBucket) (*HTLCAttemptInfo, er.R) {
- b := bucket.Get(htlcAttemptInfoKey)
- if b == nil {
- return nil, errNoAttemptInfo.Default()
- }
-
- r := bytes.NewReader(b)
- return deserializeHTLCAttemptInfo(r)
-}
-
-// fetchHtlcSettleInfo retrieves the settle info for the htlc. If the htlc isn't
-// settled, nil is returned.
-func fetchHtlcSettleInfo(bucket kvdb.RBucket) (*HTLCSettleInfo, er.R) {
- b := bucket.Get(htlcSettleInfoKey)
- if b == nil {
- // Settle info is optional.
- return nil, nil
- }
-
- r := bytes.NewReader(b)
- return deserializeHTLCSettleInfo(r)
-}
-
-// fetchHtlcFailInfo retrieves the failure info for the htlc. If the htlc hasn't
-// failed, nil is returned.
-func fetchHtlcFailInfo(bucket kvdb.RBucket) (*HTLCFailInfo, er.R) {
- b := bucket.Get(htlcFailInfoKey)
- if b == nil {
- // Fail info is optional.
- return nil, nil
- }
-
- r := bytes.NewReader(b)
- return deserializeHTLCFailInfo(r)
-}
-
-// PaymentsQuery represents a query to the payments database starting or ending
-// at a certain offset index. The number of retrieved records can be limited.
-type PaymentsQuery struct {
- // IndexOffset determines the starting point of the payments query and
- // is always exclusive. In normal order, the query starts at the next
- // higher (available) index compared to IndexOffset. In reversed order,
- // the query ends at the next lower (available) index compared to the
- // IndexOffset. In the case of a zero index_offset, the query will start
- // with the oldest payment when paginating forwards, or will end with
- // the most recent payment when paginating backwards.
- IndexOffset uint64
-
- // MaxPayments is the maximal number of payments returned in the
- // payments query.
- MaxPayments uint64
-
- // Reversed gives a meaning to the IndexOffset. If reversed is set to
- // true, the query will fetch payments with indices lower than the
- // IndexOffset, otherwise, it will return payments with indices greater
- // than the IndexOffset.
- Reversed bool
-
- // If IncludeIncomplete is true, then return payments that have not yet
- // fully completed. This means that pending payments, as well as failed
- // payments will show up if this field is set to true.
- IncludeIncomplete bool
-}
-
-// PaymentsResponse contains the result of a query to the payments database.
-// It includes the set of payments that match the query and integers which
-// represent the index of the first and last item returned in the series of
-// payments. These integers allow callers to resume their query in the event
-// that the query's response exceeds the max number of returnable events.
-type PaymentsResponse struct {
- // Payments is the set of payments returned from the database for the
- // PaymentsQuery.
- Payments []*MPPayment
-
- // FirstIndexOffset is the index of the first element in the set of
- // returned MPPayments. Callers can use this to resume their query
- // in the event that the slice has too many events to fit into a single
- // response. The offset can be used to continue reverse pagination.
- FirstIndexOffset uint64
-
- // LastIndexOffset is the index of the last element in the set of
- // returned MPPayments. Callers can use this to resume their query
- // in the event that the slice has too many events to fit into a single
- // response. The offset can be used to continue forward pagination.
- LastIndexOffset uint64
-}
-
-// QueryPayments is a query to the payments database which is restricted
-// to a subset of payments by the payments query, containing an offset
-// index and a maximum number of returned payments.
-func (db *DB) QueryPayments(query PaymentsQuery) (PaymentsResponse, er.R) {
- var resp PaymentsResponse
-
- if err := kvdb.View(db, func(tx kvdb.RTx) er.R {
- // Get the root payments bucket.
- paymentsBucket := tx.ReadBucket(paymentsRootBucket)
- if paymentsBucket == nil {
- return nil
- }
-
- // Get the index bucket which maps sequence number -> payment
- // hash and duplicate bool. If we have a payments bucket, we
- // should have an indexes bucket as well.
- indexes := tx.ReadBucket(paymentsIndexBucket)
- if indexes == nil {
- return er.Errorf("index bucket does not exist")
- }
-
- // accumulatePayments gets payments with the sequence number
- // and hash provided and adds them to our list of payments if
- // they meet the criteria of our query. It returns the number
- // of payments that were added.
- accumulatePayments := func(sequenceKey, hash []byte) (bool, er.R) {
-
- r := bytes.NewReader(hash)
- paymentHash, err := deserializePaymentIndex(r)
- if err != nil {
- return false, err
- }
-
- payment, err := fetchPaymentWithSequenceNumber(
- tx, paymentHash, sequenceKey,
- )
- if err != nil {
- return false, err
- }
-
- // To keep compatibility with the old API, we only
- // return non-succeeded payments if requested.
- if payment.Status != StatusSucceeded &&
- !query.IncludeIncomplete {
-
- return false, err
- }
-
- // At this point, we've exhausted the offset, so we'll
- // begin collecting invoices found within the range.
- resp.Payments = append(resp.Payments, payment)
- return true, nil
- }
-
- // Create a paginator which reads from our sequence index bucket
- // with the parameters provided by the payments query.
- paginator := newPaginator(
- indexes.ReadCursor(), query.Reversed, query.IndexOffset,
- query.MaxPayments,
- )
-
- // Run a paginated query, adding payments to our response.
- if err := paginator.query(accumulatePayments); err != nil {
- return err
- }
-
- return nil
- }, func() {
- resp = PaymentsResponse{}
- }); err != nil {
- return resp, err
- }
-
- // Need to swap the payments slice order if reversed order.
- if query.Reversed {
- for l, r := 0, len(resp.Payments)-1; l < r; l, r = l+1, r-1 {
- resp.Payments[l], resp.Payments[r] =
- resp.Payments[r], resp.Payments[l]
- }
- }
-
- // Set the first and last index of the returned payments so that the
- // caller can resume from this point later on.
- if len(resp.Payments) > 0 {
- resp.FirstIndexOffset = resp.Payments[0].SequenceNum
- resp.LastIndexOffset =
- resp.Payments[len(resp.Payments)-1].SequenceNum
- }
-
- return resp, nil
-}
-
-// fetchPaymentWithSequenceNumber get the payment which matches the payment hash
-// *and* sequence number provided from the database. This is required because
-// we previously had more than one payment per hash, so we have multiple indexes
-// pointing to a single payment; we want to retrieve the correct one.
-func fetchPaymentWithSequenceNumber(tx kvdb.RTx, paymentHash lntypes.Hash,
- sequenceNumber []byte) (*MPPayment, er.R) {
-
- // We can now lookup the payment keyed by its hash in
- // the payments root bucket.
- bucket, err := fetchPaymentBucket(tx, paymentHash)
- if err != nil {
- return nil, err
- }
-
- // A single payment hash can have multiple payments associated with it.
- // We lookup our sequence number first, to determine whether this is
- // the payment we are actually looking for.
- seqBytes := bucket.Get(paymentSequenceKey)
- if seqBytes == nil {
- return nil, ErrNoSequenceNumber.Default()
- }
-
- // If this top level payment has the sequence number we are looking for,
- // return it.
- if bytes.Equal(seqBytes, sequenceNumber) {
- return fetchPayment(bucket)
- }
-
- // If we were not looking for the top level payment, we are looking for
- // one of our duplicate payments. We need to iterate through the seq
- // numbers in this bucket to find the correct payments. If we do not
- // find a duplicate payments bucket here, something is wrong.
- dup := bucket.NestedReadBucket(duplicatePaymentsBucket)
- if dup == nil {
- return nil, ErrNoDuplicateBucket.Default()
- }
-
- var duplicatePayment *MPPayment
- err = dup.ForEach(func(k, v []byte) er.R {
- subBucket := dup.NestedReadBucket(k)
- if subBucket == nil {
- // We one bucket for each duplicate to be found.
- return ErrNoDuplicateNestedBucket.Default()
- }
-
- seqBytes := subBucket.Get(duplicatePaymentSequenceKey)
- if seqBytes == nil {
- return err
- }
-
- // If this duplicate payment is not the sequence number we are
- // looking for, we can continue.
- if !bytes.Equal(seqBytes, sequenceNumber) {
- return nil
- }
-
- duplicatePayment, err = fetchDuplicatePayment(subBucket)
- if err != nil {
- return err
- }
-
- return nil
- })
- if err != nil {
- return nil, err
- }
-
- // If none of the duplicate payments matched our sequence number, we
- // failed to find the payment with this sequence number; something is
- // wrong.
- if duplicatePayment == nil {
- return nil, ErrDuplicateNotFound.Default()
- }
-
- return duplicatePayment, nil
-}
-
-// DeletePayments deletes all completed and failed payments from the DB.
-func (db *DB) DeletePayments() er.R {
- return kvdb.Update(db, func(tx kvdb.RwTx) er.R {
- payments := tx.ReadWriteBucket(paymentsRootBucket)
- if payments == nil {
- return nil
- }
-
- var (
- // deleteBuckets is the set of payment buckets we need
- // to delete.
- deleteBuckets [][]byte
-
- // deleteIndexes is the set of indexes pointing to these
- // payments that need to be deleted.
- deleteIndexes [][]byte
- )
- err := payments.ForEach(func(k, _ []byte) er.R {
- bucket := payments.NestedReadWriteBucket(k)
- if bucket == nil {
- // We only expect sub-buckets to be found in
- // this top-level bucket.
- return er.Errorf("non bucket element in " +
- "payments bucket")
- }
-
- // If the status is InFlight, we cannot safely delete
- // the payment information, so we return early.
- paymentStatus, err := fetchPaymentStatus(bucket)
- if err != nil {
- return err
- }
-
- // If the status is InFlight, we cannot safely delete
- // the payment information, so we return early.
- if paymentStatus == StatusInFlight {
- return nil
- }
-
- // Add the bucket to the set of buckets we can delete.
- deleteBuckets = append(deleteBuckets, k)
-
- // Get all the sequence number associated with the
- // payment, including duplicates.
- seqNrs, err := fetchSequenceNumbers(bucket)
- if err != nil {
- return err
- }
-
- deleteIndexes = append(deleteIndexes, seqNrs...)
-
- return nil
- })
- if err != nil {
- return err
- }
-
- for _, k := range deleteBuckets {
- if err := payments.DeleteNestedBucket(k); err != nil {
- return err
- }
- }
-
- // Get our index bucket and delete all indexes pointing to the
- // payments we are deleting.
- indexBucket := tx.ReadWriteBucket(paymentsIndexBucket)
- for _, k := range deleteIndexes {
- if err := indexBucket.Delete(k); err != nil {
- return err
- }
- }
-
- return nil
- }, func() {})
-}
-
-// fetchSequenceNumbers fetches all the sequence numbers associated with a
-// payment, including those belonging to any duplicate payments.
-func fetchSequenceNumbers(paymentBucket kvdb.RBucket) ([][]byte, er.R) {
- seqNum := paymentBucket.Get(paymentSequenceKey)
- if seqNum == nil {
- return nil, er.New("expected sequence number")
- }
-
- sequenceNumbers := [][]byte{seqNum}
-
- // Get the duplicate payments bucket, if it has no duplicates, just
- // return early with the payment sequence number.
- duplicates := paymentBucket.NestedReadBucket(duplicatePaymentsBucket)
- if duplicates == nil {
- return sequenceNumbers, nil
- }
-
- // If we do have duplicated, they are keyed by sequence number, so we
- // iterate through the duplicates bucket and add them to our set of
- // sequence numbers.
- if err := duplicates.ForEach(func(k, v []byte) er.R {
- sequenceNumbers = append(sequenceNumbers, k)
- return nil
- }); err != nil {
- return nil, err
- }
-
- return sequenceNumbers, nil
-}
-
-// nolint: dupl
-func serializePaymentCreationInfo(w io.Writer, c *PaymentCreationInfo) er.R {
- var scratch [8]byte
-
- if _, err := util.Write(w, c.PaymentHash[:]); err != nil {
- return err
- }
-
- byteOrder.PutUint64(scratch[:], uint64(c.Value))
- if _, err := util.Write(w, scratch[:]); err != nil {
- return err
- }
-
- if err := serializeTime(w, c.CreationTime); err != nil {
- return err
- }
-
- byteOrder.PutUint32(scratch[:4], uint32(len(c.PaymentRequest)))
- if _, err := util.Write(w, scratch[:4]); err != nil {
- return err
- }
-
- if _, err := util.Write(w, c.PaymentRequest[:]); err != nil {
- return err
- }
-
- return nil
-}
-
-func deserializePaymentCreationInfo(r io.Reader) (*PaymentCreationInfo, er.R) {
- var scratch [8]byte
-
- c := &PaymentCreationInfo{}
-
- if _, err := util.ReadFull(r, c.PaymentHash[:]); err != nil {
- return nil, err
- }
-
- if _, err := util.ReadFull(r, scratch[:]); err != nil {
- return nil, err
- }
- c.Value = lnwire.MilliSatoshi(byteOrder.Uint64(scratch[:]))
-
- creationTime, err := deserializeTime(r)
- if err != nil {
- return nil, err
- }
- c.CreationTime = creationTime
-
- if _, err := util.ReadFull(r, scratch[:4]); err != nil {
- return nil, err
- }
-
- reqLen := uint32(byteOrder.Uint32(scratch[:4]))
- payReq := make([]byte, reqLen)
- if reqLen > 0 {
- if _, err := util.ReadFull(r, payReq); err != nil {
- return nil, err
- }
- }
- c.PaymentRequest = payReq
-
- return c, nil
-}
-
-func serializeHTLCAttemptInfo(w io.Writer, a *HTLCAttemptInfo) er.R {
- if err := WriteElements(w, a.SessionKey); err != nil {
- return err
- }
-
- if err := SerializeRoute(w, a.Route); err != nil {
- return err
- }
-
- return serializeTime(w, a.AttemptTime)
-}
-
-func deserializeHTLCAttemptInfo(r io.Reader) (*HTLCAttemptInfo, er.R) {
- a := &HTLCAttemptInfo{}
- err := ReadElements(r, &a.SessionKey)
- if err != nil {
- return nil, err
- }
- a.Route, err = DeserializeRoute(r)
- if err != nil {
- return nil, err
- }
-
- a.AttemptTime, err = deserializeTime(r)
- if err != nil {
- return nil, err
- }
-
- return a, nil
-}
-
-func serializeHop(w io.Writer, h *route.Hop) er.R {
- if err := WriteElements(w,
- h.PubKeyBytes[:],
- h.ChannelID,
- h.OutgoingTimeLock,
- h.AmtToForward,
- ); err != nil {
- return err
- }
-
- if err := util.WriteBin(w, byteOrder, h.LegacyPayload); err != nil {
- return err
- }
-
- // For legacy payloads, we don't need to write any TLV records, so
- // we'll write a zero indicating the our serialized TLV map has no
- // records.
- if h.LegacyPayload {
- return WriteElements(w, uint32(0))
- }
-
- // Gather all non-primitive TLV records so that they can be serialized
- // as a single blob.
- //
- // TODO(conner): add migration to unify all fields in a single TLV
- // blobs. The split approach will cause headaches down the road as more
- // fields are added, which we can avoid by having a single TLV stream
- // for all payload fields.
- var records []tlv.Record
- if h.MPP != nil {
- records = append(records, h.MPP.Record())
- }
-
- // Final sanity check to absolutely rule out custom records that are not
- // custom and write into the standard range.
- if err := h.CustomRecords.Validate(); err != nil {
- return err
- }
-
- // Convert custom records to tlv and add to the record list.
- // MapToRecords sorts the list, so adding it here will keep the list
- // canonical.
- tlvRecords := tlv.MapToRecords(h.CustomRecords)
- records = append(records, tlvRecords...)
-
- // Otherwise, we'll transform our slice of records into a map of the
- // raw bytes, then serialize them in-line with a length (number of
- // elements) prefix.
- mapRecords, err := tlv.RecordsToMap(records)
- if err != nil {
- return err
- }
-
- numRecords := uint32(len(mapRecords))
- if err := WriteElements(w, numRecords); err != nil {
- return err
- }
-
- for recordType, rawBytes := range mapRecords {
- if err := WriteElements(w, recordType); err != nil {
- return err
- }
-
- if err := wire.WriteVarBytes(w, 0, rawBytes); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-// maxOnionPayloadSize is the largest Sphinx payload possible, so we don't need
-// to read/write a TLV stream larger than this.
-const maxOnionPayloadSize = 1300
-
-func deserializeHop(r io.Reader) (*route.Hop, er.R) {
- h := &route.Hop{}
-
- var pub []byte
- if err := ReadElements(r, &pub); err != nil {
- return nil, err
- }
- copy(h.PubKeyBytes[:], pub)
-
- if err := ReadElements(r,
- &h.ChannelID, &h.OutgoingTimeLock, &h.AmtToForward,
- ); err != nil {
- return nil, err
- }
-
- // TODO(roasbeef): change field to allow LegacyPayload false to be the
- // legacy default?
- err := util.ReadBin(r, byteOrder, &h.LegacyPayload)
- if err != nil {
- return nil, err
- }
-
- var numElements uint32
- if err := ReadElements(r, &numElements); err != nil {
- return nil, err
- }
-
- // If there're no elements, then we can return early.
- if numElements == 0 {
- return h, nil
- }
-
- tlvMap := make(map[uint64][]byte)
- for i := uint32(0); i < numElements; i++ {
- var tlvType uint64
- if err := ReadElements(r, &tlvType); err != nil {
- return nil, err
- }
-
- rawRecordBytes, err := wire.ReadVarBytes(
- r, 0, maxOnionPayloadSize, "tlv",
- )
- if err != nil {
- return nil, err
- }
-
- tlvMap[tlvType] = rawRecordBytes
- }
-
- // If the MPP type is present, remove it from the generic TLV map and
- // parse it back into a proper MPP struct.
- //
- // TODO(conner): add migration to unify all fields in a single TLV
- // blobs. The split approach will cause headaches down the road as more
- // fields are added, which we can avoid by having a single TLV stream
- // for all payload fields.
- mppType := uint64(record.MPPOnionType)
- if mppBytes, ok := tlvMap[mppType]; ok {
- delete(tlvMap, mppType)
-
- var (
- mpp = &record.MPP{}
- mppRec = mpp.Record()
- r = bytes.NewReader(mppBytes)
- )
- err := mppRec.Decode(r, uint64(len(mppBytes)))
- if err != nil {
- return nil, err
- }
- h.MPP = mpp
- }
-
- h.CustomRecords = tlvMap
-
- return h, nil
-}
-
-// SerializeRoute serializes a route.
-func SerializeRoute(w io.Writer, r route.Route) er.R {
- if err := WriteElements(w,
- r.TotalTimeLock, r.TotalAmount, r.SourcePubKey[:],
- ); err != nil {
- return err
- }
-
- if err := WriteElements(w, uint32(len(r.Hops))); err != nil {
- return err
- }
-
- for _, h := range r.Hops {
- if err := serializeHop(w, h); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-// DeserializeRoute deserializes a route.
-func DeserializeRoute(r io.Reader) (route.Route, er.R) {
- rt := route.Route{}
- if err := ReadElements(r,
- &rt.TotalTimeLock, &rt.TotalAmount,
- ); err != nil {
- return rt, err
- }
-
- var pub []byte
- if err := ReadElements(r, &pub); err != nil {
- return rt, err
- }
- copy(rt.SourcePubKey[:], pub)
-
- var numHops uint32
- if err := ReadElements(r, &numHops); err != nil {
- return rt, err
- }
-
- var hops []*route.Hop
- for i := uint32(0); i < numHops; i++ {
- hop, err := deserializeHop(r)
- if err != nil {
- return rt, err
- }
- hops = append(hops, hop)
- }
- rt.Hops = hops
-
- return rt, nil
-}
diff --git a/lnd/channeldb/payments_test.go b/lnd/channeldb/payments_test.go
deleted file mode 100644
index 7ef6868b..00000000
--- a/lnd/channeldb/payments_test.go
+++ /dev/null
@@ -1,715 +0,0 @@
-package channeldb
-
-import (
- "bytes"
- "math"
- "reflect"
- "testing"
- "time"
-
- "github.com/davecgh/go-spew/spew"
- "github.com/pkt-cash/pktd/btcec"
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/btcutil/util"
- "github.com/pkt-cash/pktd/lnd/channeldb/kvdb"
- "github.com/pkt-cash/pktd/lnd/lntypes"
- "github.com/pkt-cash/pktd/lnd/record"
- "github.com/pkt-cash/pktd/lnd/routing/route"
- "github.com/pkt-cash/pktd/pktwallet/walletdb"
- "github.com/stretchr/testify/require"
-)
-
-var (
- priv, _ = btcec.NewPrivateKey(btcec.S256())
- pub = priv.PubKey()
-
- testHop1 = &route.Hop{
- PubKeyBytes: route.NewVertex(pub),
- ChannelID: 12345,
- OutgoingTimeLock: 111,
- AmtToForward: 555,
- CustomRecords: record.CustomSet{
- 65536: []byte{},
- 80001: []byte{},
- },
- MPP: record.NewMPP(32, [32]byte{0x42}),
- }
-
- testHop2 = &route.Hop{
- PubKeyBytes: route.NewVertex(pub),
- ChannelID: 12345,
- OutgoingTimeLock: 111,
- AmtToForward: 555,
- LegacyPayload: true,
- }
-
- testRoute = route.Route{
- TotalTimeLock: 123,
- TotalAmount: 1234567,
- SourcePubKey: route.NewVertex(pub),
- Hops: []*route.Hop{
- testHop2,
- testHop1,
- },
- }
-)
-
-func makeFakeInfo() (*PaymentCreationInfo, *HTLCAttemptInfo) {
- var preimg lntypes.Preimage
- copy(preimg[:], rev[:])
-
- c := &PaymentCreationInfo{
- PaymentHash: preimg.Hash(),
- Value: 1000,
- // Use single second precision to avoid false positive test
- // failures due to the monotonic time component.
- CreationTime: time.Unix(time.Now().Unix(), 0),
- PaymentRequest: []byte(""),
- }
-
- a := &HTLCAttemptInfo{
- AttemptID: 44,
- SessionKey: priv,
- Route: testRoute,
- AttemptTime: time.Unix(100, 0),
- }
- return c, a
-}
-
-func TestSentPaymentSerialization(t *testing.T) {
- t.Parallel()
-
- c, s := makeFakeInfo()
-
- var b bytes.Buffer
- if err := serializePaymentCreationInfo(&b, c); err != nil {
- t.Fatalf("unable to serialize creation info: %v", err)
- }
-
- newCreationInfo, err := deserializePaymentCreationInfo(&b)
- if err != nil {
- t.Fatalf("unable to deserialize creation info: %v", err)
- }
-
- if !reflect.DeepEqual(c, newCreationInfo) {
- t.Fatalf("Payments do not match after "+
- "serialization/deserialization %v vs %v",
- spew.Sdump(c), spew.Sdump(newCreationInfo),
- )
- }
-
- b.Reset()
- if err := serializeHTLCAttemptInfo(&b, s); err != nil {
- t.Fatalf("unable to serialize info: %v", err)
- }
-
- newWireInfo, err := deserializeHTLCAttemptInfo(&b)
- if err != nil {
- t.Fatalf("unable to deserialize info: %v", err)
- }
- newWireInfo.AttemptID = s.AttemptID
-
- // First we verify all the records match up porperly, as they aren't
- // able to be properly compared using reflect.DeepEqual.
- err = assertRouteEqual(&s.Route, &newWireInfo.Route)
- if err != nil {
- t.Fatalf("Routes do not match after "+
- "serialization/deserialization: %v", err)
- }
-
- // Clear routes to allow DeepEqual to compare the remaining fields.
- newWireInfo.Route = route.Route{}
- s.Route = route.Route{}
-
- if !reflect.DeepEqual(s, newWireInfo) {
- s.SessionKey.Curve = nil
- newWireInfo.SessionKey.Curve = nil
- t.Fatalf("Payments do not match after "+
- "serialization/deserialization %v vs %v",
- spew.Sdump(s), spew.Sdump(newWireInfo),
- )
- }
-}
-
-// assertRouteEquals compares to routes for equality and returns an error if
-// they are not equal.
-func assertRouteEqual(a, b *route.Route) er.R {
- if !reflect.DeepEqual(a, b) {
- return er.Errorf("HTLCAttemptInfos don't match: %v vs %v",
- spew.Sdump(a), spew.Sdump(b))
- }
-
- return nil
-}
-
-func TestRouteSerialization(t *testing.T) {
- t.Parallel()
-
- var b bytes.Buffer
- if err := SerializeRoute(&b, testRoute); err != nil {
- t.Fatal(err)
- }
-
- r := bytes.NewReader(b.Bytes())
- route2, err := DeserializeRoute(r)
- if err != nil {
- t.Fatal(err)
- }
-
- // First we verify all the records match up porperly, as they aren't
- // able to be properly compared using reflect.DeepEqual.
- err = assertRouteEqual(&testRoute, &route2)
- if err != nil {
- t.Fatalf("routes not equal: \n%v vs \n%v",
- spew.Sdump(testRoute), spew.Sdump(route2))
- }
-}
-
-// deletePayment removes a payment with paymentHash from the payments database.
-func deletePayment(t *testing.T, db *DB, paymentHash lntypes.Hash, seqNr uint64) {
- t.Helper()
-
- err := kvdb.Update(db, func(tx kvdb.RwTx) er.R {
- payments := tx.ReadWriteBucket(paymentsRootBucket)
-
- // Delete the payment bucket.
- err := payments.DeleteNestedBucket(paymentHash[:])
- if err != nil {
- return err
- }
-
- key := make([]byte, 8)
- byteOrder.PutUint64(key, seqNr)
-
- // Delete the index that references this payment.
- indexes := tx.ReadWriteBucket(paymentsIndexBucket)
- return indexes.Delete(key)
- }, func() {})
-
- if err != nil {
- t.Fatalf("could not delete "+
- "payment: %v", err)
- }
-}
-
-// TestQueryPayments tests retrieval of payments with forwards and reversed
-// queries.
-func TestQueryPayments(t *testing.T) {
- // Define table driven test for QueryPayments.
- // Test payments have sequence indices [1, 3, 4, 5, 6, 7].
- // Note that the payment with index 7 has the same payment hash as 6,
- // and is stored in a nested bucket within payment 6 rather than being
- // its own entry in the payments bucket. We do this to test retrieval
- // of legacy payments.
- tests := []struct {
- name string
- query PaymentsQuery
- firstIndex uint64
- lastIndex uint64
-
- // expectedSeqNrs contains the set of sequence numbers we expect
- // our query to return.
- expectedSeqNrs []uint64
- }{
- {
- name: "IndexOffset at the end of the payments range",
- query: PaymentsQuery{
- IndexOffset: 7,
- MaxPayments: 7,
- Reversed: false,
- IncludeIncomplete: true,
- },
- firstIndex: 0,
- lastIndex: 0,
- expectedSeqNrs: nil,
- },
- {
- name: "query in forwards order, start at beginning",
- query: PaymentsQuery{
- IndexOffset: 0,
- MaxPayments: 2,
- Reversed: false,
- IncludeIncomplete: true,
- },
- firstIndex: 1,
- lastIndex: 3,
- expectedSeqNrs: []uint64{1, 3},
- },
- {
- name: "query in forwards order, start at end, overflow",
- query: PaymentsQuery{
- IndexOffset: 6,
- MaxPayments: 2,
- Reversed: false,
- IncludeIncomplete: true,
- },
- firstIndex: 7,
- lastIndex: 7,
- expectedSeqNrs: []uint64{7},
- },
- {
- name: "start at offset index outside of payments",
- query: PaymentsQuery{
- IndexOffset: 20,
- MaxPayments: 2,
- Reversed: false,
- IncludeIncomplete: true,
- },
- firstIndex: 0,
- lastIndex: 0,
- expectedSeqNrs: nil,
- },
- {
- name: "overflow in forwards order",
- query: PaymentsQuery{
- IndexOffset: 4,
- MaxPayments: math.MaxUint64,
- Reversed: false,
- IncludeIncomplete: true,
- },
- firstIndex: 5,
- lastIndex: 7,
- expectedSeqNrs: []uint64{5, 6, 7},
- },
- {
- name: "start at offset index outside of payments, " +
- "reversed order",
- query: PaymentsQuery{
- IndexOffset: 9,
- MaxPayments: 2,
- Reversed: true,
- IncludeIncomplete: true,
- },
- firstIndex: 6,
- lastIndex: 7,
- expectedSeqNrs: []uint64{6, 7},
- },
- {
- name: "query in reverse order, start at end",
- query: PaymentsQuery{
- IndexOffset: 0,
- MaxPayments: 2,
- Reversed: true,
- IncludeIncomplete: true,
- },
- firstIndex: 6,
- lastIndex: 7,
- expectedSeqNrs: []uint64{6, 7},
- },
- {
- name: "query in reverse order, starting in middle",
- query: PaymentsQuery{
- IndexOffset: 4,
- MaxPayments: 2,
- Reversed: true,
- IncludeIncomplete: true,
- },
- firstIndex: 1,
- lastIndex: 3,
- expectedSeqNrs: []uint64{1, 3},
- },
- {
- name: "query in reverse order, starting in middle, " +
- "with underflow",
- query: PaymentsQuery{
- IndexOffset: 4,
- MaxPayments: 5,
- Reversed: true,
- IncludeIncomplete: true,
- },
- firstIndex: 1,
- lastIndex: 3,
- expectedSeqNrs: []uint64{1, 3},
- },
- {
- name: "all payments in reverse, order maintained",
- query: PaymentsQuery{
- IndexOffset: 0,
- MaxPayments: 7,
- Reversed: true,
- IncludeIncomplete: true,
- },
- firstIndex: 1,
- lastIndex: 7,
- expectedSeqNrs: []uint64{1, 3, 4, 5, 6, 7},
- },
- {
- name: "exclude incomplete payments",
- query: PaymentsQuery{
- IndexOffset: 0,
- MaxPayments: 7,
- Reversed: false,
- IncludeIncomplete: false,
- },
- firstIndex: 0,
- lastIndex: 0,
- expectedSeqNrs: nil,
- },
- {
- name: "query payments at index gap",
- query: PaymentsQuery{
- IndexOffset: 1,
- MaxPayments: 7,
- Reversed: false,
- IncludeIncomplete: true,
- },
- firstIndex: 3,
- lastIndex: 7,
- expectedSeqNrs: []uint64{3, 4, 5, 6, 7},
- },
- {
- name: "query payments reverse before index gap",
- query: PaymentsQuery{
- IndexOffset: 3,
- MaxPayments: 7,
- Reversed: true,
- IncludeIncomplete: true,
- },
- firstIndex: 1,
- lastIndex: 1,
- expectedSeqNrs: []uint64{1},
- },
- {
- name: "query payments reverse on index gap",
- query: PaymentsQuery{
- IndexOffset: 2,
- MaxPayments: 7,
- Reversed: true,
- IncludeIncomplete: true,
- },
- firstIndex: 1,
- lastIndex: 1,
- expectedSeqNrs: []uint64{1},
- },
- {
- name: "query payments forward on index gap",
- query: PaymentsQuery{
- IndexOffset: 2,
- MaxPayments: 2,
- Reversed: false,
- IncludeIncomplete: true,
- },
- firstIndex: 3,
- lastIndex: 4,
- expectedSeqNrs: []uint64{3, 4},
- },
- }
-
- for _, tt := range tests {
- tt := tt
- t.Run(tt.name, func(t *testing.T) {
- t.Parallel()
-
- db, cleanup, err := MakeTestDB()
- if err != nil {
- t.Fatalf("unable to init db: %v", err)
- }
- defer cleanup()
-
- // Make a preliminary query to make sure it's ok to
- // query when we have no payments.
- resp, err := db.QueryPayments(tt.query)
- util.RequireNoErr(t, err)
- require.Len(t, resp.Payments, 0)
-
- // Populate the database with a set of test payments.
- // We create 6 original payments, deleting the payment
- // at index 2 so that we cover the case where sequence
- // numbers are missing. We also add a duplicate payment
- // to the last payment added to test the legacy case
- // where we have duplicates in the nested duplicates
- // bucket.
- nonDuplicatePayments := 6
- pControl := NewPaymentControl(db)
-
- for i := 0; i < nonDuplicatePayments; i++ {
- // Generate a test payment.
- info, _, _, err := genInfo()
- if err != nil {
- t.Fatalf("unable to create test "+
- "payment: %v", err)
- }
-
- // Create a new payment entry in the database.
- err = pControl.InitPayment(info.PaymentHash, info)
- if err != nil {
- t.Fatalf("unable to initialize "+
- "payment in database: %v", err)
- }
-
- // Immediately delete the payment with index 2.
- if i == 1 {
- pmt, err := pControl.FetchPayment(
- info.PaymentHash,
- )
- util.RequireNoErr(t, err)
-
- deletePayment(t, db, info.PaymentHash,
- pmt.SequenceNum)
- }
-
- // If we are on the last payment entry, add a
- // duplicate payment with sequence number equal
- // to the parent payment + 1.
- if i == (nonDuplicatePayments - 1) {
- pmt, err := pControl.FetchPayment(
- info.PaymentHash,
- )
- util.RequireNoErr(t, err)
-
- appendDuplicatePayment(
- t, pControl.db,
- info.PaymentHash,
- pmt.SequenceNum+1,
- )
- }
- }
-
- // Fetch all payments in the database.
- allPayments, err := db.FetchPayments()
- if err != nil {
- t.Fatalf("payments could not be fetched from "+
- "database: %v", err)
- }
-
- if len(allPayments) != 6 {
- t.Fatalf("Number of payments received does not "+
- "match expected one. Got %v, want %v.",
- len(allPayments), 6)
- }
-
- querySlice, err := db.QueryPayments(tt.query)
- if err != nil {
- t.Fatalf("unexpected error: %v", err)
- }
- if tt.firstIndex != querySlice.FirstIndexOffset ||
- tt.lastIndex != querySlice.LastIndexOffset {
- t.Errorf("First or last index does not match "+
- "expected index. Want (%d, %d), got (%d, %d).",
- tt.firstIndex, tt.lastIndex,
- querySlice.FirstIndexOffset,
- querySlice.LastIndexOffset)
- }
-
- if len(querySlice.Payments) != len(tt.expectedSeqNrs) {
- t.Errorf("expected: %v payments, got: %v",
- len(allPayments), len(querySlice.Payments))
- }
-
- for i, seqNr := range tt.expectedSeqNrs {
- q := querySlice.Payments[i]
- if seqNr != q.SequenceNum {
- t.Errorf("sequence numbers do not match, "+
- "got %v, want %v", q.SequenceNum, seqNr)
- }
- }
- })
- }
-}
-
-// TestFetchPaymentWithSequenceNumber tests lookup of payments with their
-// sequence number. It sets up one payment with no duplicates, and another with
-// two duplicates in its duplicates bucket then uses these payments to test the
-// case where a specific duplicate is not found and the duplicates bucket is not
-// present when we expect it to be.
-func TestFetchPaymentWithSequenceNumber(t *testing.T) {
- db, cleanup, err := MakeTestDB()
- util.RequireNoErr(t, err)
-
- defer cleanup()
-
- pControl := NewPaymentControl(db)
-
- // Generate a test payment which does not have duplicates.
- noDuplicates, _, _, err := genInfo()
- util.RequireNoErr(t, err)
-
- // Create a new payment entry in the database.
- err = pControl.InitPayment(noDuplicates.PaymentHash, noDuplicates)
- util.RequireNoErr(t, err)
-
- // Fetch the payment so we can get its sequence nr.
- noDuplicatesPayment, err := pControl.FetchPayment(
- noDuplicates.PaymentHash,
- )
- util.RequireNoErr(t, err)
-
- // Generate a test payment which we will add duplicates to.
- hasDuplicates, _, _, err := genInfo()
- util.RequireNoErr(t, err)
-
- // Create a new payment entry in the database.
- err = pControl.InitPayment(hasDuplicates.PaymentHash, hasDuplicates)
- util.RequireNoErr(t, err)
-
- // Fetch the payment so we can get its sequence nr.
- hasDuplicatesPayment, err := pControl.FetchPayment(
- hasDuplicates.PaymentHash,
- )
- util.RequireNoErr(t, err)
-
- // We declare the sequence numbers used here so that we can reference
- // them in tests.
- var (
- duplicateOneSeqNr = hasDuplicatesPayment.SequenceNum + 1
- duplicateTwoSeqNr = hasDuplicatesPayment.SequenceNum + 2
- )
-
- // Add two duplicates to our second payment.
- appendDuplicatePayment(
- t, db, hasDuplicates.PaymentHash, duplicateOneSeqNr,
- )
- appendDuplicatePayment(
- t, db, hasDuplicates.PaymentHash, duplicateTwoSeqNr,
- )
-
- tests := []struct {
- name string
- paymentHash lntypes.Hash
- sequenceNumber uint64
- expectedErr *er.ErrorCode
- }{
- {
- name: "lookup payment without duplicates",
- paymentHash: noDuplicates.PaymentHash,
- sequenceNumber: noDuplicatesPayment.SequenceNum,
- expectedErr: nil,
- },
- {
- name: "lookup payment with duplicates",
- paymentHash: hasDuplicates.PaymentHash,
- sequenceNumber: hasDuplicatesPayment.SequenceNum,
- expectedErr: nil,
- },
- {
- name: "lookup first duplicate",
- paymentHash: hasDuplicates.PaymentHash,
- sequenceNumber: duplicateOneSeqNr,
- expectedErr: nil,
- },
- {
- name: "lookup second duplicate",
- paymentHash: hasDuplicates.PaymentHash,
- sequenceNumber: duplicateTwoSeqNr,
- expectedErr: nil,
- },
- {
- name: "lookup non-existent duplicate",
- paymentHash: hasDuplicates.PaymentHash,
- sequenceNumber: 999999,
- expectedErr: ErrDuplicateNotFound,
- },
- {
- name: "lookup duplicate, no duplicates bucket",
- paymentHash: noDuplicates.PaymentHash,
- sequenceNumber: duplicateTwoSeqNr,
- expectedErr: ErrNoDuplicateBucket,
- },
- }
-
- for _, test := range tests {
- test := test
-
- t.Run(test.name, func(t *testing.T) {
- err := kvdb.Update(db,
- func(tx walletdb.ReadWriteTx) er.R {
-
- var seqNrBytes [8]byte
- byteOrder.PutUint64(
- seqNrBytes[:], test.sequenceNumber,
- )
-
- _, err := fetchPaymentWithSequenceNumber(
- tx, test.paymentHash, seqNrBytes[:],
- )
- return err
- }, func() {})
- require.True(t, er.Cis(test.expectedErr, err))
- })
- }
-}
-
-// appendDuplicatePayment adds a duplicate payment to an existing payment. Note
-// that this function requires a unique sequence number.
-//
-// This code is *only* intended to replicate legacy duplicate payments in lnd,
-// our current schema does not allow duplicates.
-func appendDuplicatePayment(t *testing.T, db *DB, paymentHash lntypes.Hash,
- seqNr uint64) {
-
- err := kvdb.Update(db, func(tx walletdb.ReadWriteTx) er.R {
- bucket, err := fetchPaymentBucketUpdate(
- tx, paymentHash,
- )
- if err != nil {
- return err
- }
-
- // Create the duplicates bucket if it is not
- // present.
- dup, err := bucket.CreateBucketIfNotExists(
- duplicatePaymentsBucket,
- )
- if err != nil {
- return err
- }
-
- var sequenceKey [8]byte
- byteOrder.PutUint64(sequenceKey[:], seqNr)
-
- // Create duplicate payments for the two dup
- // sequence numbers we've setup.
- putDuplicatePayment(t, dup, sequenceKey[:], paymentHash)
-
- // Finally, once we have created our entry we add an index for
- // it.
- err = createPaymentIndexEntry(tx, sequenceKey[:], paymentHash)
- util.RequireNoErr(t, err)
-
- return nil
- }, func() {})
- if err != nil {
- t.Fatalf("could not create payment: %v", err)
- }
-}
-
-// putDuplicatePayment creates a duplicate payment in the duplicates bucket
-// provided with the minimal information required for successful reading.
-func putDuplicatePayment(t *testing.T, duplicateBucket kvdb.RwBucket,
- sequenceKey []byte, paymentHash lntypes.Hash) {
-
- paymentBucket, err := duplicateBucket.CreateBucketIfNotExists(
- sequenceKey,
- )
- util.RequireNoErr(t, err)
-
- err = paymentBucket.Put(duplicatePaymentSequenceKey, sequenceKey)
- util.RequireNoErr(t, err)
-
- // Generate fake information for the duplicate payment.
- info, _, _, err := genInfo()
- util.RequireNoErr(t, err)
-
- // Write the payment info to disk under the creation info key. This code
- // is copied rather than using serializePaymentCreationInfo to ensure
- // we always write in the legacy format used by duplicate payments.
- var b bytes.Buffer
- var scratch [8]byte
- _, errr := b.Write(paymentHash[:])
- require.NoError(t, errr)
-
- byteOrder.PutUint64(scratch[:], uint64(info.Value))
- _, errr = b.Write(scratch[:])
- require.NoError(t, errr)
-
- err = serializeTime(&b, info.CreationTime)
- util.RequireNoErr(t, err)
-
- byteOrder.PutUint32(scratch[:4], 0)
- _, errr = b.Write(scratch[:4])
- require.NoError(t, errr)
-
- // Get the PaymentCreationInfo.
- err = paymentBucket.Put(duplicatePaymentCreationInfoKey, b.Bytes())
- util.RequireNoErr(t, err)
-}
diff --git a/lnd/channeldb/peers.go b/lnd/channeldb/peers.go
deleted file mode 100644
index 2e818a41..00000000
--- a/lnd/channeldb/peers.go
+++ /dev/null
@@ -1,122 +0,0 @@
-package channeldb
-
-import (
- "bytes"
- "time"
-
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/lnd/channeldb/kvdb"
- "github.com/pkt-cash/pktd/lnd/routing/route"
-)
-
-var (
- // peersBucket is the name of a top level bucket in which we store
- // information about our peers. Information for different peers is
- // stored in buckets keyed by their public key.
- //
- //
- // peers-bucket
- // |
- // |--
- // | |--flap-count-key:
- // |
- // |--
- // | |--flap-count-key:
- peersBucket = []byte("peers-bucket")
-
- // flapCountKey is a key used in the peer pubkey sub-bucket that stores
- // the timestamp of a peer's last flap count and its all time flap
- // count.
- flapCountKey = []byte("flap-count")
-)
-
-var (
- // ErrNoPeerBucket is returned when we try to read entries for a peer
- // that is not tracked.
- ErrNoPeerBucket = Err.CodeWithDetail("ErrNoPeerBucket", "peer bucket not found")
-)
-
-// FlapCount contains information about a peer's flap count.
-type FlapCount struct {
- // Count provides the total flap count for a peer.
- Count uint32
-
- // LastFlap is the timestamp of the last flap recorded for a peer.
- LastFlap time.Time
-}
-
-// WriteFlapCounts writes the flap count for a set of peers to disk, creating a
-// bucket for the peer's pubkey if necessary. Note that this function overwrites
-// the current value.
-func (d *DB) WriteFlapCounts(flapCounts map[route.Vertex]*FlapCount) er.R {
- return kvdb.Update(d, func(tx kvdb.RwTx) er.R {
- // Run through our set of flap counts and record them for
- // each peer, creating a bucket for the peer pubkey if required.
- for peer, flapCount := range flapCounts {
- peers := tx.ReadWriteBucket(peersBucket)
-
- peerBucket, err := peers.CreateBucketIfNotExists(
- peer[:],
- )
- if err != nil {
- return err
- }
-
- var b bytes.Buffer
- errr := serializeTime(&b, flapCount.LastFlap)
- if errr != nil {
- return errr
- }
-
- if errr = WriteElement(&b, flapCount.Count); errr != nil {
- return errr
- }
-
- err = peerBucket.Put(flapCountKey, b.Bytes())
- if err != nil {
- return err
- }
- }
-
- return nil
- }, func() {})
-}
-
-// ReadFlapCount attempts to read the flap count for a peer, failing if the
-// peer is not found or we do not have flap count stored.
-func (d *DB) ReadFlapCount(pubkey route.Vertex) (*FlapCount, er.R) {
- var flapCount FlapCount
-
- if err := kvdb.View(d, func(tx kvdb.RTx) er.R {
- peers := tx.ReadBucket(peersBucket)
-
- peerBucket := peers.NestedReadBucket(pubkey[:])
- if peerBucket == nil {
- return ErrNoPeerBucket.Default()
- }
-
- flapBytes := peerBucket.Get(flapCountKey)
- if flapBytes == nil {
- return er.Errorf("flap count not recorded for: %v",
- pubkey)
- }
-
- var (
- err er.R
- r = bytes.NewReader(flapBytes)
- )
-
- flapCount.LastFlap, err = deserializeTime(r)
- if err != nil {
- return err
- }
-
- return ReadElements(r, &flapCount.Count)
- }, func() {
- flapCount = FlapCount{}
- }); err != nil {
- return nil, err
- }
-
- return &flapCount, nil
-}
diff --git a/lnd/channeldb/peers_test.go b/lnd/channeldb/peers_test.go
deleted file mode 100644
index c90dbf5f..00000000
--- a/lnd/channeldb/peers_test.go
+++ /dev/null
@@ -1,51 +0,0 @@
-package channeldb
-
-import (
- "testing"
- "time"
-
- "github.com/pkt-cash/pktd/btcutil/util"
- "github.com/pkt-cash/pktd/lnd/routing/route"
- "github.com/stretchr/testify/require"
-)
-
-// TestFlapCount tests lookup and writing of flap count to disk.
-func TestFlapCount(t *testing.T) {
- db, cleanup, err := MakeTestDB()
- util.RequireNoErr(t, err)
- defer cleanup()
-
- // Try to read flap count for a peer that we have no records for.
- _, err = db.ReadFlapCount(testPub)
- require.True(t, ErrNoPeerBucket.Is(err))
-
- var (
- testPub2 = route.Vertex{2, 2, 2}
- peer1FlapCount = &FlapCount{
- Count: 20,
- LastFlap: time.Unix(100, 23),
- }
- peer2FlapCount = &FlapCount{
- Count: 39,
- LastFlap: time.Unix(200, 23),
- }
- )
-
- peers := map[route.Vertex]*FlapCount{
- testPub: peer1FlapCount,
- testPub2: peer2FlapCount,
- }
-
- err = db.WriteFlapCounts(peers)
- util.RequireNoErr(t, err)
-
- // Lookup flap count for our first pubkey.
- count, err := db.ReadFlapCount(testPub)
- util.RequireNoErr(t, err)
- require.Equal(t, peer1FlapCount, count)
-
- // Lookup our flap count for the second peer.
- count, err = db.ReadFlapCount(testPub2)
- util.RequireNoErr(t, err)
- require.Equal(t, peer2FlapCount, count)
-}
diff --git a/lnd/channeldb/reject_cache.go b/lnd/channeldb/reject_cache.go
deleted file mode 100644
index acadb878..00000000
--- a/lnd/channeldb/reject_cache.go
+++ /dev/null
@@ -1,95 +0,0 @@
-package channeldb
-
-// rejectFlags is a compact representation of various metadata stored by the
-// reject cache about a particular channel.
-type rejectFlags uint8
-
-const (
- // rejectFlagExists is a flag indicating whether the channel exists,
- // i.e. the channel is open and has a recent channel update. If this
- // flag is not set, the channel is either a zombie or unknown.
- rejectFlagExists rejectFlags = 1 << iota
-
- // rejectFlagZombie is a flag indicating whether the channel is a
- // zombie, i.e. the channel is open but has no recent channel updates.
- rejectFlagZombie
-)
-
-// packRejectFlags computes the rejectFlags corresponding to the passed boolean
-// values indicating whether the edge exists or is a zombie.
-func packRejectFlags(exists, isZombie bool) rejectFlags {
- var flags rejectFlags
- if exists {
- flags |= rejectFlagExists
- }
- if isZombie {
- flags |= rejectFlagZombie
- }
-
- return flags
-}
-
-// unpack returns the booleans packed into the rejectFlags. The first indicates
-// if the edge exists in our graph, the second indicates if the edge is a
-// zombie.
-func (f rejectFlags) unpack() (bool, bool) {
- return f&rejectFlagExists == rejectFlagExists,
- f&rejectFlagZombie == rejectFlagZombie
-}
-
-// rejectCacheEntry caches frequently accessed information about a channel,
-// including the timestamps of its latest edge policies and whether or not the
-// channel exists in the graph.
-type rejectCacheEntry struct {
- upd1Time int64
- upd2Time int64
- flags rejectFlags
-}
-
-// rejectCache is an in-memory cache used to improve the performance of
-// HasChannelEdge. It caches information about the whether or channel exists, as
-// well as the most recent timestamps for each policy (if they exists).
-type rejectCache struct {
- n int
- edges map[uint64]rejectCacheEntry
-}
-
-// newRejectCache creates a new rejectCache with maximum capacity of n entries.
-func newRejectCache(n int) *rejectCache {
- return &rejectCache{
- n: n,
- edges: make(map[uint64]rejectCacheEntry, n),
- }
-}
-
-// get returns the entry from the cache for chanid, if it exists.
-func (c *rejectCache) get(chanid uint64) (rejectCacheEntry, bool) {
- entry, ok := c.edges[chanid]
- return entry, ok
-}
-
-// insert adds the entry to the reject cache. If an entry for chanid already
-// exists, it will be replaced with the new entry. If the entry doesn't exists,
-// it will be inserted to the cache, performing a random eviction if the cache
-// is at capacity.
-func (c *rejectCache) insert(chanid uint64, entry rejectCacheEntry) {
- // If entry exists, replace it.
- if _, ok := c.edges[chanid]; ok {
- c.edges[chanid] = entry
- return
- }
-
- // Otherwise, evict an entry at random and insert.
- if len(c.edges) == c.n {
- for id := range c.edges {
- delete(c.edges, id)
- break
- }
- }
- c.edges[chanid] = entry
-}
-
-// remove deletes an entry for chanid from the cache, if it exists.
-func (c *rejectCache) remove(chanid uint64) {
- delete(c.edges, chanid)
-}
diff --git a/lnd/channeldb/reject_cache_test.go b/lnd/channeldb/reject_cache_test.go
deleted file mode 100644
index 6974f425..00000000
--- a/lnd/channeldb/reject_cache_test.go
+++ /dev/null
@@ -1,107 +0,0 @@
-package channeldb
-
-import (
- "reflect"
- "testing"
-)
-
-// TestRejectCache checks the behavior of the rejectCache with respect to insertion,
-// eviction, and removal of cache entries.
-func TestRejectCache(t *testing.T) {
- const cacheSize = 100
-
- // Create a new reject cache with the configured max size.
- c := newRejectCache(cacheSize)
-
- // As a sanity check, assert that querying the empty cache does not
- // return an entry.
- _, ok := c.get(0)
- if ok {
- t.Fatalf("reject cache should be empty")
- }
-
- // Now, fill up the cache entirely.
- for i := uint64(0); i < cacheSize; i++ {
- c.insert(i, entryForInt(i))
- }
-
- // Assert that the cache has all of the entries just inserted, since no
- // eviction should occur until we try to surpass the max size.
- assertHasEntries(t, c, 0, cacheSize)
-
- // Now, insert a new element that causes the cache to evict an element.
- c.insert(cacheSize, entryForInt(cacheSize))
-
- // Assert that the cache has this last entry, as the cache should evict
- // some prior element and not the newly inserted one.
- assertHasEntries(t, c, cacheSize, cacheSize)
-
- // Iterate over all inserted elements and construct a set of the evicted
- // elements.
- evicted := make(map[uint64]struct{})
- for i := uint64(0); i < cacheSize+1; i++ {
- _, ok := c.get(i)
- if !ok {
- evicted[i] = struct{}{}
- }
- }
-
- // Assert that exactly one element has been evicted.
- numEvicted := len(evicted)
- if numEvicted != 1 {
- t.Fatalf("expected one evicted entry, got: %d", numEvicted)
- }
-
- // Remove the highest item which initially caused the eviction and
- // reinsert the element that was evicted prior.
- c.remove(cacheSize)
- for i := range evicted {
- c.insert(i, entryForInt(i))
- }
-
- // Since the removal created an extra slot, the last insertion should
- // not have caused an eviction and the entries for all channels in the
- // original set that filled the cache should be present.
- assertHasEntries(t, c, 0, cacheSize)
-
- // Finally, reinsert the existing set back into the cache and test that
- // the cache still has all the entries. If the randomized eviction were
- // happening on inserts for existing cache items, we expect this to fail
- // with high probability.
- for i := uint64(0); i < cacheSize; i++ {
- c.insert(i, entryForInt(i))
- }
- assertHasEntries(t, c, 0, cacheSize)
-
-}
-
-// assertHasEntries queries the reject cache for all channels in the range [start,
-// end), asserting that they exist and their value matches the entry produced by
-// entryForInt.
-func assertHasEntries(t *testing.T, c *rejectCache, start, end uint64) {
- t.Helper()
-
- for i := start; i < end; i++ {
- entry, ok := c.get(i)
- if !ok {
- t.Fatalf("reject cache should contain chan %d", i)
- }
-
- expEntry := entryForInt(i)
- if !reflect.DeepEqual(entry, expEntry) {
- t.Fatalf("entry mismatch, want: %v, got: %v",
- expEntry, entry)
- }
- }
-}
-
-// entryForInt generates a unique rejectCacheEntry given an integer.
-func entryForInt(i uint64) rejectCacheEntry {
- exists := i%2 == 0
- isZombie := i%3 == 0
- return rejectCacheEntry{
- upd1Time: int64(2 * i),
- upd2Time: int64(2*i + 1),
- flags: packRejectFlags(exists, isZombie),
- }
-}
diff --git a/lnd/channeldb/reports.go b/lnd/channeldb/reports.go
deleted file mode 100644
index b521a1c2..00000000
--- a/lnd/channeldb/reports.go
+++ /dev/null
@@ -1,354 +0,0 @@
-package channeldb
-
-import (
- "bytes"
- "io"
-
- "github.com/pkt-cash/pktd/btcutil"
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/chaincfg/chainhash"
- "github.com/pkt-cash/pktd/lnd/channeldb/kvdb"
- "github.com/pkt-cash/pktd/lnd/tlv"
- "github.com/pkt-cash/pktd/wire"
-)
-
-var (
- // closeSummaryBucket is a top level bucket which holds additional
- // information about channel closes. It nests channels by chainhash
- // and channel point.
- // [closeSummaryBucket]
- // [chainHashBucket]
- // [channelBucket]
- // [resolversBucket]
- closeSummaryBucket = []byte("close-summaries")
-
- // resolversBucket holds the outcome of a channel's resolvers. It is
- // nested under a channel and chainhash bucket in the close summaries
- // bucket.
- resolversBucket = []byte("resolvers-bucket")
-)
-
-var (
- // ErrNoChainHashBucket is returned when we have not created a bucket
- // for the current chain hash.
- ErrNoChainHashBucket = Err.CodeWithDetail("ErrNoChainHashBucket", "no chain hash bucket")
-
- // ErrNoChannelSummaries is returned when a channel is not found in the
- // chain hash bucket.
- ErrNoChannelSummaries = Err.CodeWithDetail("ErrNoChannelSummaries", "channel bucket not found")
-
- amountType tlv.Type = 1
- resolverType tlv.Type = 2
- outcomeType tlv.Type = 3
- spendTxIDType tlv.Type = 4
-)
-
-// ResolverType indicates the type of resolver that was resolved on chain.
-type ResolverType uint8
-
-const (
- // ResolverTypeAnchor represents a resolver for an anchor output.
- ResolverTypeAnchor ResolverType = 0
-
- // ResolverTypeIncomingHtlc represents resolution of an incoming htlc.
- ResolverTypeIncomingHtlc ResolverType = 1
-
- // ResolverTypeOutgoingHtlc represents resolution of an outgoing htlc.
- ResolverTypeOutgoingHtlc ResolverType = 2
-
- // ResolverTypeCommit represents resolution of our time locked commit
- // when we force close.
- ResolverTypeCommit ResolverType = 3
-)
-
-// ResolverOutcome indicates the outcome for the resolver that that the contract
-// court reached. This state is not necessarily final, since htlcs on our own
-// commitment are resolved across two resolvers.
-type ResolverOutcome uint8
-
-const (
- // ResolverOutcomeClaimed indicates that funds were claimed on chain.
- ResolverOutcomeClaimed ResolverOutcome = 0
-
- // ResolverOutcomeUnclaimed indicates that we did not claim our funds on
- // chain. This may be the case for anchors that we did not sweep, or
- // outputs that were not economical to sweep.
- ResolverOutcomeUnclaimed ResolverOutcome = 1
-
- // ResolverOutcomeAbandoned indicates that we did not attempt to claim
- // an output on chain. This is the case for htlcs that we could not
- // decode to claim, or invoice which we fail when an attempt is made
- // to settle them on chain.
- ResolverOutcomeAbandoned ResolverOutcome = 2
-
- // ResolverOutcomeTimeout indicates that a contract was timed out on
- // chain.
- ResolverOutcomeTimeout ResolverOutcome = 3
-
- // ResolverOutcomeFirstStage indicates that a htlc had to be claimed
- // over two stages, with this outcome representing the confirmation
- // of our success/timeout tx.
- ResolverOutcomeFirstStage ResolverOutcome = 4
-)
-
-// ResolverReport provides an account of the outcome of a resolver. This differs
-// from a ContractReport because it does not necessarily fully resolve the
-// contract; each step of two stage htlc resolution is included.
-type ResolverReport struct {
- // OutPoint is the on chain outpoint that was spent as a result of this
- // resolution. When an output is directly resolved (eg, commitment
- // sweeps and single stage htlcs on the remote party's output) this
- // is an output on the commitment tx that was broadcast. When we resolve
- // across two stages (eg, htlcs on our own force close commit), the
- // first stage outpoint is the output on our commitment and the second
- // stage output is the spend from our htlc success/timeout tx.
- OutPoint wire.OutPoint
-
- // Amount is the value of the output referenced above.
- Amount btcutil.Amount
-
- // ResolverType indicates the type of resolution that occurred.
- ResolverType
-
- // ResolverOutcome indicates the outcome of the resolver.
- ResolverOutcome
-
- // SpendTxID is the transaction ID of the spending transaction that
- // claimed the outpoint. This may be a sweep transaction, or a first
- // stage success/timeout transaction.
- SpendTxID *chainhash.Hash
-}
-
-// PutResolverReport creates and commits a transaction that is used to write a
-// resolver report to disk.
-func (d *DB) PutResolverReport(tx kvdb.RwTx, chainHash chainhash.Hash,
- channelOutpoint *wire.OutPoint, report *ResolverReport) er.R {
-
- putReportFunc := func(tx kvdb.RwTx) er.R {
- return putReport(tx, chainHash, channelOutpoint, report)
- }
-
- // If the transaction is nil, we'll create a new one.
- if tx == nil {
- return kvdb.Update(d, putReportFunc, func() {})
- }
-
- // Otherwise, we can write the report to disk using the existing
- // transaction.
- return putReportFunc(tx)
-}
-
-// putReport puts a report in the bucket provided, with its outpoint as its key.
-func putReport(tx kvdb.RwTx, chainHash chainhash.Hash,
- channelOutpoint *wire.OutPoint, report *ResolverReport) er.R {
-
- channelBucket, err := fetchReportWriteBucket(
- tx, chainHash, channelOutpoint,
- )
- if err != nil {
- return err
- }
-
- // If the resolvers bucket does not exist yet, create it.
- resolvers, err := channelBucket.CreateBucketIfNotExists(
- resolversBucket,
- )
- if err != nil {
- return err
- }
-
- var valueBuf bytes.Buffer
- if err := serializeReport(&valueBuf, report); err != nil {
- return err
- }
-
- // Finally write our outpoint to be used as the key for this record.
- var keyBuf bytes.Buffer
- if err := writeOutpoint(&keyBuf, &report.OutPoint); err != nil {
- return err
- }
-
- return resolvers.Put(keyBuf.Bytes(), valueBuf.Bytes())
-}
-
-// serializeReport serialized a report using a TLV stream to allow for optional
-// fields.
-func serializeReport(w io.Writer, report *ResolverReport) er.R {
- amt := uint64(report.Amount)
- resolver := uint8(report.ResolverType)
- outcome := uint8(report.ResolverOutcome)
-
- // Create a set of TLV records for the values we know to be present.
- records := []tlv.Record{
- tlv.MakePrimitiveRecord(amountType, &amt),
- tlv.MakePrimitiveRecord(resolverType, &resolver),
- tlv.MakePrimitiveRecord(outcomeType, &outcome),
- }
-
- // If our spend txid is non-nil, we add a tlv entry for it.
- if report.SpendTxID != nil {
- var spendBuf bytes.Buffer
- err := WriteElement(&spendBuf, *report.SpendTxID)
- if err != nil {
- return err
- }
- spendBytes := spendBuf.Bytes()
-
- records = append(records, tlv.MakePrimitiveRecord(
- spendTxIDType, &spendBytes,
- ))
- }
-
- // Create our stream and encode it.
- tlvStream, err := tlv.NewStream(records...)
- if err != nil {
- return err
- }
-
- return tlvStream.Encode(w)
-}
-
-// FetchChannelReports fetches the set of reports for a channel.
-func (d DB) FetchChannelReports(chainHash chainhash.Hash,
- outPoint *wire.OutPoint) ([]*ResolverReport, er.R) {
-
- var reports []*ResolverReport
-
- if err := kvdb.View(d, func(tx kvdb.RTx) er.R {
- chanBucket, err := fetchReportReadBucket(
- tx, chainHash, outPoint,
- )
- if err != nil {
- return err
- }
-
- // If there are no resolvers for this channel, we simply
- // return nil, because nothing has been persisted yet.
- resolvers := chanBucket.NestedReadBucket(resolversBucket)
- if resolvers == nil {
- return nil
- }
-
- // Run through each resolution and add it to our set of
- // resolutions.
- return resolvers.ForEach(func(k, v []byte) er.R {
- // Deserialize the contents of our field.
- r := bytes.NewReader(v)
- report, err := deserializeReport(r)
- if err != nil {
- return err
- }
-
- // Once we have read our values out, set the outpoint
- // on the report using the key.
- r = bytes.NewReader(k)
- if err := ReadElement(r, &report.OutPoint); err != nil {
- return err
- }
-
- reports = append(reports, report)
-
- return nil
- })
- }, func() {
- reports = nil
- }); err != nil {
- return nil, err
- }
-
- return reports, nil
-}
-
-// deserializeReport gets a resolver report from a tlv stream. The outpoint on
-// the resolver will not be set because we key reports by their outpoint, and
-// this function reads only the values saved in the stream.
-func deserializeReport(r io.Reader) (*ResolverReport, er.R) {
- var (
- resolver, outcome uint8
- amt uint64
- spentTx []byte
- )
-
- tlvStream, err := tlv.NewStream(
- tlv.MakePrimitiveRecord(amountType, &amt),
- tlv.MakePrimitiveRecord(resolverType, &resolver),
- tlv.MakePrimitiveRecord(outcomeType, &outcome),
- tlv.MakePrimitiveRecord(spendTxIDType, &spentTx),
- )
- if err != nil {
- return nil, err
- }
-
- if err := tlvStream.Decode(r); err != nil {
- return nil, err
- }
-
- report := &ResolverReport{
- Amount: btcutil.Amount(amt),
- ResolverOutcome: ResolverOutcome(outcome),
- ResolverType: ResolverType(resolver),
- }
-
- // If our spend tx is set, we set it on our report.
- if len(spentTx) != 0 {
- spendTx, err := chainhash.NewHash(spentTx)
- if err != nil {
- return nil, err
- }
- report.SpendTxID = spendTx
- }
-
- return report, nil
-}
-
-// fetchReportWriteBucket returns a write channel bucket within the reports
-// top level bucket. If the channel's bucket does not yet exist, it will be
-// created.
-func fetchReportWriteBucket(tx kvdb.RwTx, chainHash chainhash.Hash,
- outPoint *wire.OutPoint) (kvdb.RwBucket, er.R) {
-
- // Get the channel close summary bucket.
- closedBucket := tx.ReadWriteBucket(closeSummaryBucket)
-
- // Create the chain hash bucket if it does not exist.
- chainHashBkt, err := closedBucket.CreateBucketIfNotExists(chainHash[:])
- if err != nil {
- return nil, err
- }
-
- var chanPointBuf bytes.Buffer
- if err := writeOutpoint(&chanPointBuf, outPoint); err != nil {
- return nil, err
- }
-
- return chainHashBkt.CreateBucketIfNotExists(chanPointBuf.Bytes())
-}
-
-// fetchReportReadBucket returns a read channel bucket within the reports
-// top level bucket. If any bucket along the way does not exist, it will error.
-func fetchReportReadBucket(tx kvdb.RTx, chainHash chainhash.Hash,
- outPoint *wire.OutPoint) (kvdb.RBucket, er.R) {
-
- // First fetch the top level channel close summary bucket.
- closeBucket := tx.ReadBucket(closeSummaryBucket)
-
- // Next we get the chain hash bucket for our current chain.
- chainHashBucket := closeBucket.NestedReadBucket(chainHash[:])
- if chainHashBucket == nil {
- return nil, ErrNoChainHashBucket.Default()
- }
-
- // With the bucket for the node and chain fetched, we can now go down
- // another level, for the channel itself.
- var chanPointBuf bytes.Buffer
- if err := writeOutpoint(&chanPointBuf, outPoint); err != nil {
- return nil, err
- }
-
- chanBucket := chainHashBucket.NestedReadBucket(chanPointBuf.Bytes())
- if chanBucket == nil {
- return nil, ErrNoChannelSummaries.Default()
- }
-
- return chanBucket, nil
-}
diff --git a/lnd/channeldb/reports_test.go b/lnd/channeldb/reports_test.go
deleted file mode 100644
index 99d0c626..00000000
--- a/lnd/channeldb/reports_test.go
+++ /dev/null
@@ -1,220 +0,0 @@
-package channeldb
-
-import (
- "bytes"
- "testing"
-
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/btcutil/util"
- "github.com/pkt-cash/pktd/chaincfg/chainhash"
- "github.com/pkt-cash/pktd/lnd/channeldb/kvdb"
- "github.com/pkt-cash/pktd/wire"
- "github.com/stretchr/testify/require"
-)
-
-var (
- testChainHash = [chainhash.HashSize]byte{
- 0x51, 0xb6, 0x37, 0xd8, 0xfc, 0xd2, 0xc6, 0xda,
- 0x48, 0x59, 0xe6, 0x96, 0x31, 0x13, 0xa1, 0x17,
- 0x2d, 0xe7, 0x93, 0xe4,
- }
-
- testChanPoint1 = wire.OutPoint{
- Hash: chainhash.Hash{
- 0x51, 0xb6, 0x37, 0xd8, 0xfc, 0xd2, 0xc6, 0xda,
- 0x48, 0x59, 0xe6, 0x96, 0x31, 0x13, 0xa1, 0x17,
- 0x2d, 0xe7, 0x93, 0xe4,
- },
- Index: 1,
- }
-)
-
-// TestPersistReport tests the writing and retrieval of a report on disk with
-// and without a spend txid.
-func TestPersistReport(t *testing.T) {
- tests := []struct {
- name string
- spendTxID *chainhash.Hash
- }{
- {
- name: "Non-nil spend txid",
- spendTxID: &testChanPoint1.Hash,
- },
- {
- name: "Nil spend txid",
- spendTxID: nil,
- },
- }
-
- for _, test := range tests {
- test := test
-
- t.Run(test.name, func(t *testing.T) {
- db, cleanup, err := MakeTestDB()
- util.RequireNoErr(t, err)
- defer cleanup()
-
- channelOutpoint := testChanPoint1
-
- testOutpoint := testChanPoint1
- testOutpoint.Index++
-
- report := &ResolverReport{
- OutPoint: testOutpoint,
- Amount: 2,
- ResolverType: 1,
- ResolverOutcome: 2,
- SpendTxID: test.spendTxID,
- }
-
- // Write report to disk, and ensure it is identical when
- // it is read.
- err = db.PutResolverReport(
- nil, testChainHash, &channelOutpoint, report,
- )
- util.RequireNoErr(t, err)
-
- reports, err := db.FetchChannelReports(
- testChainHash, &channelOutpoint,
- )
- util.RequireNoErr(t, err)
- require.Equal(t, report, reports[0])
- })
- }
-}
-
-// TestFetchChannelReadBucket tests retrieval of the reports bucket for a
-// channel, testing that the appropriate error is returned based on the state
-// of the existing bucket.
-func TestFetchChannelReadBucket(t *testing.T) {
- db, cleanup, err := MakeTestDB()
- util.RequireNoErr(t, err)
- defer cleanup()
-
- channelOutpoint := testChanPoint1
-
- testOutpoint := testChanPoint1
- testOutpoint.Index++
-
- // If we attempt to get reports when we do not have any present, we
- // expect to fail because our chain hash bucket is not present.
- _, err = db.FetchChannelReports(
- testChainHash, &channelOutpoint,
- )
- require.True(t, ErrNoChainHashBucket.Is(err))
-
- // Finally we write a report to disk and check that we can fetch it.
- report := &ResolverReport{
- OutPoint: testOutpoint,
- Amount: 2,
- ResolverOutcome: 1,
- ResolverType: 2,
- SpendTxID: nil,
- }
-
- err = db.PutResolverReport(
- nil, testChainHash, &channelOutpoint, report,
- )
- util.RequireNoErr(t, err)
-
- // Now that the channel bucket exists, we expect the channel to be
- // successfully fetched, with no reports.
- reports, err := db.FetchChannelReports(testChainHash, &testChanPoint1)
- util.RequireNoErr(t, err)
- require.Equal(t, report, reports[0])
-}
-
-// TestFetchChannelWriteBucket tests the creation of missing buckets when
-// retrieving the reports bucket.
-func TestFetchChannelWriteBucket(t *testing.T) {
- createReportsBucket := func(tx kvdb.RwTx) (kvdb.RwBucket, er.R) {
- return tx.CreateTopLevelBucket(closedChannelBucket)
- }
-
- createChainHashBucket := func(reports kvdb.RwBucket) (kvdb.RwBucket,
- er.R) {
-
- return reports.CreateBucketIfNotExists(testChainHash[:])
- }
-
- createChannelBucket := func(chainHash kvdb.RwBucket) (kvdb.RwBucket,
- er.R) {
-
- var chanPointBuf bytes.Buffer
- err := writeOutpoint(&chanPointBuf, &testChanPoint1)
- util.RequireNoErr(t, err)
-
- return chainHash.CreateBucketIfNotExists(chanPointBuf.Bytes())
- }
-
- tests := []struct {
- name string
- setup func(tx kvdb.RwTx) er.R
- }{
- {
- name: "no existing buckets",
- setup: func(tx kvdb.RwTx) er.R {
- return nil
- },
- },
- {
- name: "reports bucket exists",
- setup: func(tx kvdb.RwTx) er.R {
- _, err := createReportsBucket(tx)
- return err
- },
- },
- {
- name: "chainhash bucket exists",
- setup: func(tx kvdb.RwTx) er.R {
- reports, err := createReportsBucket(tx)
- if err != nil {
- return err
- }
-
- _, err = createChainHashBucket(reports)
- return err
- },
- },
- {
- name: "channel bucket exists",
- setup: func(tx kvdb.RwTx) er.R {
- reports, err := createReportsBucket(tx)
- if err != nil {
- return err
- }
-
- chainHash, err := createChainHashBucket(reports)
- if err != nil {
- return err
- }
-
- _, err = createChannelBucket(chainHash)
- return err
- },
- },
- }
-
- for _, test := range tests {
- test := test
-
- t.Run(test.name, func(t *testing.T) {
- db, cleanup, err := MakeTestDB()
- util.RequireNoErr(t, err)
- defer cleanup()
-
- // Update our db to the starting state we expect.
- err = kvdb.Update(db, test.setup, func() {})
- util.RequireNoErr(t, err)
-
- // Try to get our report bucket.
- err = kvdb.Update(db, func(tx kvdb.RwTx) er.R {
- _, err := fetchReportWriteBucket(
- tx, testChainHash, &testChanPoint1,
- )
- return err
- }, func() {})
- util.RequireNoErr(t, err)
- })
- }
-}
diff --git a/lnd/channeldb/waitingproof.go b/lnd/channeldb/waitingproof.go
deleted file mode 100644
index 12c3caf7..00000000
--- a/lnd/channeldb/waitingproof.go
+++ /dev/null
@@ -1,256 +0,0 @@
-package channeldb
-
-import (
- "encoding/binary"
- "sync"
-
- "io"
-
- "bytes"
-
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/btcutil/util"
- "github.com/pkt-cash/pktd/lnd/channeldb/kvdb"
- "github.com/pkt-cash/pktd/lnd/lnwire"
-)
-
-var (
- // waitingProofsBucketKey byte string name of the waiting proofs store.
- waitingProofsBucketKey = []byte("waitingproofs")
-
- // ErrWaitingProofNotFound is returned if waiting proofs haven't been
- // found by db.
- ErrWaitingProofNotFound = Err.CodeWithDetail("ErrWaitingProofNotFound", "waiting proofs haven't been "+
- "found")
-
- // ErrWaitingProofAlreadyExist is returned if waiting proofs haven't been
- // found by db.
- ErrWaitingProofAlreadyExist = Err.CodeWithDetail("ErrWaitingProofAlreadyExist", "waiting proof with such "+
- "key already exist")
-)
-
-// WaitingProofStore is the bold db map-like storage for half announcement
-// signatures. The one responsibility of this storage is to be able to
-// retrieve waiting proofs after client restart.
-type WaitingProofStore struct {
- // cache is used in order to reduce the number of redundant get
- // calls, when object isn't stored in it.
- cache map[WaitingProofKey]struct{}
- db *DB
- mu sync.RWMutex
-}
-
-// NewWaitingProofStore creates new instance of proofs storage.
-func NewWaitingProofStore(db *DB) (*WaitingProofStore, er.R) {
- s := &WaitingProofStore{
- db: db,
- }
-
- if err := s.ForAll(func(proof *WaitingProof) er.R {
- s.cache[proof.Key()] = struct{}{}
- return nil
- }, func() {
- s.cache = make(map[WaitingProofKey]struct{})
- }); err != nil && !ErrWaitingProofNotFound.Is(err) {
- return nil, err
- }
-
- return s, nil
-}
-
-// Add adds new waiting proof in the storage.
-func (s *WaitingProofStore) Add(proof *WaitingProof) er.R {
- s.mu.Lock()
- defer s.mu.Unlock()
-
- err := kvdb.Update(s.db, func(tx kvdb.RwTx) er.R {
- var err er.R
- var b bytes.Buffer
-
- // Get or create the bucket.
- bucket, err := tx.CreateTopLevelBucket(waitingProofsBucketKey)
- if err != nil {
- return err
- }
-
- // Encode the objects and place it in the bucket.
- if err := proof.Encode(&b); err != nil {
- return err
- }
-
- key := proof.Key()
-
- return bucket.Put(key[:], b.Bytes())
- }, func() {})
- if err != nil {
- return err
- }
-
- // Knowing that the write succeeded, we can now update the in-memory
- // cache with the proof's key.
- s.cache[proof.Key()] = struct{}{}
-
- return nil
-}
-
-// Remove removes the proof from storage by its key.
-func (s *WaitingProofStore) Remove(key WaitingProofKey) er.R {
- s.mu.Lock()
- defer s.mu.Unlock()
-
- if _, ok := s.cache[key]; !ok {
- return ErrWaitingProofNotFound.Default()
- }
-
- err := kvdb.Update(s.db, func(tx kvdb.RwTx) er.R {
- // Get or create the top bucket.
- bucket := tx.ReadWriteBucket(waitingProofsBucketKey)
- if bucket == nil {
- return ErrWaitingProofNotFound.Default()
- }
-
- return bucket.Delete(key[:])
- }, func() {})
- if err != nil {
- return err
- }
-
- // Since the proof was successfully deleted from the store, we can now
- // remove it from the in-memory cache.
- delete(s.cache, key)
-
- return nil
-}
-
-// ForAll iterates thought all waiting proofs and passing the waiting proof
-// in the given callback.
-func (s *WaitingProofStore) ForAll(cb func(*WaitingProof) er.R, reset func()) er.R {
-
- return kvdb.View(s.db, func(tx kvdb.RTx) er.R {
- bucket := tx.ReadBucket(waitingProofsBucketKey)
- if bucket == nil {
- return ErrWaitingProofNotFound.Default()
- }
-
- // Iterate over objects buckets.
- return bucket.ForEach(func(k, v []byte) er.R {
- // Skip buckets fields.
- if v == nil {
- return nil
- }
-
- r := bytes.NewReader(v)
- proof := &WaitingProof{}
- if err := proof.Decode(r); err != nil {
- return err
- }
-
- return cb(proof)
- })
- }, reset)
-}
-
-// Get returns the object which corresponds to the given index.
-func (s *WaitingProofStore) Get(key WaitingProofKey) (*WaitingProof, er.R) {
- var proof *WaitingProof
-
- s.mu.RLock()
- defer s.mu.RUnlock()
-
- if _, ok := s.cache[key]; !ok {
- return nil, ErrWaitingProofNotFound.Default()
- }
-
- err := kvdb.View(s.db, func(tx kvdb.RTx) er.R {
- bucket := tx.ReadBucket(waitingProofsBucketKey)
- if bucket == nil {
- return ErrWaitingProofNotFound.Default()
- }
-
- // Iterate over objects buckets.
- v := bucket.Get(key[:])
- if v == nil {
- return ErrWaitingProofNotFound.Default()
- }
-
- r := bytes.NewReader(v)
- return proof.Decode(r)
- }, func() {
- proof = &WaitingProof{}
- })
-
- return proof, err
-}
-
-// WaitingProofKey is the proof key which uniquely identifies the waiting
-// proof object. The goal of this key is distinguish the local and remote
-// proof for the same channel id.
-type WaitingProofKey [9]byte
-
-// WaitingProof is the storable object, which encapsulate the half proof and
-// the information about from which side this proof came. This structure is
-// needed to make channel proof exchange persistent, so that after client
-// restart we may receive remote/local half proof and process it.
-type WaitingProof struct {
- *lnwire.AnnounceSignatures
- isRemote bool
-}
-
-// NewWaitingProof constructs a new waiting prof instance.
-func NewWaitingProof(isRemote bool, proof *lnwire.AnnounceSignatures) *WaitingProof {
- return &WaitingProof{
- AnnounceSignatures: proof,
- isRemote: isRemote,
- }
-}
-
-// OppositeKey returns the key which uniquely identifies opposite waiting proof.
-func (p *WaitingProof) OppositeKey() WaitingProofKey {
- var key [9]byte
- binary.BigEndian.PutUint64(key[:8], p.ShortChannelID.ToUint64())
-
- if !p.isRemote {
- key[8] = 1
- }
- return key
-}
-
-// Key returns the key which uniquely identifies waiting proof.
-func (p *WaitingProof) Key() WaitingProofKey {
- var key [9]byte
- binary.BigEndian.PutUint64(key[:8], p.ShortChannelID.ToUint64())
-
- if p.isRemote {
- key[8] = 1
- }
- return key
-}
-
-// Encode writes the internal representation of waiting proof in byte stream.
-func (p *WaitingProof) Encode(w io.Writer) er.R {
- if err := util.WriteBin(w, byteOrder, p.isRemote); err != nil {
- return err
- }
-
- if err := p.AnnounceSignatures.Encode(w, 0); err != nil {
- return err
- }
-
- return nil
-}
-
-// Decode reads the data from the byte stream and initializes the
-// waiting proof object with it.
-func (p *WaitingProof) Decode(r io.Reader) er.R {
- if err := util.ReadBin(r, byteOrder, &p.isRemote); err != nil {
- return err
- }
-
- msg := &lnwire.AnnounceSignatures{}
- if err := msg.Decode(r, 0); err != nil {
- return err
- }
-
- (*p).AnnounceSignatures = msg
- return nil
-}
diff --git a/lnd/channeldb/waitingproof_test.go b/lnd/channeldb/waitingproof_test.go
deleted file mode 100644
index 44bec037..00000000
--- a/lnd/channeldb/waitingproof_test.go
+++ /dev/null
@@ -1,59 +0,0 @@
-package channeldb
-
-import (
- "testing"
-
- "reflect"
-
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/lnd/lnwire"
-)
-
-// TestWaitingProofStore tests add/get/remove functions of the waiting proof
-// storage.
-func TestWaitingProofStore(t *testing.T) {
- t.Parallel()
-
- db, cleanup, err := MakeTestDB()
- if err != nil {
- t.Fatalf("failed to make test database: %s", err)
- }
- defer cleanup()
-
- proof1 := NewWaitingProof(true, &lnwire.AnnounceSignatures{
- NodeSignature: wireSig,
- BitcoinSignature: wireSig,
- })
-
- store, err := NewWaitingProofStore(db)
- if err != nil {
- t.Fatalf("unable to create the waiting proofs storage: %v",
- err)
- }
-
- if err := store.Add(proof1); err != nil {
- t.Fatalf("unable add proof to storage: %v", err)
- }
-
- proof2, err := store.Get(proof1.Key())
- if err != nil {
- t.Fatalf("unable retrieve proof from storage: %v", err)
- }
- if !reflect.DeepEqual(proof1, proof2) {
- t.Fatal("wrong proof retrieved")
- }
-
- if _, err := store.Get(proof1.OppositeKey()); !ErrWaitingProofNotFound.Is(err) {
- t.Fatalf("proof shouldn't be found: %v", err)
- }
-
- if err := store.Remove(proof1.Key()); err != nil {
- t.Fatalf("unable remove proof from storage: %v", err)
- }
-
- if err := store.ForAll(func(proof *WaitingProof) er.R {
- return er.New("storage should be empty")
- }, func() {}); err != nil && !ErrWaitingProofNotFound.Is(err) {
- t.Fatal(err)
- }
-}
diff --git a/lnd/channeldb/witness_cache.go b/lnd/channeldb/witness_cache.go
deleted file mode 100644
index bd79160f..00000000
--- a/lnd/channeldb/witness_cache.go
+++ /dev/null
@@ -1,232 +0,0 @@
-package channeldb
-
-import (
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/lnd/channeldb/kvdb"
- "github.com/pkt-cash/pktd/lnd/lntypes"
-)
-
-var (
- // ErrNoWitnesses is an error that's returned when no new witnesses have
- // been added to the WitnessCache.
- ErrNoWitnesses = Err.CodeWithDetail("ErrNoWitnesses",
- "no witnesses")
-
- // ErrUnknownWitnessType is returned if a caller attempts to
- ErrUnknownWitnessType = Err.CodeWithDetail("ErrUnknownWitnessType",
- "unknown witness type")
-)
-
-// WitnessType is enum that denotes what "type" of witness is being
-// stored/retrieved. As the WitnessCache itself is agnostic and doesn't enforce
-// any structure on added witnesses, we use this type to partition the
-// witnesses on disk, and also to know how to map a witness to its look up key.
-type WitnessType uint8
-
-var (
- // Sha256HashWitness is a witness that is simply the pre image to a
- // hash image. In order to map to its key, we'll use sha256.
- Sha256HashWitness WitnessType = 1
-)
-
-// toDBKey is a helper method that maps a witness type to the key that we'll
-// use to store it within the database.
-func (w WitnessType) toDBKey() ([]byte, er.R) {
- switch w {
-
- case Sha256HashWitness:
- return []byte{byte(w)}, nil
-
- default:
- return nil, ErrUnknownWitnessType.Default()
- }
-}
-
-var (
- // witnessBucketKey is the name of the bucket that we use to store all
- // witnesses encountered. Within this bucket, we'll create a sub-bucket for
- // each witness type.
- witnessBucketKey = []byte("byte")
-)
-
-// WitnessCache is a persistent cache of all witnesses we've encountered on the
-// network. In the case of multi-hop, multi-step contracts, a cache of all
-// witnesses can be useful in the case of partial contract resolution. If
-// negotiations break down, we may be forced to locate the witness for a
-// portion of the contract on-chain. In this case, we'll then add that witness
-// to the cache so the incoming contract can fully resolve witness.
-// Additionally, as one MUST always use a unique witness on the network, we may
-// use this cache to detect duplicate witnesses.
-//
-// TODO(roasbeef): need expiry policy?
-// * encrypt?
-type WitnessCache struct {
- db *DB
-}
-
-// NewWitnessCache returns a new instance of the witness cache.
-func (d *DB) NewWitnessCache() *WitnessCache {
- return &WitnessCache{
- db: d,
- }
-}
-
-// witnessEntry is a key-value struct that holds each key -> witness pair, used
-// when inserting records into the cache.
-type witnessEntry struct {
- key []byte
- witness []byte
-}
-
-// AddSha256Witnesses adds a batch of new sha256 preimages into the witness
-// cache. This is an alias for AddWitnesses that uses Sha256HashWitness as the
-// preimages' witness type.
-func (w *WitnessCache) AddSha256Witnesses(preimages ...lntypes.Preimage) er.R {
- // Optimistically compute the preimages' hashes before attempting to
- // start the db transaction.
- entries := make([]witnessEntry, 0, len(preimages))
- for i := range preimages {
- hash := preimages[i].Hash()
- entries = append(entries, witnessEntry{
- key: hash[:],
- witness: preimages[i][:],
- })
- }
-
- return w.addWitnessEntries(Sha256HashWitness, entries)
-}
-
-// addWitnessEntries inserts the witnessEntry key-value pairs into the cache,
-// using the appropriate witness type to segment the namespace of possible
-// witness types.
-func (w *WitnessCache) addWitnessEntries(wType WitnessType,
- entries []witnessEntry) er.R {
-
- // Exit early if there are no witnesses to add.
- if len(entries) == 0 {
- return nil
- }
-
- return kvdb.Batch(w.db.Backend, func(tx kvdb.RwTx) er.R {
- witnessBucket, err := tx.CreateTopLevelBucket(witnessBucketKey)
- if err != nil {
- return err
- }
-
- witnessTypeBucketKey, errr := wType.toDBKey()
- if errr != nil {
- return errr
- }
- witnessTypeBucket, err := witnessBucket.CreateBucketIfNotExists(
- witnessTypeBucketKey,
- )
- if err != nil {
- return err
- }
-
- for _, entry := range entries {
- err = witnessTypeBucket.Put(entry.key, entry.witness)
- if err != nil {
- return err
- }
- }
-
- return nil
- })
-}
-
-// LookupSha256Witness attempts to lookup the preimage for a sha256 hash. If
-// the witness isn't found, ErrNoWitnesses will be returned.
-func (w *WitnessCache) LookupSha256Witness(hash lntypes.Hash) (lntypes.Preimage, er.R) {
- witness, err := w.lookupWitness(Sha256HashWitness, hash[:])
- if err != nil {
- return lntypes.Preimage{}, err
- }
-
- return lntypes.MakePreimage(witness)
-}
-
-// lookupWitness attempts to lookup a witness according to its type and also
-// its witness key. In the case that the witness isn't found, ErrNoWitnesses
-// will be returned.
-func (w *WitnessCache) lookupWitness(wType WitnessType, witnessKey []byte) ([]byte, er.R) {
- var witness []byte
- err := kvdb.View(w.db, func(tx kvdb.RTx) er.R {
- witnessBucket := tx.ReadBucket(witnessBucketKey)
- if witnessBucket == nil {
- return ErrNoWitnesses.Default()
- }
-
- witnessTypeBucketKey, err := wType.toDBKey()
- if err != nil {
- return err
- }
- witnessTypeBucket := witnessBucket.NestedReadBucket(witnessTypeBucketKey)
- if witnessTypeBucket == nil {
- return ErrNoWitnesses.Default()
- }
-
- dbWitness := witnessTypeBucket.Get(witnessKey)
- if dbWitness == nil {
- return ErrNoWitnesses.Default()
- }
-
- witness = make([]byte, len(dbWitness))
- copy(witness[:], dbWitness)
-
- return nil
- }, func() {
- witness = nil
- })
- if err != nil {
- return nil, err
- }
-
- return witness, nil
-}
-
-// DeleteSha256Witness attempts to delete a sha256 preimage identified by hash.
-func (w *WitnessCache) DeleteSha256Witness(hash lntypes.Hash) er.R {
- return w.deleteWitness(Sha256HashWitness, hash[:])
-}
-
-// deleteWitness attempts to delete a particular witness from the database.
-func (w *WitnessCache) deleteWitness(wType WitnessType, witnessKey []byte) er.R {
- return kvdb.Batch(w.db.Backend, func(tx kvdb.RwTx) er.R {
- witnessBucket, err := tx.CreateTopLevelBucket(witnessBucketKey)
- if err != nil {
- return err
- }
-
- witnessTypeBucketKey, errr := wType.toDBKey()
- if errr != nil {
- return errr
- }
- witnessTypeBucket, err := witnessBucket.CreateBucketIfNotExists(
- witnessTypeBucketKey,
- )
- if err != nil {
- return err
- }
-
- return witnessTypeBucket.Delete(witnessKey)
- })
-}
-
-// DeleteWitnessClass attempts to delete an *entire* class of witnesses. After
-// this function return with a non-nil error,
-func (w *WitnessCache) DeleteWitnessClass(wType WitnessType) er.R {
- return kvdb.Batch(w.db.Backend, func(tx kvdb.RwTx) er.R {
- witnessBucket, err := tx.CreateTopLevelBucket(witnessBucketKey)
- if err != nil {
- return err
- }
-
- witnessTypeBucketKey, errr := wType.toDBKey()
- if errr != nil {
- return errr
- }
-
- return witnessBucket.DeleteNestedBucket(witnessTypeBucketKey)
- })
-}
diff --git a/lnd/channeldb/witness_cache_test.go b/lnd/channeldb/witness_cache_test.go
deleted file mode 100644
index 7826fadb..00000000
--- a/lnd/channeldb/witness_cache_test.go
+++ /dev/null
@@ -1,239 +0,0 @@
-package channeldb
-
-import (
- "crypto/sha256"
- "testing"
-
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/lnd/lntypes"
-)
-
-// TestWitnessCacheSha256Retrieval tests that we're able to add and lookup new
-// sha256 preimages to the witness cache.
-func TestWitnessCacheSha256Retrieval(t *testing.T) {
- t.Parallel()
-
- cdb, cleanUp, err := MakeTestDB()
- if err != nil {
- t.Fatalf("unable to make test database: %v", err)
- }
- defer cleanUp()
-
- wCache := cdb.NewWitnessCache()
-
- // We'll be attempting to add then lookup two simple sha256 preimages
- // within this test.
- preimage1 := lntypes.Preimage(rev)
- preimage2 := lntypes.Preimage(key)
-
- preimages := []lntypes.Preimage{preimage1, preimage2}
- hashes := []lntypes.Hash{preimage1.Hash(), preimage2.Hash()}
-
- // First, we'll attempt to add the preimages to the database.
- err = wCache.AddSha256Witnesses(preimages...)
- if err != nil {
- t.Fatalf("unable to add witness: %v", err)
- }
-
- // With the preimages stored, we'll now attempt to look them up.
- for i, hash := range hashes {
- preimage := preimages[i]
-
- // We should get back the *exact* same preimage as we originally
- // stored.
- dbPreimage, err := wCache.LookupSha256Witness(hash)
- if err != nil {
- t.Fatalf("unable to look up witness: %v", err)
- }
-
- if preimage != dbPreimage {
- t.Fatalf("witnesses don't match: expected %x, got %x",
- preimage[:], dbPreimage[:])
- }
- }
-}
-
-// TestWitnessCacheSha256Deletion tests that we're able to delete a single
-// sha256 preimage, and also a class of witnesses from the cache.
-func TestWitnessCacheSha256Deletion(t *testing.T) {
- t.Parallel()
-
- cdb, cleanUp, err := MakeTestDB()
- if err != nil {
- t.Fatalf("unable to make test database: %v", err)
- }
- defer cleanUp()
-
- wCache := cdb.NewWitnessCache()
-
- // We'll start by adding two preimages to the cache.
- preimage1 := lntypes.Preimage(key)
- hash1 := preimage1.Hash()
-
- preimage2 := lntypes.Preimage(rev)
- hash2 := preimage2.Hash()
-
- if err := wCache.AddSha256Witnesses(preimage1); err != nil {
- t.Fatalf("unable to add witness: %v", err)
- }
-
- if err := wCache.AddSha256Witnesses(preimage2); err != nil {
- t.Fatalf("unable to add witness: %v", err)
- }
-
- // We'll now delete the first preimage. If we attempt to look it up, we
- // should get ErrNoWitnesses.
- err = wCache.DeleteSha256Witness(hash1)
- if err != nil {
- t.Fatalf("unable to delete witness: %v", err)
- }
- _, err = wCache.LookupSha256Witness(hash1)
- if !ErrNoWitnesses.Is(err) {
- t.Fatalf("expected ErrNoWitnesses instead got: %v", err)
- }
-
- // Next, we'll attempt to delete the entire witness class itself. When
- // we try to lookup the second preimage, we should again get
- // ErrNoWitnesses.
- if err := wCache.DeleteWitnessClass(Sha256HashWitness); err != nil {
- t.Fatalf("unable to delete witness class: %v", err)
- }
- _, err = wCache.LookupSha256Witness(hash2)
- if !ErrNoWitnesses.Is(err) {
- t.Fatalf("expected ErrNoWitnesses instead got: %v", err)
- }
-}
-
-// TestWitnessCacheUnknownWitness tests that we get an error if we attempt to
-// query/add/delete an unknown witness.
-func TestWitnessCacheUnknownWitness(t *testing.T) {
- t.Parallel()
-
- cdb, cleanUp, err := MakeTestDB()
- if err != nil {
- t.Fatalf("unable to make test database: %v", err)
- }
- defer cleanUp()
-
- wCache := cdb.NewWitnessCache()
-
- // We'll attempt to add a new, undefined witness type to the database.
- // We should get an error.
- err = wCache.legacyAddWitnesses(234, key[:])
- if !ErrUnknownWitnessType.Is(err) {
- t.Fatalf("expected ErrUnknownWitnessType, got %v", err)
- }
-}
-
-// TestAddSha256Witnesses tests that insertion using AddSha256Witnesses behaves
-// identically to the insertion via the generalized interface.
-func TestAddSha256Witnesses(t *testing.T) {
- cdb, cleanUp, err := MakeTestDB()
- if err != nil {
- t.Fatalf("unable to make test database: %v", err)
- }
- defer cleanUp()
-
- wCache := cdb.NewWitnessCache()
-
- // We'll start by adding a witnesses to the cache using the generic
- // AddWitnesses method.
- witness1 := rev[:]
- preimage1 := lntypes.Preimage(rev)
- hash1 := preimage1.Hash()
-
- witness2 := key[:]
- preimage2 := lntypes.Preimage(key)
- hash2 := preimage2.Hash()
-
- var (
- witnesses = [][]byte{witness1, witness2}
- preimages = []lntypes.Preimage{preimage1, preimage2}
- hashes = []lntypes.Hash{hash1, hash2}
- )
-
- err = wCache.legacyAddWitnesses(Sha256HashWitness, witnesses...)
- if err != nil {
- t.Fatalf("unable to add witness: %v", err)
- }
-
- for i, hash := range hashes {
- preimage := preimages[i]
-
- dbPreimage, err := wCache.LookupSha256Witness(hash)
- if err != nil {
- t.Fatalf("unable to lookup witness: %v", err)
- }
-
- // Assert that the retrieved witness matches the original.
- if dbPreimage != preimage {
- t.Fatalf("retrieved witness mismatch, want: %x, "+
- "got: %x", preimage, dbPreimage)
- }
-
- // We'll now delete the witness, as we'll be reinserting it
- // using the specialized AddSha256Witnesses method.
- err = wCache.DeleteSha256Witness(hash)
- if err != nil {
- t.Fatalf("unable to delete witness: %v", err)
- }
- }
-
- // Now, add the same witnesses using the type-safe interface for
- // lntypes.Preimages..
- err = wCache.AddSha256Witnesses(preimages...)
- if err != nil {
- t.Fatalf("unable to add sha256 preimage: %v", err)
- }
-
- // Finally, iterate over the keys and assert that the returned witnesses
- // match the original witnesses. This asserts that the specialized
- // insertion method behaves identically to the generalized interface.
- for i, hash := range hashes {
- preimage := preimages[i]
-
- dbPreimage, err := wCache.LookupSha256Witness(hash)
- if err != nil {
- t.Fatalf("unable to lookup witness: %v", err)
- }
-
- // Assert that the retrieved witness matches the original.
- if dbPreimage != preimage {
- t.Fatalf("retrieved witness mismatch, want: %x, "+
- "got: %x", preimage, dbPreimage)
- }
- }
-}
-
-// legacyAddWitnesses adds a batch of new witnesses of wType to the witness
-// cache. The type of the witness will be used to map each witness to the key
-// that will be used to look it up. All witnesses should be of the same
-// WitnessType.
-//
-// NOTE: Previously this method exposed a generic interface for adding
-// witnesses, which has since been deprecated in favor of a strongly typed
-// interface for each witness class. We keep this method around to assert the
-// correctness of specialized witness adding methods.
-func (w *WitnessCache) legacyAddWitnesses(wType WitnessType,
- witnesses ...[]byte) er.R {
-
- // Optimistically compute the witness keys before attempting to start
- // the db transaction.
- entries := make([]witnessEntry, 0, len(witnesses))
- for _, witness := range witnesses {
- // Map each witness to its key by applying the appropriate
- // transformation for the given witness type.
- switch wType {
- case Sha256HashWitness:
- key := sha256.Sum256(witness)
- entries = append(entries, witnessEntry{
- key: key[:],
- witness: witness,
- })
- default:
- return ErrUnknownWitnessType.Default()
- }
- }
-
- return w.addWitnessEntries(wType, entries)
-}
diff --git a/lnd/channelnotifier/channelnotifier.go b/lnd/channelnotifier/channelnotifier.go
deleted file mode 100644
index dd19601a..00000000
--- a/lnd/channelnotifier/channelnotifier.go
+++ /dev/null
@@ -1,183 +0,0 @@
-package channelnotifier
-
-import (
- "sync"
-
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/lnd/channeldb"
- "github.com/pkt-cash/pktd/lnd/subscribe"
- "github.com/pkt-cash/pktd/pktlog/log"
- "github.com/pkt-cash/pktd/wire"
-)
-
-// ChannelNotifier is a subsystem which all active, inactive, and closed channel
-// events pipe through. It takes subscriptions for its events, and whenever
-// it receives a new event it notifies its subscribers over the proper channel.
-type ChannelNotifier struct {
- started sync.Once
- stopped sync.Once
-
- ntfnServer *subscribe.Server
-
- chanDB *channeldb.DB
-}
-
-// PendingOpenChannelEvent represents a new event where a new channel has
-// entered a pending open state.
-type PendingOpenChannelEvent struct {
- // ChannelPoint is the channel outpoint for the new channel.
- ChannelPoint *wire.OutPoint
-
- // PendingChannel is the channel configuration for the newly created
- // channel. This might not have been persisted to the channel DB yet
- // because we are still waiting for the final message from the remote
- // peer.
- PendingChannel *channeldb.OpenChannel
-}
-
-// OpenChannelEvent represents a new event where a channel goes from pending
-// open to open.
-type OpenChannelEvent struct {
- // Channel is the channel that has become open.
- Channel *channeldb.OpenChannel
-}
-
-// ActiveLinkEvent represents a new event where the link becomes active in the
-// switch. This happens before the ActiveChannelEvent.
-type ActiveLinkEvent struct {
- // ChannelPoint is the channel point for the newly active channel.
- ChannelPoint *wire.OutPoint
-}
-
-// ActiveChannelEvent represents a new event where a channel becomes active.
-type ActiveChannelEvent struct {
- // ChannelPoint is the channelpoint for the newly active channel.
- ChannelPoint *wire.OutPoint
-}
-
-// InactiveChannelEvent represents a new event where a channel becomes inactive.
-type InactiveChannelEvent struct {
- // ChannelPoint is the channelpoint for the newly inactive channel.
- ChannelPoint *wire.OutPoint
-}
-
-// ClosedChannelEvent represents a new event where a channel becomes closed.
-type ClosedChannelEvent struct {
- // CloseSummary is the summary of the channel close that has occurred.
- CloseSummary *channeldb.ChannelCloseSummary
-}
-
-// New creates a new channel notifier. The ChannelNotifier gets channel
-// events from peers and from the chain arbitrator, and dispatches them to
-// its clients.
-func New(chanDB *channeldb.DB) *ChannelNotifier {
- return &ChannelNotifier{
- ntfnServer: subscribe.NewServer(),
- chanDB: chanDB,
- }
-}
-
-// Start starts the ChannelNotifier and all goroutines it needs to carry out its task.
-func (c *ChannelNotifier) Start() er.R {
- var err er.R
- c.started.Do(func() {
- log.Trace("ChannelNotifier starting")
- err = c.ntfnServer.Start()
- })
- return err
-}
-
-// Stop signals the notifier for a graceful shutdown.
-func (c *ChannelNotifier) Stop() {
- c.stopped.Do(func() {
- c.ntfnServer.Stop()
- })
-}
-
-// SubscribeChannelEvents returns a subscribe.Client that will receive updates
-// any time the Server is made aware of a new event. The subscription provides
-// channel events from the point of subscription onwards.
-//
-// TODO(carlaKC): update to allow subscriptions to specify a block height from
-// which we would like to subscribe to events.
-func (c *ChannelNotifier) SubscribeChannelEvents() (*subscribe.Client, er.R) {
- return c.ntfnServer.Subscribe()
-}
-
-// NotifyPendingOpenChannelEvent notifies the channelEventNotifier goroutine
-// that a new channel is pending. The pending channel is passed as a parameter
-// instead of read from the database because it might not yet have been
-// persisted to the DB because we still wait for the final message from the
-// remote peer.
-func (c *ChannelNotifier) NotifyPendingOpenChannelEvent(chanPoint wire.OutPoint,
- pendingChan *channeldb.OpenChannel) {
-
- event := PendingOpenChannelEvent{
- ChannelPoint: &chanPoint,
- PendingChannel: pendingChan,
- }
-
- if err := c.ntfnServer.SendUpdate(event); err != nil {
- log.Warnf("Unable to send pending open channel update: %v", err)
- }
-}
-
-// NotifyOpenChannelEvent notifies the channelEventNotifier goroutine that a
-// channel has gone from pending open to open.
-func (c *ChannelNotifier) NotifyOpenChannelEvent(chanPoint wire.OutPoint) {
-
- // Fetch the relevant channel from the database.
- channel, err := c.chanDB.FetchChannel(chanPoint)
- if err != nil {
- log.Warnf("Unable to fetch open channel from the db: %v", err)
- }
-
- // Send the open event to all channel event subscribers.
- event := OpenChannelEvent{Channel: channel}
- if err := c.ntfnServer.SendUpdate(event); err != nil {
- log.Warnf("Unable to send open channel update: %v", err)
- }
-}
-
-// NotifyClosedChannelEvent notifies the channelEventNotifier goroutine that a
-// channel has closed.
-func (c *ChannelNotifier) NotifyClosedChannelEvent(chanPoint wire.OutPoint) {
- // Fetch the relevant closed channel from the database.
- closeSummary, err := c.chanDB.FetchClosedChannel(&chanPoint)
- if err != nil {
- log.Warnf("Unable to fetch closed channel summary from the db: %v", err)
- }
-
- // Send the closed event to all channel event subscribers.
- event := ClosedChannelEvent{CloseSummary: closeSummary}
- if err := c.ntfnServer.SendUpdate(event); err != nil {
- log.Warnf("Unable to send closed channel update: %v", err)
- }
-}
-
-// NotifyActiveLinkEvent notifies the channelEventNotifier goroutine that a
-// link has been added to the switch.
-func (c *ChannelNotifier) NotifyActiveLinkEvent(chanPoint wire.OutPoint) {
- event := ActiveLinkEvent{ChannelPoint: &chanPoint}
- if err := c.ntfnServer.SendUpdate(event); err != nil {
- log.Warnf("Unable to send active link update: %v", err)
- }
-}
-
-// NotifyActiveChannelEvent notifies the channelEventNotifier goroutine that a
-// channel is active.
-func (c *ChannelNotifier) NotifyActiveChannelEvent(chanPoint wire.OutPoint) {
- event := ActiveChannelEvent{ChannelPoint: &chanPoint}
- if err := c.ntfnServer.SendUpdate(event); err != nil {
- log.Warnf("Unable to send active channel update: %v", err)
- }
-}
-
-// NotifyInactiveChannelEvent notifies the channelEventNotifier goroutine that a
-// channel is inactive.
-func (c *ChannelNotifier) NotifyInactiveChannelEvent(chanPoint wire.OutPoint) {
- event := InactiveChannelEvent{ChannelPoint: &chanPoint}
- if err := c.ntfnServer.SendUpdate(event); err != nil {
- log.Warnf("Unable to send inactive channel update: %v", err)
- }
-}
diff --git a/lnd/chanrestore.go b/lnd/chanrestore.go
deleted file mode 100644
index 9d95856f..00000000
--- a/lnd/chanrestore.go
+++ /dev/null
@@ -1,298 +0,0 @@
-package lnd
-
-import (
- "math"
- "net"
-
- "github.com/pkt-cash/pktd/btcec"
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/chaincfg"
- "github.com/pkt-cash/pktd/chaincfg/chainhash"
- "github.com/pkt-cash/pktd/lnd/chanbackup"
- "github.com/pkt-cash/pktd/lnd/channeldb"
- "github.com/pkt-cash/pktd/lnd/contractcourt"
- "github.com/pkt-cash/pktd/lnd/keychain"
- "github.com/pkt-cash/pktd/lnd/lnwire"
- "github.com/pkt-cash/pktd/lnd/shachain"
- "github.com/pkt-cash/pktd/pktlog/log"
-)
-
-const (
- // mainnetSCBLaunchBlock is the approximate block height of the bitcoin
- // mainnet chain of the date when SCBs first were released in lnd
- // (v0.6.0-beta). The block date is 4/15/2019, 10:54 PM UTC.
- mainnetSCBLaunchBlock = 571800
-
- // testnetSCBLaunchBlock is the approximate block height of the bitcoin
- // testnet3 chain of the date when SCBs first were released in lnd
- // (v0.6.0-beta). The block date is 4/16/2019, 08:04 AM UTC.
- testnetSCBLaunchBlock = 1489300
-)
-
-// chanDBRestorer is an implementation of the chanbackup.ChannelRestorer
-// interface that is able to properly map a Single backup, into a
-// channeldb.ChannelShell which is required to fully restore a channel. We also
-// need the secret key chain in order obtain the prior shachain root so we can
-// verify the DLP protocol as initiated by the remote node.
-type chanDBRestorer struct {
- db *channeldb.DB
-
- secretKeys keychain.SecretKeyRing
-
- chainArb *contractcourt.ChainArbitrator
-}
-
-// openChannelShell maps the static channel back up into an open channel
-// "shell". We say shell as this doesn't include all the information required
-// to continue to use the channel, only the minimal amount of information to
-// insert this shell channel back into the database.
-func (c *chanDBRestorer) openChannelShell(backup chanbackup.Single) (
- *channeldb.ChannelShell, er.R) {
-
- // First, we'll also need to obtain the private key for the shachain
- // root from the encoded public key.
- //
- // TODO(roasbeef): now adds req for hardware signers to impl
- // shachain...
- privKey, err := c.secretKeys.DerivePrivKey(backup.ShaChainRootDesc)
- if err != nil {
- return nil, er.Errorf("unable to derive shachain root key: %v", err)
- }
- revRoot, err := chainhash.NewHash(privKey.Serialize())
- if err != nil {
- return nil, err
- }
- shaChainProducer := shachain.NewRevocationProducer(*revRoot)
-
- // Each of the keys in our local channel config only have their
- // locators populate, so we'll re-derive the raw key now as we'll need
- // it in order to carry out the DLP protocol.
- backup.LocalChanCfg.MultiSigKey, err = c.secretKeys.DeriveKey(
- backup.LocalChanCfg.MultiSigKey.KeyLocator,
- )
- if err != nil {
- return nil, er.Errorf("unable to derive multi sig key: %v", err)
- }
- backup.LocalChanCfg.RevocationBasePoint, err = c.secretKeys.DeriveKey(
- backup.LocalChanCfg.RevocationBasePoint.KeyLocator,
- )
- if err != nil {
- return nil, er.Errorf("unable to derive revocation key: %v", err)
- }
- backup.LocalChanCfg.PaymentBasePoint, err = c.secretKeys.DeriveKey(
- backup.LocalChanCfg.PaymentBasePoint.KeyLocator,
- )
- if err != nil {
- return nil, er.Errorf("unable to derive payment key: %v", err)
- }
- backup.LocalChanCfg.DelayBasePoint, err = c.secretKeys.DeriveKey(
- backup.LocalChanCfg.DelayBasePoint.KeyLocator,
- )
- if err != nil {
- return nil, er.Errorf("unable to derive delay key: %v", err)
- }
- backup.LocalChanCfg.HtlcBasePoint, err = c.secretKeys.DeriveKey(
- backup.LocalChanCfg.HtlcBasePoint.KeyLocator,
- )
- if err != nil {
- return nil, er.Errorf("unable to derive htlc key: %v", err)
- }
-
- var chanType channeldb.ChannelType
- switch backup.Version {
-
- case chanbackup.DefaultSingleVersion:
- chanType = channeldb.SingleFunderBit
-
- case chanbackup.TweaklessCommitVersion:
- chanType = channeldb.SingleFunderTweaklessBit
-
- case chanbackup.AnchorsCommitVersion:
- chanType = channeldb.AnchorOutputsBit
- chanType |= channeldb.SingleFunderTweaklessBit
-
- default:
- return nil, er.Errorf("unknown Single version: %v", err)
- }
-
- log.Infof("SCB Recovery: created channel shell for ChannelPoint(%v), "+
- "chan_type=%v", backup.FundingOutpoint, chanType)
-
- chanShell := channeldb.ChannelShell{
- NodeAddrs: backup.Addresses,
- Chan: &channeldb.OpenChannel{
- ChanType: chanType,
- ChainHash: backup.ChainHash,
- IsInitiator: backup.IsInitiator,
- Capacity: backup.Capacity,
- FundingOutpoint: backup.FundingOutpoint,
- ShortChannelID: backup.ShortChannelID,
- IdentityPub: backup.RemoteNodePub,
- IsPending: false,
- LocalChanCfg: backup.LocalChanCfg,
- RemoteChanCfg: backup.RemoteChanCfg,
- RemoteCurrentRevocation: backup.RemoteNodePub,
- RevocationStore: shachain.NewRevocationStore(),
- RevocationProducer: shaChainProducer,
- },
- }
-
- return &chanShell, nil
-}
-
-// RestoreChansFromSingles attempts to map the set of single channel backups to
-// channel shells that will be stored persistently. Once these shells have been
-// stored on disk, we'll be able to connect to the channel peer an execute the
-// data loss recovery protocol.
-//
-// NOTE: Part of the chanbackup.ChannelRestorer interface.
-func (c *chanDBRestorer) RestoreChansFromSingles(backups ...chanbackup.Single) er.R {
- channelShells := make([]*channeldb.ChannelShell, 0, len(backups))
- firstChanHeight := uint32(math.MaxUint32)
- for _, backup := range backups {
- chanShell, err := c.openChannelShell(backup)
- if err != nil {
- return err
- }
-
- // Find the block height of the earliest channel in this backup.
- chanHeight := chanShell.Chan.ShortChanID().BlockHeight
- if chanHeight != 0 && chanHeight < firstChanHeight {
- firstChanHeight = chanHeight
- }
-
- channelShells = append(channelShells, chanShell)
- }
-
- // In case there were only unconfirmed channels, we will have to scan
- // the chain beginning from the launch date of SCBs.
- if firstChanHeight == math.MaxUint32 {
- chainHash := channelShells[0].Chan.ChainHash
- switch {
- case chainHash.IsEqual(chaincfg.MainNetParams.GenesisHash):
- firstChanHeight = mainnetSCBLaunchBlock
-
- case chainHash.IsEqual(chaincfg.TestNet3Params.GenesisHash):
- firstChanHeight = testnetSCBLaunchBlock
-
- default:
- // Worst case: We have no height hint and start at
- // block 1. Should only happen for SCBs in regtest,
- // simnet and litecoin.
- firstChanHeight = 1
- }
- }
-
- // If there were channels in the backup that were not confirmed at the
- // time of the backup creation, they won't have a block height in the
- // ShortChanID which would lead to an error in the chain watcher.
- // We want to at least set the funding broadcast height that the chain
- // watcher can use instead. We have two possible fallback values for
- // the broadcast height that we are going to try here.
- for _, chanShell := range channelShells {
- channel := chanShell.Chan
-
- switch {
- // Fallback case 1: It is extremely unlikely at this point that
- // a channel we are trying to restore has a coinbase funding TX.
- // Therefore we can be quite certain that if the TxIndex is
- // zero, it was an unconfirmed channel where we used the
- // BlockHeight to encode the funding TX broadcast height. To not
- // end up with an invalid short channel ID that looks valid, we
- // restore the "original" unconfirmed one here.
- case channel.ShortChannelID.TxIndex == 0:
- broadcastHeight := channel.ShortChannelID.BlockHeight
- channel.FundingBroadcastHeight = broadcastHeight
- channel.ShortChannelID.BlockHeight = 0
-
- // Fallback case 2: This is an unconfirmed channel from an old
- // backup file where we didn't have any workaround in place.
- // Best we can do here is set the funding broadcast height to a
- // reasonable value that we determined earlier.
- case channel.ShortChanID().BlockHeight == 0:
- channel.FundingBroadcastHeight = firstChanHeight
- }
- }
-
- log.Infof("Inserting %v SCB channel shells into DB",
- len(channelShells))
-
- // Now that we have all the backups mapped into a series of Singles,
- // we'll insert them all into the database.
- if err := c.db.RestoreChannelShells(channelShells...); err != nil {
- return err
- }
-
- log.Infof("Informing chain watchers of new restored channels")
-
- // Finally, we'll need to inform the chain arbitrator of these new
- // channels so we'll properly watch for their ultimate closure on chain
- // and sweep them via the DLP.
- for _, restoredChannel := range channelShells {
- err := c.chainArb.WatchNewChannel(restoredChannel.Chan)
- if err != nil {
- return err
- }
- }
-
- return nil
-}
-
-// A compile-time constraint to ensure chanDBRestorer implements
-// chanbackup.ChannelRestorer.
-var _ chanbackup.ChannelRestorer = (*chanDBRestorer)(nil)
-
-// ConnectPeer attempts to connect to the target node at the set of available
-// addresses. Once this method returns with a non-nil error, the connector
-// should attempt to persistently connect to the target peer in the background
-// as a persistent attempt.
-//
-// NOTE: Part of the chanbackup.PeerConnector interface.
-func (s *server) ConnectPeer(nodePub *btcec.PublicKey, addrs []net.Addr) er.R {
- // Before we connect to the remote peer, we'll remove any connections
- // to ensure the new connection is created after this new link/channel
- // is known.
- if err := s.DisconnectPeer(nodePub); err != nil {
- log.Infof("Peer(%v) is already connected, proceeding "+
- "with chan restore", nodePub.SerializeCompressed())
- }
-
- // For each of the known addresses, we'll attempt to launch a
- // persistent connection to the (pub, addr) pair. In the event that any
- // of them connect, all the other stale requests will be canceled.
- for _, addr := range addrs {
- netAddr := &lnwire.NetAddress{
- IdentityKey: nodePub,
- Address: addr,
- }
-
- log.Infof("Attempting to connect to %v for SCB restore "+
- "DLP", netAddr)
-
- // Attempt to connect to the peer using this full address. If
- // we're unable to connect to them, then we'll try the next
- // address in place of it.
- err := s.ConnectToPeer(netAddr, true, s.cfg.ConnectionTimeout)
-
- // If we're already connected to this peer, then we don't
- // consider this an error, so we'll exit here.
- errr := er.Wrapped(err)
- if _, ok := errr.(*errPeerAlreadyConnected); ok {
- return nil
-
- } else if err != nil {
- // Otherwise, something else happened, so we'll try the
- // next address.
- log.Errorf("unable to connect to %v to "+
- "complete SCB restore: %v", netAddr, err)
- continue
- }
-
- // If we connected no problem, then we can exit early as our
- // job here is done.
- return nil
- }
-
- return er.Errorf("unable to connect to peer %x for SCB restore",
- nodePub.SerializeCompressed())
-}
diff --git a/lnd/clock/default_clock.go b/lnd/clock/default_clock.go
deleted file mode 100644
index 3a4f8df3..00000000
--- a/lnd/clock/default_clock.go
+++ /dev/null
@@ -1,24 +0,0 @@
-package clock
-
-import (
- "time"
-)
-
-// DefaultClock implements Clock interface by simply calling the appropriate
-// time functions.
-type DefaultClock struct{}
-
-// NewDefaultClock constructs a new DefaultClock.
-func NewDefaultClock() Clock {
- return &DefaultClock{}
-}
-
-// Now simply returns time.Now().
-func (DefaultClock) Now() time.Time {
- return time.Now()
-}
-
-// TickAfter simply wraps time.After().
-func (DefaultClock) TickAfter(duration time.Duration) <-chan time.Time {
- return time.After(duration)
-}
diff --git a/lnd/clock/interface.go b/lnd/clock/interface.go
deleted file mode 100644
index 0450410e..00000000
--- a/lnd/clock/interface.go
+++ /dev/null
@@ -1,16 +0,0 @@
-package clock
-
-import (
- "time"
-)
-
-// Clock is an interface that provides a time functions for LND packages.
-// This is useful during testing when a concrete time reference is needed.
-type Clock interface {
- // Now returns the current local time (as defined by the Clock).
- Now() time.Time
-
- // TickAfter returns a channel that will receive a tick after the specified
- // duration has passed.
- TickAfter(duration time.Duration) <-chan time.Time
-}
diff --git a/lnd/clock/test_clock.go b/lnd/clock/test_clock.go
deleted file mode 100644
index 85e33d4f..00000000
--- a/lnd/clock/test_clock.go
+++ /dev/null
@@ -1,96 +0,0 @@
-package clock
-
-import (
- "sync"
- "time"
-)
-
-// TestClock can be used in tests to mock time.
-type TestClock struct {
- currentTime time.Time
- timeChanMap map[time.Time][]chan time.Time
- timeLock sync.Mutex
- tickSignal chan time.Duration
-}
-
-// NewTestClock returns a new test clock.
-func NewTestClock(startTime time.Time) *TestClock {
- return &TestClock{
- currentTime: startTime,
- timeChanMap: make(map[time.Time][]chan time.Time),
- }
-}
-
-// NewTestClockWithTickSignal will create a new test clock with an added
-// channel which will be used to signal when a new ticker is registered.
-// This is useful when creating a ticker on a separate goroutine and we'd
-// like to wait for that to happen before advancing the test case.
-func NewTestClockWithTickSignal(startTime time.Time,
- tickSignal chan time.Duration) *TestClock {
-
- testClock := NewTestClock(startTime)
- testClock.tickSignal = tickSignal
-
- return testClock
-}
-
-// Now returns the current (test) time.
-func (c *TestClock) Now() time.Time {
- c.timeLock.Lock()
- defer c.timeLock.Unlock()
-
- return c.currentTime
-}
-
-// TickAfter returns a channel that will receive a tick after the specified
-// duration has passed passed by the user set test time.
-func (c *TestClock) TickAfter(duration time.Duration) <-chan time.Time {
- c.timeLock.Lock()
- defer func() {
- c.timeLock.Unlock()
-
- // Signal that the ticker has been added.
- if c.tickSignal != nil {
- c.tickSignal <- duration
- }
- }()
-
- triggerTime := c.currentTime.Add(duration)
- ch := make(chan time.Time, 1)
-
- // If already expired, tick immediately.
- if !triggerTime.After(c.currentTime) {
- ch <- c.currentTime
- return ch
- }
-
- // Otherwise store the channel until the trigger time is there.
- chans := c.timeChanMap[triggerTime]
- chans = append(chans, ch)
- c.timeChanMap[triggerTime] = chans
-
- return ch
-}
-
-// SetTime sets the (test) time and triggers tick channels when they expire.
-func (c *TestClock) SetTime(now time.Time) {
- c.timeLock.Lock()
- defer c.timeLock.Unlock()
-
- c.currentTime = now
- remainingChans := make(map[time.Time][]chan time.Time)
- for triggerTime, chans := range c.timeChanMap {
- // If the trigger time is still in the future, keep this channel
- // in the channel map for later.
- if triggerTime.After(now) {
- remainingChans[triggerTime] = chans
- continue
- }
-
- for _, c := range chans {
- c <- now
- }
- }
-
- c.timeChanMap = remainingChans
-}
diff --git a/lnd/clock/test_clock_test.go b/lnd/clock/test_clock_test.go
deleted file mode 100644
index 36ad3aea..00000000
--- a/lnd/clock/test_clock_test.go
+++ /dev/null
@@ -1,91 +0,0 @@
-package clock
-
-import (
- "testing"
- "time"
-
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/stretchr/testify/assert"
-)
-
-var (
- testTime = time.Date(2009, time.January, 3, 12, 0, 0, 0, time.UTC)
-)
-
-func TestNow(t *testing.T) {
- c := NewTestClock(testTime)
- now := c.Now()
- assert.Equal(t, testTime, now)
-
- now = now.Add(time.Hour)
- c.SetTime(now)
- assert.Equal(t, now, c.Now())
-}
-
-func TestTickAfter(t *testing.T) {
- c := NewTestClock(testTime)
-
- // Should be ticking immediately.
- ticker0 := c.TickAfter(0)
-
- // Both should be ticking after SetTime
- ticker1 := c.TickAfter(time.Hour)
- ticker2 := c.TickAfter(time.Hour)
-
- // We don't expect this one to tick.
- ticker3 := c.TickAfter(2 * time.Hour)
-
- tickOrTimeOut := func(ticker <-chan time.Time, expectTick bool) {
- tick := false
- select {
- case <-ticker:
- tick = true
-
- case <-time.After(time.Millisecond):
- }
-
- assert.Equal(t, expectTick, tick)
- }
-
- tickOrTimeOut(ticker0, true)
- tickOrTimeOut(ticker1, false)
- tickOrTimeOut(ticker2, false)
- tickOrTimeOut(ticker3, false)
-
- c.SetTime(c.Now().Add(time.Hour))
-
- tickOrTimeOut(ticker1, true)
- tickOrTimeOut(ticker2, true)
- tickOrTimeOut(ticker3, false)
-}
-
-// TestTickSignal tests that TickAfter signals registration allowing
-// safe time advancement.
-func TestTickSignal(t *testing.T) {
- const interval = time.Second
-
- ch := make(chan time.Duration)
- c := NewTestClockWithTickSignal(testTime, ch)
- err := make(chan er.R, 1)
-
- go func() {
- select {
- // TickAfter will signal registration but will not
- // tick, unless we read the signal and set the time.
- case <-c.TickAfter(interval):
- err <- nil
-
- // Signal timeout if tick didn't happen.
- case <-time.After(time.Second):
- err <- er.Errorf("timeout")
- }
- }()
-
- tick := <-ch
- // Expect that the interval is correctly passed over the channel.
- assert.Equal(t, interval, tick)
-
- // Once the ticker is registered, set the time to make it fire.
- c.SetTime(testTime.Add(time.Second))
- assert.NoError(t, er.Native(<-err))
-}
diff --git a/lnd/cmd/lncli/arg_parse.go b/lnd/cmd/lncli/arg_parse.go
deleted file mode 100644
index 17ca459c..00000000
--- a/lnd/cmd/lncli/arg_parse.go
+++ /dev/null
@@ -1,43 +0,0 @@
-package main
-
-import (
- "regexp"
- "strconv"
- "time"
-
- "github.com/pkt-cash/pktd/btcutil/er"
-)
-
-// reTimeRange matches systemd.time-like short negative timeranges, e.g. "-200s".
-var reTimeRange = regexp.MustCompile(`^-\d{1,18}[s|m|h|d|w|M|y]$`)
-
-// secondsPer allows translating s(seconds), m(minutes), h(ours), d(ays),
-// w(eeks), M(onths) and y(ears) into corresponding seconds.
-var secondsPer = map[string]int64{
- "s": 1,
- "m": 60,
- "h": 3600,
- "d": 86400,
- "w": 604800,
- "M": 2630016, // 30.44 days
- "y": 31557600, // 365.25 days
-}
-
-// parseTime parses UNIX timestamps or short timeranges inspired by sytemd (when starting with "-"),
-// e.g. "-1M" for one month (30.44 days) ago.
-func parseTime(s string, base time.Time) (uint64, er.R) {
- if reTimeRange.MatchString(s) {
- last := len(s) - 1
-
- d, errr := strconv.ParseInt(s[1:last], 10, 64)
- if errr != nil {
- return uint64(0), er.E(errr)
- }
-
- mul := secondsPer[string(s[last])]
- return uint64(base.Unix() - d*mul), nil
- }
-
- i, e := strconv.ParseUint(s, 10, 64)
- return i, er.E(e)
-}
diff --git a/lnd/cmd/lncli/arg_parse_test.go b/lnd/cmd/lncli/arg_parse_test.go
deleted file mode 100644
index 9de6f895..00000000
--- a/lnd/cmd/lncli/arg_parse_test.go
+++ /dev/null
@@ -1,88 +0,0 @@
-package main
-
-import (
- "testing"
- "time"
-)
-
-var now = time.Date(2017, 11, 10, 7, 8, 9, 1234, time.UTC)
-
-var partTimeTests = []struct {
- in string
- expected uint64
- errExpected bool
-}{
- {
- "12345",
- uint64(12345),
- false,
- },
- {
- "-0s",
- uint64(now.Unix()),
- false,
- },
- {
- "-1s",
- uint64(time.Date(2017, 11, 10, 7, 8, 8, 1234, time.UTC).Unix()),
- false,
- },
- {
- "-2h",
- uint64(time.Date(2017, 11, 10, 5, 8, 9, 1234, time.UTC).Unix()),
- false,
- },
- {
- "-3d",
- uint64(time.Date(2017, 11, 7, 7, 8, 9, 1234, time.UTC).Unix()),
- false,
- },
- {
- "-4w",
- uint64(time.Date(2017, 10, 13, 7, 8, 9, 1234, time.UTC).Unix()),
- false,
- },
- {
- "-5M",
- uint64(now.Unix() - 30.44*5*24*60*60),
- false,
- },
- {
- "-6y",
- uint64(now.Unix() - 365.25*6*24*60*60),
- false,
- },
- {
- "-999999999999999999s",
- uint64(now.Unix() - 999999999999999999),
- false,
- },
- {
- "-9999999999999999991s",
- 0,
- true,
- },
- {
- "-7z",
- 0,
- true,
- },
-}
-
-// Test that parsing absolute and relative times works.
-func TestParseTime(t *testing.T) {
- for _, test := range partTimeTests {
- actual, err := parseTime(test.in, now)
- if test.errExpected == (err == nil) {
- t.Fatalf("unexpected error for %s:\n%v\n", test.in, err)
- }
- if actual != test.expected {
- t.Fatalf(
- "for %s actual and expected do not match:\n%d\n%d\n",
- test.in,
- actual,
- test.expected,
- )
- }
- }
-}
diff --git a/lnd/cmd/lncli/autopilotrpc_active.go b/lnd/cmd/lncli/autopilotrpc_active.go
deleted file mode 100644
index 498ae7ea..00000000
--- a/lnd/cmd/lncli/autopilotrpc_active.go
+++ /dev/null
@@ -1,163 +0,0 @@
-// +build autopilotrpc
-
-package main
-
-import (
- "context"
-
- "github.com/pkt-cash/pktd/lnd/lnrpc/autopilotrpc"
- "github.com/urfave/cli"
-)
-
-func getAutopilotClient(ctx *cli.Context) (autopilotrpc.AutopilotClient, func()) {
- conn := getClientConn(ctx, false)
-
- cleanUp := func() {
- conn.Close()
- }
-
- return autopilotrpc.NewAutopilotClient(conn), cleanUp
-}
-
-var getStatusCommand = cli.Command{
- Name: "status",
- Usage: "Get the active status of autopilot.",
- Description: "",
- Action: actionDecorator(getStatus),
-}
-
-func getStatus(ctx *cli.Context) er.R {
- ctxb := context.Background()
- client, cleanUp := getAutopilotClient(ctx)
- defer cleanUp()
-
- req := &autopilotrpc.StatusRequest{}
-
- resp, err := client.Status(ctxb, req)
- if err != nil {
- return err
- }
-
- printRespJSON(resp)
- return nil
-}
-
-var enableCommand = cli.Command{
- Name: "enable",
- Usage: "Enable the autopilot.",
- Description: "",
- Action: actionDecorator(enable),
-}
-
-var disableCommand = cli.Command{
- Name: "disable",
- Usage: "Disable the active autopilot.",
- Description: "",
- Action: actionDecorator(disable),
-}
-
-func enable(ctx *cli.Context) er.R {
- ctxb := context.Background()
- client, cleanUp := getAutopilotClient(ctx)
- defer cleanUp()
-
- // We will enable the autopilot.
- req := &autopilotrpc.ModifyStatusRequest{
- Enable: true,
- }
-
- resp, err := client.ModifyStatus(ctxb, req)
- if err != nil {
- return err
- }
-
- printRespJSON(resp)
- return nil
-}
-
-func disable(ctx *cli.Context) er.R {
- ctxb := context.Background()
- client, cleanUp := getAutopilotClient(ctx)
- defer cleanUp()
-
- // We will disable the autopilot.
- req := &autopilotrpc.ModifyStatusRequest{
- Enable: false,
- }
-
- resp, err := client.ModifyStatus(ctxb, req)
- if err != nil {
- return err
- }
-
- printRespJSON(resp)
- return nil
-}
-
-var queryScoresCommand = cli.Command{
- Name: "query",
- Usage: "Query the autopilot heuristics for nodes' scores.",
- ArgsUsage: "[flags] ...",
- Description: "",
- Action: actionDecorator(queryScores),
- Flags: []cli.Flag{
- cli.BoolFlag{
- Name: "ignorelocalstate, i",
- Usage: "Ignore local channel state when calculating " +
- "scores.",
- },
- },
-}
-
-func queryScores(ctx *cli.Context) er.R {
- ctxb := context.Background()
- client, cleanUp := getAutopilotClient(ctx)
- defer cleanUp()
-
- args := ctx.Args()
- var pubs []string
-
- // Keep reading pubkeys as long as there are arguments.
-loop:
- for {
- switch {
- case args.Present():
- pubs = append(pubs, args.First())
- args = args.Tail()
- default:
- break loop
- }
- }
-
- req := &autopilotrpc.QueryScoresRequest{
- Pubkeys: pubs,
- IgnoreLocalState: ctx.Bool("ignorelocalstate"),
- }
-
- resp, err := client.QueryScores(ctxb, req)
- if err != nil {
- return err
- }
-
- printRespJSON(resp)
- return nil
-}
-
-// autopilotCommands will return the set of commands to enable for autopilotrpc
-// builds.
-func autopilotCommands() []cli.Command {
- return []cli.Command{
- {
- Name: "autopilot",
- Category: "Autopilot",
- Usage: "Interact with a running autopilot.",
- Description: "",
- Subcommands: []cli.Command{
- getStatusCommand,
- enableCommand,
- disableCommand,
- queryScoresCommand,
- },
- },
- }
-}
diff --git a/lnd/cmd/lncli/autopilotrpc_default.go b/lnd/cmd/lncli/autopilotrpc_default.go
deleted file mode 100644
index 49061254..00000000
--- a/lnd/cmd/lncli/autopilotrpc_default.go
+++ /dev/null
@@ -1,10 +0,0 @@
-// +build !autopilotrpc
-
-package main
-
-import "github.com/urfave/cli"
-
-// autopilotCommands will return nil for non-autopilotrpc builds.
-func autopilotCommands() []cli.Command {
- return nil
-}
diff --git a/lnd/cmd/lncli/cmd_build_route.go b/lnd/cmd/lncli/cmd_build_route.go
deleted file mode 100644
index 62c4288a..00000000
--- a/lnd/cmd/lncli/cmd_build_route.go
+++ /dev/null
@@ -1,91 +0,0 @@
-package main
-
-import (
- "context"
- "strings"
-
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/lnd/chainreg"
- "github.com/pkt-cash/pktd/lnd/lnrpc/routerrpc"
- "github.com/pkt-cash/pktd/lnd/routing/route"
- "github.com/urfave/cli"
-)
-
-var buildRouteCommand = cli.Command{
- Name: "buildroute",
- Category: "Payments",
- Usage: "Build a route from a list of hop pubkeys.",
- Action: actionDecorator(buildRoute),
- Flags: []cli.Flag{
- cli.Int64Flag{
- Name: "amt",
- Usage: "the amount to send expressed in satoshis. If" +
- "not set, the minimum routable amount is used",
- },
- cli.Int64Flag{
- Name: "final_cltv_delta",
- Usage: "number of blocks the last hop has to reveal " +
- "the preimage",
- Value: chainreg.DefaultBitcoinTimeLockDelta,
- },
- cli.StringFlag{
- Name: "hops",
- Usage: "comma separated hex pubkeys",
- },
- cli.Uint64Flag{
- Name: "outgoing_chan_id",
- Usage: "short channel id of the outgoing channel to " +
- "use for the first hop of the payment",
- Value: 0,
- },
- },
-}
-
-func buildRoute(ctx *cli.Context) er.R {
- conn := getClientConn(ctx, false)
- defer conn.Close()
-
- client := routerrpc.NewRouterClient(conn)
-
- if !ctx.IsSet("hops") {
- return er.New("hops required")
- }
-
- // Build list of hop addresses for the rpc.
- hops := strings.Split(ctx.String("hops"), ",")
- rpcHops := make([][]byte, 0, len(hops))
- for _, k := range hops {
- pubkey, err := route.NewVertexFromStr(k)
- if err != nil {
- return er.Errorf("error parsing %v: %v", k, err)
- }
- rpcHops = append(rpcHops, pubkey[:])
- }
-
- var amtMsat int64
- hasAmt := ctx.IsSet("amt")
- if hasAmt {
- amtMsat = ctx.Int64("amt") * 1000
- if amtMsat == 0 {
- return er.Errorf("non-zero amount required")
- }
- }
-
- // Call BuildRoute rpc.
- req := &routerrpc.BuildRouteRequest{
- AmtMsat: amtMsat,
- FinalCltvDelta: int32(ctx.Int64("final_cltv_delta")),
- HopPubkeys: rpcHops,
- OutgoingChanId: ctx.Uint64("outgoing_chan_id"),
- }
-
- rpcCtx := context.Background()
- route, err := client.BuildRoute(rpcCtx, req)
- if err != nil {
- return er.E(err)
- }
-
- printRespJSON(route)
-
- return nil
-}
diff --git a/lnd/cmd/lncli/cmd_invoice.go b/lnd/cmd/lncli/cmd_invoice.go
deleted file mode 100644
index d5f37e41..00000000
--- a/lnd/cmd/lncli/cmd_invoice.go
+++ /dev/null
@@ -1,289 +0,0 @@
-package main
-
-import (
- "context"
- "strconv"
-
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/btcutil/util"
- "github.com/pkt-cash/pktd/lnd/lnrpc"
- "github.com/urfave/cli"
-)
-
-var addInvoiceCommand = cli.Command{
- Name: "addinvoice",
- Category: "Invoices",
- Usage: "Add a new invoice.",
- Description: `
- Add a new invoice, expressing intent for a future payment.
-
- Invoices without an amount can be created by not supplying any
- parameters or providing an amount of 0. These invoices allow the payee
- to specify the amount of satoshis they wish to send.`,
- ArgsUsage: "value preimage",
- Flags: []cli.Flag{
- cli.StringFlag{
- Name: "memo",
- Usage: "a description of the payment to attach along " +
- "with the invoice (default=\"\")",
- },
- cli.StringFlag{
- Name: "preimage",
- Usage: "the hex-encoded preimage (32 byte) which will " +
- "allow settling an incoming HTLC payable to this " +
- "preimage. If not set, a random preimage will be " +
- "created.",
- },
- cli.Int64Flag{
- Name: "amt",
- Usage: "the amt of satoshis in this invoice",
- },
- cli.StringFlag{
- Name: "description_hash",
- Usage: "SHA-256 hash of the description of the payment. " +
- "Used if the purpose of payment cannot naturally " +
- "fit within the memo. If provided this will be " +
- "used instead of the description(memo) field in " +
- "the encoded invoice.",
- },
- cli.StringFlag{
- Name: "fallback_addr",
- Usage: "fallback on-chain address that can be used in " +
- "case the lightning payment fails",
- },
- cli.Int64Flag{
- Name: "expiry",
- Usage: "the invoice's expiry time in seconds. If not " +
- "specified an expiry of 3600 seconds (1 hour) " +
- "is implied.",
- },
- cli.BoolTFlag{
- Name: "private",
- Usage: "encode routing hints in the invoice with " +
- "private channels in order to assist the " +
- "payer in reaching you",
- },
- },
- Action: actionDecorator(addInvoice),
-}
-
-func addInvoice(ctx *cli.Context) er.R {
- var (
- preimage []byte
- descHash []byte
- amt int64
- err er.R
- errr error
- )
-
- client, cleanUp := getClient(ctx)
- defer cleanUp()
-
- args := ctx.Args()
-
- switch {
- case ctx.IsSet("amt"):
- amt = ctx.Int64("amt")
- case args.Present():
- amt, errr = strconv.ParseInt(args.First(), 10, 64)
- args = args.Tail()
- if errr != nil {
- return er.Errorf("unable to decode amt argument: %v", errr)
- }
- }
-
- switch {
- case ctx.IsSet("preimage"):
- preimage, err = util.DecodeHex(ctx.String("preimage"))
- case args.Present():
- preimage, err = util.DecodeHex(args.First())
- }
-
- if err != nil {
- return er.Errorf("unable to parse preimage: %v", err)
- }
-
- descHash, err = util.DecodeHex(ctx.String("description_hash"))
- if err != nil {
- return er.Errorf("unable to parse description_hash: %v", err)
- }
-
- invoice := &lnrpc.Invoice{
- Memo: ctx.String("memo"),
- RPreimage: preimage,
- Value: amt,
- DescriptionHash: descHash,
- FallbackAddr: ctx.String("fallback_addr"),
- Expiry: ctx.Int64("expiry"),
- Private: ctx.Bool("private"),
- }
-
- resp, errr := client.AddInvoice(context.Background(), invoice)
- if errr != nil {
- return er.E(errr)
- }
-
- printRespJSON(resp)
-
- return nil
-}
-
-var lookupInvoiceCommand = cli.Command{
- Name: "lookupinvoice",
- Category: "Invoices",
- Usage: "Lookup an existing invoice by its payment hash.",
- ArgsUsage: "rhash",
- Flags: []cli.Flag{
- cli.StringFlag{
- Name: "rhash",
- Usage: "the 32 byte payment hash of the invoice to query for, the hash " +
- "should be a hex-encoded string",
- },
- },
- Action: actionDecorator(lookupInvoice),
-}
-
-func lookupInvoice(ctx *cli.Context) er.R {
- client, cleanUp := getClient(ctx)
- defer cleanUp()
-
- var (
- rHash []byte
- err er.R
- )
-
- switch {
- case ctx.IsSet("rhash"):
- rHash, err = util.DecodeHex(ctx.String("rhash"))
- case ctx.Args().Present():
- rHash, err = util.DecodeHex(ctx.Args().First())
- default:
- return er.Errorf("rhash argument missing")
- }
-
- if err != nil {
- return er.Errorf("unable to decode rhash argument: %v", err)
- }
-
- req := &lnrpc.PaymentHash{
- RHash: rHash,
- }
-
- invoice, errr := client.LookupInvoice(context.Background(), req)
- if errr != nil {
- return er.E(errr)
- }
-
- printRespJSON(invoice)
-
- return nil
-}
-
-var listInvoicesCommand = cli.Command{
- Name: "listinvoices",
- Category: "Invoices",
- Usage: "List all invoices currently stored within the database. Any " +
- "active debug invoices are ignored.",
- Description: `
- This command enables the retrieval of all invoices currently stored
- within the database. It has full support for paginationed responses,
- allowing users to query for specific invoices through their add_index.
- This can be done by using either the first_index_offset or
- last_index_offset fields included in the response as the index_offset of
- the next request. Backward pagination is enabled by default to receive
- current invoices first. If you wish to paginate forwards, set the
- paginate-forwards flag. If none of the parameters are specified, then
- the last 100 invoices will be returned.
-
- For example: if you have 200 invoices, "lncli listinvoices" will return
- the last 100 created. If you wish to retrieve the previous 100, the
- first_offset_index of the response can be used as the index_offset of
- the next listinvoices request.`,
- Flags: []cli.Flag{
- cli.BoolFlag{
- Name: "pending_only",
- Usage: "toggles if all invoices should be returned, " +
- "or only those that are currently unsettled",
- },
- cli.Uint64Flag{
- Name: "index_offset",
- Usage: "the index of an invoice that will be used as " +
- "either the start or end of a query to " +
- "determine which invoices should be returned " +
- "in the response",
- },
- cli.Uint64Flag{
- Name: "max_invoices",
- Usage: "the max number of invoices to return",
- },
- cli.BoolFlag{
- Name: "paginate-forwards",
- Usage: "if set, invoices succeeding the " +
- "index_offset will be returned",
- },
- },
- Action: actionDecorator(listInvoices),
-}
-
-func listInvoices(ctx *cli.Context) er.R {
- client, cleanUp := getClient(ctx)
- defer cleanUp()
-
- req := &lnrpc.ListInvoiceRequest{
- PendingOnly: ctx.Bool("pending_only"),
- IndexOffset: ctx.Uint64("index_offset"),
- NumMaxInvoices: ctx.Uint64("max_invoices"),
- Reversed: !ctx.Bool("paginate-forwards"),
- }
-
- invoices, errr := client.ListInvoices(context.Background(), req)
- if errr != nil {
- return er.E(errr)
- }
-
- printRespJSON(invoices)
-
- return nil
-}
-
-var decodePayReqCommand = cli.Command{
- Name: "decodepayreq",
- Category: "Invoices",
- Usage: "Decode a payment request.",
- Description: "Decode the passed payment request revealing the destination, payment hash and value of the payment request",
- ArgsUsage: "pay_req",
- Flags: []cli.Flag{
- cli.StringFlag{
- Name: "pay_req",
- Usage: "the bech32 encoded payment request",
- },
- },
- Action: actionDecorator(decodePayReq),
-}
-
-func decodePayReq(ctx *cli.Context) er.R {
- ctxb := context.Background()
- client, cleanUp := getClient(ctx)
- defer cleanUp()
-
- var payreq string
-
- switch {
- case ctx.IsSet("pay_req"):
- payreq = ctx.String("pay_req")
- case ctx.Args().Present():
- payreq = ctx.Args().First()
- default:
- return er.Errorf("pay_req argument missing")
- }
-
- resp, errr := client.DecodePayReq(ctxb, &lnrpc.PayReqString{
- PayReq: payreq,
- })
- if errr != nil {
- return er.E(errr)
- }
-
- printRespJSON(resp)
- return nil
-}
diff --git a/lnd/cmd/lncli/cmd_macaroon.go b/lnd/cmd/lncli/cmd_macaroon.go
deleted file mode 100644
index adcddaf0..00000000
--- a/lnd/cmd/lncli/cmd_macaroon.go
+++ /dev/null
@@ -1,416 +0,0 @@
-package main
-
-import (
- "bytes"
- "context"
- "encoding/hex"
- "fmt"
- "io/ioutil"
- "net"
- "strconv"
- "strings"
-
- "github.com/golang/protobuf/proto"
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/btcutil/util"
- "github.com/pkt-cash/pktd/lnd/lncfg"
- "github.com/pkt-cash/pktd/lnd/lnrpc"
- "github.com/pkt-cash/pktd/lnd/macaroons"
- "github.com/urfave/cli"
- "gopkg.in/macaroon-bakery.v2/bakery"
- "gopkg.in/macaroon.v2"
-)
-
-var bakeMacaroonCommand = cli.Command{
- Name: "bakemacaroon",
- Category: "Macaroons",
- Usage: "Bakes a new macaroon with the provided list of permissions " +
- "and restrictions.",
- ArgsUsage: "[--save_to=] [--timeout=] [--ip_address=] permissions...",
- Description: `
- Bake a new macaroon that grants the provided permissions and
- optionally adds restrictions (timeout, IP address) to it.
-
- The new macaroon can either be shown on command line in hex serialized
- format or it can be saved directly to a file using the --save_to
- argument.
-
- A permission is a tuple of an entity and an action, separated by a
- colon. Multiple operations can be added as arguments, for example:
-
- lncli bakemacaroon info:read invoices:write foo:bar
-
- For even more fine-grained permission control, it is also possible to
- specify single RPC method URIs that are allowed to be accessed by a
- macaroon. This can be achieved by specifying "uri:" pairs,
- for example:
-
- lncli bakemacaroon uri:/lnrpc.Lightning/GetInfo uri:/verrpc.Versioner/GetVersion
-
- The macaroon created by this command would only be allowed to use the
- "lncli getinfo" and "lncli version" commands.
-
- To get a list of all available URIs and permissions, use the
- "lncli listpermissions" command.
- `,
- Flags: []cli.Flag{
- cli.StringFlag{
- Name: "save_to",
- Usage: "save the created macaroon to this file " +
- "using the default binary format",
- },
- cli.Uint64Flag{
- Name: "timeout",
- Usage: "the number of seconds the macaroon will be " +
- "valid before it times out",
- },
- cli.StringFlag{
- Name: "ip_address",
- Usage: "the IP address the macaroon will be bound to",
- },
- cli.Uint64Flag{
- Name: "root_key_id",
- Usage: "the numerical root key ID used to create the macaroon",
- },
- },
- Action: actionDecorator(bakeMacaroon),
-}
-
-func bakeMacaroon(ctx *cli.Context) er.R {
- client, cleanUp := getClient(ctx)
- defer cleanUp()
-
- // Show command help if no arguments.
- if ctx.NArg() == 0 {
- return er.E(cli.ShowCommandHelp(ctx, "bakemacaroon"))
- }
- args := ctx.Args()
-
- var (
- savePath string
- timeout int64
- ipAddress net.IP
- rootKeyID uint64
- parsedPermissions []*lnrpc.MacaroonPermission
- err er.R
- )
-
- if ctx.String("save_to") != "" {
- savePath = lncfg.CleanAndExpandPath(ctx.String("save_to"))
- }
-
- if ctx.IsSet("timeout") {
- timeout = ctx.Int64("timeout")
- if timeout <= 0 {
- return er.Errorf("timeout must be greater than 0")
- }
- }
-
- if ctx.IsSet("ip_address") {
- ipAddress = net.ParseIP(ctx.String("ip_address"))
- if ipAddress == nil {
- return er.Errorf("unable to parse ip_address: %s",
- ctx.String("ip_address"))
- }
- }
-
- if ctx.IsSet("root_key_id") {
- rootKeyID = ctx.Uint64("root_key_id")
- }
-
- // A command line argument can't be an empty string. So we'll check each
- // entry if it's a valid entity:action tuple. The content itself is
- // validated server side. We just make sure we can parse it correctly.
- for _, permission := range args {
- tuple := strings.Split(permission, ":")
- if len(tuple) != 2 {
- return er.Errorf("unable to parse "+
- "permission tuple: %s", permission)
- }
- entity, action := tuple[0], tuple[1]
- if entity == "" {
- return er.Errorf("invalid permission [%s]. entity "+
- "cannot be empty", permission)
- }
- if action == "" {
- return er.Errorf("invalid permission [%s]. action "+
- "cannot be empty", permission)
- }
-
- // No we can assume that we have a formally valid entity:action
- // tuple. The rest of the validation happens server side.
- parsedPermissions = append(
- parsedPermissions, &lnrpc.MacaroonPermission{
- Entity: entity,
- Action: action,
- },
- )
- }
-
- // Now we have gathered all the input we need and can do the actual
- // RPC call.
- req := &lnrpc.BakeMacaroonRequest{
- Permissions: parsedPermissions,
- RootKeyId: rootKeyID,
- }
- resp, errr := client.BakeMacaroon(context.Background(), req)
- if errr != nil {
- return er.E(errr)
- }
-
- // Now we should have gotten a valid macaroon. Unmarshal it so we can
- // add first-party caveats (if necessary) to it.
- macBytes, err := util.DecodeHex(resp.Macaroon)
- if err != nil {
- return err
- }
- unmarshalMac := &macaroon.Macaroon{}
- if errr := unmarshalMac.UnmarshalBinary(macBytes); errr != nil {
- return er.E(errr)
- }
-
- // Now apply the desired constraints to the macaroon. This will always
- // create a new macaroon object, even if no constraints are added.
- macConstraints := make([]macaroons.Constraint, 0)
- if timeout > 0 {
- macConstraints = append(
- macConstraints, macaroons.TimeoutConstraint(timeout),
- )
- }
- if ipAddress != nil {
- macConstraints = append(
- macConstraints,
- macaroons.IPLockConstraint(ipAddress.String()),
- )
- }
- constrainedMac, err := macaroons.AddConstraints(
- unmarshalMac, macConstraints...,
- )
- if err != nil {
- return err
- }
- macBytes, errr = constrainedMac.MarshalBinary()
- if errr != nil {
- return er.E(errr)
- }
-
- // Now we can output the result. We either write it binary serialized to
- // a file or write to the standard output using hex encoding.
- switch {
- case savePath != "":
- err := ioutil.WriteFile(savePath, macBytes, 0644)
- if err != nil {
- return er.E(err)
- }
- fmt.Printf("Macaroon saved to %s\n", savePath)
-
- default:
- fmt.Printf("%s\n", hex.EncodeToString(macBytes))
- }
-
- return nil
-}
-
-var listMacaroonIDsCommand = cli.Command{
- Name: "listmacaroonids",
- Category: "Macaroons",
- Usage: "List all macaroons root key IDs in use.",
- Action: actionDecorator(listMacaroonIDs),
-}
-
-func listMacaroonIDs(ctx *cli.Context) er.R {
- client, cleanUp := getClient(ctx)
- defer cleanUp()
-
- req := &lnrpc.ListMacaroonIDsRequest{}
- resp, errr := client.ListMacaroonIDs(context.Background(), req)
- if errr != nil {
- return er.E(errr)
- }
-
- printRespJSON(resp)
- return nil
-}
-
-var deleteMacaroonIDCommand = cli.Command{
- Name: "deletemacaroonid",
- Category: "Macaroons",
- Usage: "Delete a specific macaroon ID.",
- ArgsUsage: "root_key_id",
- Description: `
- Remove a macaroon ID using the specified root key ID. For example:
-
- lncli deletemacaroonid 1
-
- WARNING
- When the ID is deleted, all macaroons created from that root key will
- be invalidated.
-
- Note that the default root key ID 0 cannot be deleted.
- `,
- Action: actionDecorator(deleteMacaroonID),
-}
-
-func deleteMacaroonID(ctx *cli.Context) er.R {
- client, cleanUp := getClient(ctx)
- defer cleanUp()
-
- // Validate args length. Only one argument is allowed.
- if ctx.NArg() != 1 {
- return er.E(cli.ShowCommandHelp(ctx, "deletemacaroonid"))
- }
-
- rootKeyIDString := ctx.Args().First()
-
- // Convert string into uint64.
- rootKeyID, errr := strconv.ParseUint(rootKeyIDString, 10, 64)
- if errr != nil {
- return er.Errorf("root key ID must be a positive integer")
- }
-
- // Check that the value is not equal to DefaultRootKeyID. Note that the
- // server also validates the root key ID when removing it. However, we check
- // it here too so that we can give users a nice warning.
- if bytes.Equal([]byte(rootKeyIDString), macaroons.DefaultRootKeyID) {
- return er.Errorf("deleting the default root key ID 0 is not allowed")
- }
-
- // Make the actual RPC call.
- req := &lnrpc.DeleteMacaroonIDRequest{
- RootKeyId: rootKeyID,
- }
- resp, errr := client.DeleteMacaroonID(context.Background(), req)
- if errr != nil {
- return er.E(errr)
- }
-
- printRespJSON(resp)
- return nil
-}
-
-var listPermissionsCommand = cli.Command{
- Name: "listpermissions",
- Category: "Macaroons",
- Usage: "Lists all RPC method URIs and the macaroon permissions they " +
- "require to be invoked.",
- Action: actionDecorator(listPermissions),
-}
-
-func listPermissions(ctx *cli.Context) er.R {
- client, cleanUp := getClient(ctx)
- defer cleanUp()
-
- request := &lnrpc.ListPermissionsRequest{}
- response, errr := client.ListPermissions(context.Background(), request)
- if errr != nil {
- return er.E(errr)
- }
-
- printRespJSON(response)
-
- return nil
-}
-
-type macaroonContent struct {
- Version uint16 `json:"version"`
- Location string `json:"location"`
- RootKeyID string `json:"root_key_id"`
- Permissions []string `json:"permissions"`
- Caveats []string `json:"caveats"`
-}
-
-var printMacaroonCommand = cli.Command{
- Name: "printmacaroon",
- Category: "Macaroons",
- Usage: "Print the content of a macaroon in a human readable format.",
- ArgsUsage: "[macaroon_content_hex]",
- Description: `
- Decode a macaroon and show its content in a more human readable format.
- The macaroon can either be passed as a hex encoded positional parameter
- or loaded from a file.
- `,
- Flags: []cli.Flag{
- cli.StringFlag{
- Name: "macaroon_file",
- Usage: "load the macaroon from a file instead of the " +
- "command line directly",
- },
- },
- Action: actionDecorator(printMacaroon),
-}
-
-func printMacaroon(ctx *cli.Context) er.R {
- // Show command help if no arguments or flags are set.
- if ctx.NArg() == 0 && ctx.NumFlags() == 0 {
- return er.E(cli.ShowCommandHelp(ctx, "printmacaroon"))
- }
-
- var (
- macBytes []byte
- err er.R
- args = ctx.Args()
- )
- switch {
- case ctx.IsSet("macaroon_file"):
- macPath := lncfg.CleanAndExpandPath(ctx.String("macaroon_file"))
-
- // Load the specified macaroon file.
- var errr error
- macBytes, errr = ioutil.ReadFile(macPath)
- if errr != nil {
- return er.Errorf("unable to read macaroon path %v: %v",
- macPath, errr)
- }
-
- case args.Present():
- macBytes, err = util.DecodeHex(args.First())
- if err != nil {
- return er.Errorf("unable to hex decode macaroon: %v",
- err)
- }
-
- default:
- return er.Errorf("macaroon parameter missing")
- }
-
- // Decode the macaroon and its protobuf encoded internal identifier.
- mac := &macaroon.Macaroon{}
- if err := mac.UnmarshalBinary(macBytes); err != nil {
- return er.Errorf("unable to decode macaroon: %v", err)
- }
- rawID := mac.Id()
- if rawID[0] != byte(bakery.LatestVersion) {
- return er.Errorf("invalid macaroon version: %x", rawID)
- }
- decodedID := &lnrpc.MacaroonId{}
- idProto := rawID[1:]
- errr := proto.Unmarshal(idProto, decodedID)
- if errr != nil {
- return er.Errorf("unable to decode macaroon version: %v", errr)
- }
-
- // Prepare everything to be printed in a more human readable format.
- content := &macaroonContent{
- Version: uint16(mac.Version()),
- Location: mac.Location(),
- RootKeyID: string(decodedID.StorageId),
- Permissions: nil,
- Caveats: nil,
- }
-
- for _, caveat := range mac.Caveats() {
- content.Caveats = append(content.Caveats, string(caveat.Id))
- }
- for _, op := range decodedID.Ops {
- for _, action := range op.Actions {
- permission := fmt.Sprintf("%s:%s", op.Entity, action)
- content.Permissions = append(
- content.Permissions, permission,
- )
- }
- }
-
- printJSON(content)
-
- return nil
-}
diff --git a/lnd/cmd/lncli/cmd_open_channel.go b/lnd/cmd/lncli/cmd_open_channel.go
deleted file mode 100644
index 215036ef..00000000
--- a/lnd/cmd/lncli/cmd_open_channel.go
+++ /dev/null
@@ -1,731 +0,0 @@
-package main
-
-import (
- "bytes"
- "context"
- "crypto/rand"
- "encoding/base64"
- "encoding/hex"
- "fmt"
- "io"
- "strconv"
- "strings"
-
- "github.com/pkt-cash/pktd/btcutil"
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/btcutil/util"
- "github.com/pkt-cash/pktd/chaincfg/chainhash"
- "github.com/pkt-cash/pktd/lnd/lnrpc"
- "github.com/pkt-cash/pktd/lnd/lnwallet/chanfunding"
- "github.com/pkt-cash/pktd/lnd/signal"
- "github.com/pkt-cash/pktd/wire"
- "github.com/urfave/cli"
-)
-
-const (
- userMsgFund = `PSBT funding initiated with peer %x.
-Please create a PSBT that sends %v (%d satoshi) to the funding address %s.
-
-Note: The whole process should be completed within 10 minutes, otherwise there
-is a risk of the remote node timing out and canceling the funding process.
-
-Example with bitcoind:
- bitcoin-cli walletcreatefundedpsbt [] '[{"%s":%.8f}]'
-
-If you are using a wallet that can fund a PSBT directly (currently not possible
-with bitcoind), you can use this PSBT that contains the same address and amount:
-%s
-
-!!! WARNING !!!
-DO NOT PUBLISH the finished transaction by yourself or with another tool.
-lnd MUST publish it in the proper funding flow order OR THE FUNDS CAN BE LOST!
-
-Paste the funded PSBT here to continue the funding flow.
-Base64 encoded PSBT: `
-
- userMsgSign = `
-PSBT verified by lnd, please continue the funding flow by signing the PSBT by
-all required parties/devices. Once the transaction is fully signed, paste it
-again here either in base64 PSBT or hex encoded raw wire TX format.
-
-Signed base64 encoded PSBT or hex encoded raw wire TX: `
-)
-
-// TODO(roasbeef): change default number of confirmations
-var openChannelCommand = cli.Command{
- Name: "openchannel",
- Category: "Channels",
- Usage: "Open a channel to a node or an existing peer.",
- Description: `
- Attempt to open a new channel to an existing peer with the key node-key
- optionally blocking until the channel is 'open'.
-
- One can also connect to a node before opening a new channel to it by
- setting its host:port via the --connect argument. For this to work,
- the node_key must be provided, rather than the peer_id. This is optional.
-
- The channel will be initialized with local-amt satoshis local and push-amt
- satoshis for the remote node. Note that specifying push-amt means you give that
- amount to the remote node as part of the channel opening. Once the channel is open,
- a channelPoint (txid:vout) of the funding output is returned.
-
- If the remote peer supports the option upfront shutdown feature bit (query
- listpeers to see their supported feature bits), an address to enforce
- payout of funds on cooperative close can optionally be provided. Note that
- if you set this value, you will not be able to cooperatively close out to
- another address.
-
- One can manually set the fee to be used for the funding transaction via either
- the --conf_target or --sat_per_byte arguments. This is optional.`,
- ArgsUsage: "node-key local-amt push-amt",
- Flags: []cli.Flag{
- cli.StringFlag{
- Name: "node_key",
- Usage: "the identity public key of the target node/peer " +
- "serialized in compressed format",
- },
- cli.StringFlag{
- Name: "connect",
- Usage: "(optional) the host:port of the target node",
- },
- cli.IntFlag{
- Name: "local_amt",
- Usage: "the number of satoshis the wallet should commit to the channel",
- },
- cli.IntFlag{
- Name: "push_amt",
- Usage: "the number of satoshis to give the remote side " +
- "as part of the initial commitment state, " +
- "this is equivalent to first opening a " +
- "channel and sending the remote party funds, " +
- "but done all in one step",
- },
- cli.BoolFlag{
- Name: "block",
- Usage: "block and wait until the channel is fully open",
- },
- cli.Int64Flag{
- Name: "conf_target",
- Usage: "(optional) the number of blocks that the " +
- "transaction *should* confirm in, will be " +
- "used for fee estimation",
- },
- cli.Int64Flag{
- Name: "sat_per_byte",
- Usage: "(optional) a manual fee expressed in " +
- "sat/byte that should be used when crafting " +
- "the transaction",
- },
- cli.BoolFlag{
- Name: "private",
- Usage: "make the channel private, such that it won't " +
- "be announced to the greater network, and " +
- "nodes other than the two channel endpoints " +
- "must be explicitly told about it to be able " +
- "to route through it",
- },
- cli.Int64Flag{
- Name: "min_htlc_msat",
- Usage: "(optional) the minimum value we will require " +
- "for incoming HTLCs on the channel",
- },
- cli.Uint64Flag{
- Name: "remote_csv_delay",
- Usage: "(optional) the number of blocks we will require " +
- "our channel counterparty to wait before accessing " +
- "its funds in case of unilateral close. If this is " +
- "not set, we will scale the value according to the " +
- "channel size",
- },
- cli.Uint64Flag{
- Name: "max_local_csv",
- Usage: "(optional) the maximum number of blocks that " +
- "we will allow the remote peer to require we " +
- "wait before accessing our funds in the case " +
- "of a unilateral close.",
- },
- cli.Uint64Flag{
- Name: "min_confs",
- Usage: "(optional) the minimum number of confirmations " +
- "each one of your outputs used for the funding " +
- "transaction must satisfy",
- Value: defaultUtxoMinConf,
- },
- cli.StringFlag{
- Name: "close_address",
- Usage: "(optional) an address to enforce payout of our " +
- "funds to on cooperative close. Note that if this " +
- "value is set on channel open, you will *not* be " +
- "able to cooperatively close to a different address.",
- },
- cli.BoolFlag{
- Name: "psbt",
- Usage: "start an interactive mode that initiates " +
- "funding through a partially signed bitcoin " +
- "transaction (PSBT), allowing the channel " +
- "funds to be added and signed from a hardware " +
- "or other offline device.",
- },
- cli.StringFlag{
- Name: "base_psbt",
- Usage: "when using the interactive PSBT mode to open " +
- "a new channel, use this base64 encoded PSBT " +
- "as a base and add the new channel output to " +
- "it instead of creating a new, empty one.",
- },
- cli.BoolFlag{
- Name: "no_publish",
- Usage: "when using the interactive PSBT mode to open " +
- "multiple channels in a batch, this flag " +
- "instructs lnd to not publish the full batch " +
- "transaction just yet. For safety reasons " +
- "this flag should be set for each of the " +
- "batch's transactions except the very last",
- },
- cli.Uint64Flag{
- Name: "remote_max_value_in_flight_msat",
- Usage: "(optional) the maximum value in msat that " +
- "can be pending within the channel at any given time",
- },
- },
- Action: actionDecorator(openChannel),
-}
-
-func openChannel(ctx *cli.Context) er.R {
- // TODO(roasbeef): add deadline to context
- ctxb := context.Background()
- client, cleanUp := getClient(ctx)
- defer cleanUp()
-
- args := ctx.Args()
- var errr error
-
- // Show command help if no arguments provided
- if ctx.NArg() == 0 && ctx.NumFlags() == 0 {
- _ = cli.ShowCommandHelp(ctx, "openchannel")
- return nil
- }
-
- minConfs := int32(ctx.Uint64("min_confs"))
- req := &lnrpc.OpenChannelRequest{
- TargetConf: int32(ctx.Int64("conf_target")),
- SatPerByte: ctx.Int64("sat_per_byte"),
- MinHtlcMsat: ctx.Int64("min_htlc_msat"),
- RemoteCsvDelay: uint32(ctx.Uint64("remote_csv_delay")),
- MinConfs: minConfs,
- SpendUnconfirmed: minConfs == 0,
- CloseAddress: ctx.String("close_address"),
- RemoteMaxValueInFlightMsat: ctx.Uint64("remote_max_value_in_flight_msat"),
- MaxLocalCsv: uint32(ctx.Uint64("max_local_csv")),
- }
-
- switch {
- case ctx.IsSet("node_key"):
- nodePubHex, err := util.DecodeHex(ctx.String("node_key"))
- if err != nil {
- return er.Errorf("unable to decode node public key: %v", err)
- }
- req.NodePubkey = nodePubHex
-
- case args.Present():
- nodePubHex, err := util.DecodeHex(args.First())
- if err != nil {
- return er.Errorf("unable to decode node public key: %v", err)
- }
- args = args.Tail()
- req.NodePubkey = nodePubHex
- default:
- return er.Errorf("node id argument missing")
- }
-
- // As soon as we can confirm that the node's node_key was set, rather
- // than the peer_id, we can check if the host:port was also set to
- // connect to it before opening the channel.
- if req.NodePubkey != nil && ctx.IsSet("connect") {
- addr := &lnrpc.LightningAddress{
- Pubkey: hex.EncodeToString(req.NodePubkey),
- Host: ctx.String("connect"),
- }
-
- req := &lnrpc.ConnectPeerRequest{
- Addr: addr,
- Perm: false,
- }
-
- // Check if connecting to the node was successful.
- // We discard the peer id returned as it is not needed.
- _, err := client.ConnectPeer(ctxb, req)
- if err != nil &&
- !strings.Contains(err.Error(), "already connected") {
- return er.E(err)
- }
- }
-
- switch {
- case ctx.IsSet("local_amt"):
- req.LocalFundingAmount = int64(ctx.Int("local_amt"))
- case args.Present():
- req.LocalFundingAmount, errr = strconv.ParseInt(args.First(), 10, 64)
- if errr != nil {
- return er.Errorf("unable to decode local amt: %v", errr)
- }
- args = args.Tail()
- default:
- return er.Errorf("local amt argument missing")
- }
-
- if ctx.IsSet("push_amt") {
- req.PushSat = int64(ctx.Int("push_amt"))
- } else if args.Present() {
- req.PushSat, errr = strconv.ParseInt(args.First(), 10, 64)
- if errr != nil {
- return er.Errorf("unable to decode push amt: %v", errr)
- }
- }
-
- req.Private = ctx.Bool("private")
-
- // PSBT funding is a more involved, interactive process that is too
- // large to also fit into this already long function.
- if ctx.Bool("psbt") {
- return openChannelPsbt(ctx, client, req)
- }
- if !ctx.Bool("psbt") && ctx.Bool("no_publish") {
- return er.Errorf("the --no_publish flag can only be used in " +
- "combination with the --psbt flag")
- }
-
- stream, errr := client.OpenChannel(ctxb, req)
- if errr != nil {
- return er.E(errr)
- }
-
- for {
- resp, err := stream.Recv()
- if err == io.EOF {
- return nil
- } else if err != nil {
- return er.E(err)
- }
-
- switch update := resp.Update.(type) {
- case *lnrpc.OpenStatusUpdate_ChanPending:
- err := printChanPending(update)
- if err != nil {
- return err
- }
-
- if !ctx.Bool("block") {
- return nil
- }
-
- case *lnrpc.OpenStatusUpdate_ChanOpen:
- return printChanOpen(update)
- }
- }
-}
-
-// openChannelPsbt starts an interactive channel open protocol that uses a
-// partially signed bitcoin transaction (PSBT) to fund the channel output. The
-// protocol involves several steps between the RPC server and the CLI client:
-//
-// RPC server CLI client
-// | |
-// | |<------open channel (stream)-----|
-// | |-------ready for funding----->| |
-// | |<------PSBT verify------------| |
-// | |-------ready for signing----->| |
-// | |<------PSBT finalize----------| |
-// | |-------channel pending------->| |
-// | |-------channel open------------->|
-// | |
-func openChannelPsbt(ctx *cli.Context, client lnrpc.LightningClient,
- req *lnrpc.OpenChannelRequest) er.R {
-
- var (
- pendingChanID [32]byte
- shimPending = true
- basePsbtBytes []byte
- quit = make(chan struct{})
- srvMsg = make(chan *lnrpc.OpenStatusUpdate, 1)
- srvErr = make(chan er.R, 1)
- ctxc, cancel = context.WithCancel(context.Background())
- )
- defer cancel()
-
- // Make sure the user didn't supply any command line flags that are
- // incompatible with PSBT funding.
- err := checkPsbtFlags(req)
- if err != nil {
- return err
- }
-
- // If the user supplied a base PSBT, only make sure it's valid base64.
- // The RPC server will make sure it's also a valid PSBT.
- basePsbt := ctx.String("base_psbt")
- if basePsbt != "" {
- var err error
- basePsbtBytes, err = base64.StdEncoding.DecodeString(basePsbt)
- if err != nil {
- return er.Errorf("error parsing base PSBT: %v", err)
- }
- }
-
- // Generate a new, random pending channel ID that we'll use as the main
- // identifier when sending update messages to the RPC server.
- if _, err := rand.Read(pendingChanID[:]); err != nil {
- return er.Errorf("unable to generate random chan ID: %v", err)
- }
- fmt.Printf("Starting PSBT funding flow with pending channel ID %x.\n",
- pendingChanID)
-
- // maybeCancelShim is a helper function that cancels the funding shim
- // with the RPC server in case we end up aborting early.
- maybeCancelShim := func() {
- // If the user canceled while there was still a shim registered
- // with the wallet, release the resources now.
- if shimPending {
- fmt.Printf("Canceling PSBT funding flow for pending "+
- "channel ID %x.\n", pendingChanID)
- cancelMsg := &lnrpc.FundingTransitionMsg{
- Trigger: &lnrpc.FundingTransitionMsg_ShimCancel{
- ShimCancel: &lnrpc.FundingShimCancel{
- PendingChanId: pendingChanID[:],
- },
- },
- }
- err := sendFundingState(ctxc, ctx, cancelMsg)
- if err != nil {
- fmt.Printf("Error canceling shim: %v\n", err)
- }
- shimPending = false
- }
-
- // Abort the stream connection to the server.
- cancel()
- }
- defer maybeCancelShim()
-
- // Create the PSBT funding shim that will tell the funding manager we
- // want to use a PSBT.
- req.FundingShim = &lnrpc.FundingShim{
- Shim: &lnrpc.FundingShim_PsbtShim{
- PsbtShim: &lnrpc.PsbtShim{
- PendingChanId: pendingChanID[:],
- BasePsbt: basePsbtBytes,
- NoPublish: ctx.Bool("no_publish"),
- },
- },
- }
-
- // Start the interactive process by opening the stream connection to the
- // daemon. If the user cancels by pressing we need to cancel
- // the shim. To not just kill the process on interrupt, we need to
- // explicitly capture the signal.
- stream, errr := client.OpenChannel(ctxc, req)
- if errr != nil {
- return er.Errorf("opening stream to server failed: %v", errr)
- }
-
- if err := signal.Intercept(); err != nil {
- return err
- }
-
- // We also need to spawn a goroutine that reads from the server. This
- // will copy the messages to the channel as long as they come in or add
- // exactly one error to the error stream and then bail out.
- go func() {
- for {
- // Recv blocks until a message or error arrives.
- resp, err := stream.Recv()
- if err == io.EOF {
- srvErr <- er.Errorf("lnd shutting down: %v",
- err)
- return
- } else if err != nil {
- srvErr <- er.Errorf("got error from server: "+
- "%v", err)
- return
- }
-
- // Don't block on sending in case of shutting down.
- select {
- case srvMsg <- resp:
- case <-quit:
- return
- }
- }
- }()
-
- // Spawn another goroutine that only handles abort from user or errors
- // from the server. Both will trigger an attempt to cancel the shim with
- // the server.
- go func() {
- select {
- case <-signal.ShutdownChannel():
- fmt.Printf("\nInterrupt signal received.\n")
- close(quit)
-
- case err := <-srvErr:
- fmt.Printf("\nError received: %v\n", err)
-
- // If the remote peer canceled on us, the reservation
- // has already been deleted. We don't need to try to
- // remove it again, this would just produce another
- // error.
- if chanfunding.ErrRemoteCanceled.Is(err) {
- shimPending = false
- }
- close(quit)
-
- case <-quit:
- }
- }()
-
- // Our main event loop where we wait for triggers
- for {
- var srvResponse *lnrpc.OpenStatusUpdate
- select {
- case srvResponse = <-srvMsg:
- case <-quit:
- return nil
- }
-
- switch update := srvResponse.Update.(type) {
- case *lnrpc.OpenStatusUpdate_PsbtFund:
- // First tell the user how to create the PSBT with the
- // address and amount we now know.
- amt := btcutil.Amount(update.PsbtFund.FundingAmount)
- addr := update.PsbtFund.FundingAddress
- fmt.Printf(
- userMsgFund, req.NodePubkey, amt, amt, addr,
- addr, amt.ToBTC(),
- base64.StdEncoding.EncodeToString(
- update.PsbtFund.Psbt,
- ),
- )
-
- // Read the user's response and send it to the server to
- // verify everything's correct before anything is
- // signed.
- psbtBase64, err := readLine(quit)
- if er.Wrapped(err) == io.EOF {
- return nil
- }
- if err != nil {
- return er.Errorf("reading from console "+
- "failed: %v", err)
- }
- fundedPsbt, errr := base64.StdEncoding.DecodeString(
- strings.TrimSpace(psbtBase64),
- )
- if errr != nil {
- return er.Errorf("base64 decode failed: %v",
- errr)
- }
- verifyMsg := &lnrpc.FundingTransitionMsg{
- Trigger: &lnrpc.FundingTransitionMsg_PsbtVerify{
- PsbtVerify: &lnrpc.FundingPsbtVerify{
- FundedPsbt: fundedPsbt,
- PendingChanId: pendingChanID[:],
- },
- },
- }
- err = sendFundingState(ctxc, ctx, verifyMsg)
- if err != nil {
- return er.Errorf("verifying PSBT by lnd "+
- "failed: %v", err)
- }
-
- // Now that we know the PSBT looks good, we can let it
- // be signed by the user.
- fmt.Print(userMsgSign)
-
- // Read the signed PSBT and send it to lnd.
- finalTxStr, err := readLine(quit)
- if er.Wrapped(err) == io.EOF {
- return nil
- }
- if err != nil {
- return er.Errorf("reading from console "+
- "failed: %v", err)
- }
- finalizeMsg, err := finalizeMsgFromString(
- finalTxStr, pendingChanID[:],
- )
- if err != nil {
- return err
- }
- transitionMsg := &lnrpc.FundingTransitionMsg{
- Trigger: finalizeMsg,
- }
- err = sendFundingState(ctxc, ctx, transitionMsg)
- if err != nil {
- return er.Errorf("finalizing PSBT funding "+
- "flow failed: %v", err)
- }
-
- case *lnrpc.OpenStatusUpdate_ChanPending:
- // As soon as the channel is pending, there is no more
- // shim that needs to be canceled. If the user
- // interrupts now, we don't need to clean up anything.
- shimPending = false
-
- err := printChanPending(update)
- if err != nil {
- return err
- }
-
- if !ctx.Bool("block") {
- return nil
- }
-
- case *lnrpc.OpenStatusUpdate_ChanOpen:
- return printChanOpen(update)
- }
- }
-}
-
-// printChanOpen prints the channel point of the channel open message.
-func printChanOpen(update *lnrpc.OpenStatusUpdate_ChanOpen) er.R {
- channelPoint := update.ChanOpen.ChannelPoint
-
- // A channel point's funding txid can be get/set as a
- // byte slice or a string. In the case it is a string,
- // decode it.
- var txidHash []byte
- switch channelPoint.GetFundingTxid().(type) {
- case *lnrpc.ChannelPoint_FundingTxidBytes:
- txidHash = channelPoint.GetFundingTxidBytes()
- case *lnrpc.ChannelPoint_FundingTxidStr:
- s := channelPoint.GetFundingTxidStr()
- h, err := chainhash.NewHashFromStr(s)
- if err != nil {
- return err
- }
-
- txidHash = h[:]
- }
-
- txid, err := chainhash.NewHash(txidHash)
- if err != nil {
- return err
- }
-
- index := channelPoint.OutputIndex
- printJSON(struct {
- ChannelPoint string `json:"channel_point"`
- }{
- ChannelPoint: fmt.Sprintf("%v:%v", txid, index),
- })
- return nil
-}
-
-// printChanPending prints the funding transaction ID of the channel pending
-// message.
-func printChanPending(update *lnrpc.OpenStatusUpdate_ChanPending) er.R {
- txid, err := chainhash.NewHash(update.ChanPending.Txid)
- if err != nil {
- return err
- }
-
- printJSON(struct {
- FundingTxid string `json:"funding_txid"`
- }{
- FundingTxid: txid.String(),
- })
- return nil
-}
-
-// readLine reads a line from standard in but does not block in case of a
-// system interrupt like syscall.SIGINT (Ctrl+C).
-func readLine(quit chan struct{}) (string, er.R) {
- msg := make(chan string, 1)
-
- // In a normal console, reading from stdin won't signal EOF when the
- // user presses Ctrl+C. That's why we need to put this in a separate
- // goroutine so it doesn't block.
- go func() {
- for {
- var str string
- _, _ = fmt.Scan(&str)
- msg <- str
- return
- }
- }()
- for {
- select {
- case <-quit:
- return "", er.E(io.EOF)
-
- case str := <-msg:
- return str, nil
- }
- }
-}
-
-// checkPsbtFlags make sure a request to open a channel doesn't set any
-// parameters that are incompatible with the PSBT funding flow.
-func checkPsbtFlags(req *lnrpc.OpenChannelRequest) er.R {
- if req.MinConfs != defaultUtxoMinConf || req.SpendUnconfirmed {
- return er.Errorf("specifying minimum confirmations for PSBT " +
- "funding is not supported")
- }
- if req.TargetConf != 0 || req.SatPerByte != 0 {
- return er.Errorf("setting fee estimation parameters not " +
- "supported for PSBT funding")
- }
- return nil
-}
-
-// sendFundingState sends a single funding state step message by using a new
-// client connection. This is necessary if the whole funding flow takes longer
-// than the default macaroon timeout, then we cannot use a single client
-// connection.
-func sendFundingState(cancelCtx context.Context, cliCtx *cli.Context,
- msg *lnrpc.FundingTransitionMsg) er.R {
-
- client, cleanUp := getClient(cliCtx)
- defer cleanUp()
-
- _, errr := client.FundingStateStep(cancelCtx, msg)
- return er.E(errr)
-}
-
-// finalizeMsgFromString creates the final message for the PsbtFinalize step
-// from either a hex encoded raw wire transaction or a base64 encoded PSBT
-// packet.
-func finalizeMsgFromString(tx string,
- pendingChanID []byte) (*lnrpc.FundingTransitionMsg_PsbtFinalize, er.R) {
-
- rawTx, err := util.DecodeHex(strings.TrimSpace(tx))
- if err == nil {
- // Hex decoding succeeded so we assume we have a raw wire format
- // transaction. Let's submit that instead of a PSBT packet.
- tx := &wire.MsgTx{}
- err := tx.Deserialize(bytes.NewReader(rawTx))
- if err != nil {
- return nil, er.Errorf("deserializing as raw wire "+
- "transaction failed: %v", err)
- }
- return &lnrpc.FundingTransitionMsg_PsbtFinalize{
- PsbtFinalize: &lnrpc.FundingPsbtFinalize{
- FinalRawTx: rawTx,
- PendingChanId: pendingChanID,
- },
- }, nil
- }
-
- // If the string isn't a hex encoded transaction, we assume it must be
- // a base64 encoded PSBT packet.
- psbtBytes, errr := base64.StdEncoding.DecodeString(strings.TrimSpace(tx))
- if errr != nil {
- return nil, er.Errorf("base64 decode failed: %v", errr)
- }
- return &lnrpc.FundingTransitionMsg_PsbtFinalize{
- PsbtFinalize: &lnrpc.FundingPsbtFinalize{
- SignedPsbt: psbtBytes,
- PendingChanId: pendingChanID,
- },
- }, nil
-}
diff --git a/lnd/cmd/lncli/cmd_pay.go b/lnd/cmd/lncli/cmd_pay.go
deleted file mode 100644
index fe87c20d..00000000
--- a/lnd/cmd/lncli/cmd_pay.go
+++ /dev/null
@@ -1,887 +0,0 @@
-package main
-
-import (
- "bytes"
- "context"
- "crypto/rand"
- "fmt"
- "io/ioutil"
- "os"
- "runtime"
- "strconv"
- "strings"
- "time"
-
- "github.com/jedib0t/go-pretty/table"
- "github.com/jedib0t/go-pretty/text"
- "github.com/lightninglabs/protobuf-hex-display/jsonpb"
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/btcutil/util"
- "github.com/pkt-cash/pktd/lnd/lnrpc"
- "github.com/pkt-cash/pktd/lnd/lnrpc/routerrpc"
- "github.com/pkt-cash/pktd/lnd/lntypes"
- "github.com/pkt-cash/pktd/lnd/record"
- "github.com/pkt-cash/pktd/lnd/routing/route"
- "github.com/urfave/cli"
-)
-
-const (
- // paymentTimeout is the default timeout for the payment loop in lnd.
- // No new attempts will be started after the timeout.
- paymentTimeout = time.Second * 60
-)
-
-var (
- cltvLimitFlag = cli.UintFlag{
- Name: "cltv_limit",
- Usage: "the maximum time lock that may be used for " +
- "this payment",
- }
-
- lastHopFlag = cli.StringFlag{
- Name: "last_hop",
- Usage: "pubkey of the last hop (penultimate node in the path) " +
- "to route through for this payment",
- }
-
- dataFlag = cli.StringFlag{
- Name: "data",
- Usage: "attach custom data to the payment. The required " +
- "format is: =,=" +
- ",.. For example: --data 3438382=0a21ff. " +
- "Custom record ids start from 65536.",
- }
-
- inflightUpdatesFlag = cli.BoolFlag{
- Name: "inflight_updates",
- Usage: "if set, intermediate payment state updates will be " +
- "displayed. Only valid in combination with --json.",
- }
-
- maxPartsFlag = cli.UintFlag{
- Name: "max_parts",
- Usage: "the maximum number of partial payments that may be " +
- "used",
- Value: 1,
- }
-
- jsonFlag = cli.BoolFlag{
- Name: "json",
- Usage: "if set, payment updates are printed as json " +
- "messages. Set by default on Windows because table " +
- "formatting is unsupported.",
- }
-)
-
-// paymentFlags returns common flags for sendpayment and payinvoice.
-func paymentFlags() []cli.Flag {
- return []cli.Flag{
- cli.StringFlag{
- Name: "pay_req",
- Usage: "a zpay32 encoded payment request to fulfill",
- },
- cli.Int64Flag{
- Name: "fee_limit",
- Usage: "maximum fee allowed in satoshis when " +
- "sending the payment",
- },
- cli.Int64Flag{
- Name: "fee_limit_percent",
- Usage: "percentage of the payment's amount used as " +
- "the maximum fee allowed when sending the " +
- "payment",
- },
- cli.DurationFlag{
- Name: "timeout",
- Usage: "the maximum amount of time we should spend " +
- "trying to fulfill the payment, failing " +
- "after the timeout has elapsed",
- Value: paymentTimeout,
- },
- cltvLimitFlag,
- lastHopFlag,
- cli.Uint64Flag{
- Name: "outgoing_chan_id",
- Usage: "short channel id of the outgoing channel to " +
- "use for the first hop of the payment",
- Value: 0,
- },
- cli.BoolFlag{
- Name: "force, f",
- Usage: "will skip payment request confirmation",
- },
- cli.BoolFlag{
- Name: "allow_self_payment",
- Usage: "allow sending a circular payment to self",
- },
- dataFlag, inflightUpdatesFlag, maxPartsFlag, jsonFlag,
- }
-}
-
-var sendPaymentCommand = cli.Command{
- Name: "sendpayment",
- Category: "Payments",
- Usage: "Send a payment over lightning.",
- Description: `
- Send a payment over Lightning. One can either specify the full
- parameters of the payment, or just use a payment request which encodes
- all the payment details.
-
- If payment isn't manually specified, then only a payment request needs
- to be passed using the --pay_req argument.
-
- If the payment *is* manually specified, then all four alternative
- arguments need to be specified in order to complete the payment:
- * --dest=N
- * --amt=A
- * --final_cltv_delta=T
- * --payment_hash=H
- `,
- ArgsUsage: "dest amt payment_hash final_cltv_delta | --pay_req=[payment request]",
- Flags: append(paymentFlags(),
- cli.StringFlag{
- Name: "dest, d",
- Usage: "the compressed identity pubkey of the " +
- "payment recipient",
- },
- cli.Int64Flag{
- Name: "amt, a",
- Usage: "number of satoshis to send",
- },
- cli.StringFlag{
- Name: "payment_hash, r",
- Usage: "the hash to use within the payment's HTLC",
- },
- cli.Int64Flag{
- Name: "final_cltv_delta",
- Usage: "the number of blocks the last hop has to reveal the preimage",
- },
- cli.BoolFlag{
- Name: "keysend",
- Usage: "will generate a pre-image and encode it in the sphinx packet, a dest must be set [experimental]",
- },
- ),
- Action: sendPayment,
-}
-
-// retrieveFeeLimit retrieves the fee limit based on the different fee limit
-// flags passed. It always returns a value and doesn't rely on lnd applying a
-// default.
-func retrieveFeeLimit(ctx *cli.Context, amt int64) (int64, er.R) {
- switch {
-
- case ctx.IsSet("fee_limit") && ctx.IsSet("fee_limit_percent"):
- return 0, er.Errorf("either fee_limit or fee_limit_percent " +
- "can be set, but not both")
-
- case ctx.IsSet("fee_limit"):
- return ctx.Int64("fee_limit"), nil
-
- case ctx.IsSet("fee_limit_percent"):
- // Round up the fee limit to prevent hitting zero on small
- // amounts.
- feeLimitRoundedUp :=
- (amt*ctx.Int64("fee_limit_percent") + 99) / 100
-
- return feeLimitRoundedUp, nil
- }
-
- // If no fee limit is set, use the payment amount as a limit (100%).
- return amt, nil
-}
-
-func confirmPayReq(resp *lnrpc.PayReq, amt, feeLimit int64) er.R {
- fmt.Printf("Payment hash: %v\n", resp.GetPaymentHash())
- fmt.Printf("Description: %v\n", resp.GetDescription())
- fmt.Printf("Amount (in satoshis): %v\n", amt)
- fmt.Printf("Fee limit (in satoshis): %v\n", feeLimit)
- fmt.Printf("Destination: %v\n", resp.GetDestination())
-
- confirm := promptForConfirmation("Confirm payment (yes/no): ")
- if !confirm {
- return er.Errorf("payment not confirmed")
- }
-
- return nil
-}
-
-func sendPayment(ctx *cli.Context) er.R {
- // Show command help if no arguments provided
- if ctx.NArg() == 0 && ctx.NumFlags() == 0 {
- _ = cli.ShowCommandHelp(ctx, "sendpayment")
- return nil
- }
-
- // If a payment request was provided, we can exit early since all of the
- // details of the payment are encoded within the request.
- if ctx.IsSet("pay_req") {
- req := &routerrpc.SendPaymentRequest{
- PaymentRequest: ctx.String("pay_req"),
- Amt: ctx.Int64("amt"),
- }
-
- return sendPaymentRequest(ctx, req)
- }
-
- var (
- destNode []byte
- amount int64
- err er.R
- )
-
- args := ctx.Args()
-
- switch {
- case ctx.IsSet("dest"):
- destNode, err = util.DecodeHex(ctx.String("dest"))
- case args.Present():
- destNode, err = util.DecodeHex(args.First())
- args = args.Tail()
- default:
- return er.Errorf("destination txid argument missing")
- }
- if err != nil {
- return err
- }
-
- if len(destNode) != 33 {
- return er.Errorf("dest node pubkey must be exactly 33 bytes, is "+
- "instead: %v", len(destNode))
- }
-
- if ctx.IsSet("amt") {
- amount = ctx.Int64("amt")
- } else if args.Present() {
- var err error
- amount, err = strconv.ParseInt(args.First(), 10, 64)
- args = args.Tail()
- if err != nil {
- return er.Errorf("unable to decode payment amount: %v", err)
- }
- }
-
- req := &routerrpc.SendPaymentRequest{
- Dest: destNode,
- Amt: amount,
- DestCustomRecords: make(map[uint64][]byte),
- }
-
- var rHash []byte
-
- if ctx.Bool("keysend") {
- if ctx.IsSet("payment_hash") {
- return er.New("cannot set payment hash when using " +
- "keysend")
- }
- var preimage lntypes.Preimage
- if _, err := rand.Read(preimage[:]); err != nil {
- return er.E(err)
- }
-
- // Set the preimage. If the user supplied a preimage with the
- // data flag, the preimage that is set here will be overwritten
- // later.
- req.DestCustomRecords[record.KeySendType] = preimage[:]
-
- hash := preimage.Hash()
- rHash = hash[:]
- } else {
- switch {
- case ctx.IsSet("payment_hash"):
- rHash, err = util.DecodeHex(ctx.String("payment_hash"))
- case args.Present():
- rHash, err = util.DecodeHex(args.First())
- args = args.Tail()
- default:
- return er.Errorf("payment hash argument missing")
- }
- }
-
- if err != nil {
- return err
- }
- if len(rHash) != 32 {
- return er.Errorf("payment hash must be exactly 32 "+
- "bytes, is instead %v", len(rHash))
- }
- req.PaymentHash = rHash
-
- switch {
- case ctx.IsSet("final_cltv_delta"):
- req.FinalCltvDelta = int32(ctx.Int64("final_cltv_delta"))
- case args.Present():
- delta, err := strconv.ParseInt(args.First(), 10, 64)
- if err != nil {
- return er.E(err)
- }
- req.FinalCltvDelta = int32(delta)
- }
-
- return sendPaymentRequest(ctx, req)
-}
-
-func sendPaymentRequest(ctx *cli.Context,
- req *routerrpc.SendPaymentRequest) er.R {
-
- conn := getClientConn(ctx, false)
- defer conn.Close()
-
- client := lnrpc.NewLightningClient(conn)
- routerClient := routerrpc.NewRouterClient(conn)
-
- outChan := ctx.Uint64("outgoing_chan_id")
- if outChan != 0 {
- req.OutgoingChanIds = []uint64{outChan}
- }
- if ctx.IsSet(lastHopFlag.Name) {
- lastHop, err := route.NewVertexFromStr(
- ctx.String(lastHopFlag.Name),
- )
- if err != nil {
- return err
- }
- req.LastHopPubkey = lastHop[:]
- }
-
- req.CltvLimit = int32(ctx.Int(cltvLimitFlag.Name))
-
- pmtTimeout := ctx.Duration("timeout")
- if pmtTimeout <= 0 {
- return er.New("payment timeout must be greater than zero")
- }
- req.TimeoutSeconds = int32(pmtTimeout.Seconds())
-
- req.AllowSelfPayment = ctx.Bool("allow_self_payment")
-
- req.MaxParts = uint32(ctx.Uint(maxPartsFlag.Name))
- var err er.R
-
- // Parse custom data records.
- data := ctx.String(dataFlag.Name)
- if data != "" {
- records := strings.Split(data, ",")
- for _, r := range records {
- kv := strings.Split(r, "=")
- if len(kv) != 2 {
- return er.New("invalid data format: " +
- "multiple equal signs in record")
- }
-
- recordID, errr := strconv.ParseUint(kv[0], 10, 64)
- if errr != nil {
- return er.Errorf("invalid data format: %v",
- errr)
- }
-
- hexValue, err := util.DecodeHex(kv[1])
- if err != nil {
- return er.Errorf("invalid data format: %v",
- err)
- }
-
- req.DestCustomRecords[recordID] = hexValue
- }
- }
-
- var feeLimit int64
- if req.PaymentRequest != "" {
- // Decode payment request to find out the amount.
- decodeReq := &lnrpc.PayReqString{PayReq: req.PaymentRequest}
- decodeResp, errr := client.DecodePayReq(
- context.Background(), decodeReq,
- )
- if errr != nil {
- return er.E(errr)
- }
-
- // If amount is present in the request, override the request
- // amount.
- amt := req.Amt
- invoiceAmt := decodeResp.GetNumSatoshis()
- if invoiceAmt != 0 {
- amt = invoiceAmt
- }
-
- // Calculate fee limit based on the determined amount.
- feeLimit, err = retrieveFeeLimit(ctx, amt)
- if err != nil {
- return err
- }
-
- // Ask for confirmation of amount and fee limit if payment is
- // forced.
- if !ctx.Bool("force") {
- err := confirmPayReq(decodeResp, amt, feeLimit)
- if err != nil {
- return err
- }
- }
- } else {
- var err er.R
- feeLimit, err = retrieveFeeLimit(ctx, req.Amt)
- if err != nil {
- return err
- }
- }
-
- req.FeeLimitSat = feeLimit
-
- // Always print in-flight updates for the table output.
- printJSON := ctx.Bool(jsonFlag.Name)
- req.NoInflightUpdates = !ctx.Bool(inflightUpdatesFlag.Name) && printJSON
-
- stream, errr := routerClient.SendPaymentV2(context.Background(), req)
- if errr != nil {
- return er.E(errr)
- }
-
- finalState, err := printLivePayment(
- stream, client, printJSON,
- )
- if err != nil {
- return err
- }
-
- // If we get a payment error back, we pass an error up
- // to main which eventually calls fatal() and returns
- // with a non-zero exit code.
- if finalState.Status != lnrpc.Payment_SUCCEEDED {
- return er.New(finalState.Status.String())
- }
-
- return nil
-}
-
-var trackPaymentCommand = cli.Command{
- Name: "trackpayment",
- Category: "Payments",
- Usage: "Track progress of an existing payment.",
- Description: `
- Pick up monitoring the progression of a previously initiated payment
- specified by the hash argument.
- `,
- ArgsUsage: "hash",
- Action: actionDecorator(trackPayment),
-}
-
-func trackPayment(ctx *cli.Context) er.R {
- args := ctx.Args()
-
- conn := getClientConn(ctx, false)
- defer conn.Close()
-
- routerClient := routerrpc.NewRouterClient(conn)
-
- if !args.Present() {
- return er.Errorf("hash argument missing")
- }
-
- hash, err := util.DecodeHex(args.First())
- if err != nil {
- return err
- }
-
- req := &routerrpc.TrackPaymentRequest{
- PaymentHash: hash,
- }
-
- stream, errr := routerClient.TrackPaymentV2(context.Background(), req)
- if errr != nil {
- return er.E(errr)
- }
-
- client := lnrpc.NewLightningClient(conn)
- _, err = printLivePayment(stream, client, ctx.Bool(jsonFlag.Name))
- return err
-}
-
-// printLivePayment receives payment updates from the given stream and either
-// outputs them as json or as a more user-friendly formatted table. The table
-// option uses terminal control codes to rewrite the output. This call
-// terminates when the payment reaches a final state.
-func printLivePayment(stream routerrpc.Router_TrackPaymentV2Client,
- client lnrpc.LightningClient, json bool) (*lnrpc.Payment, er.R) {
-
- // Terminal escape codes aren't supported on Windows, fall back to json.
- if !json && runtime.GOOS == "windows" {
- json = true
- }
-
- aliases := newAliasCache(client)
-
- first := true
- var lastLineCount int
- for {
- payment, errr := stream.Recv()
- if errr != nil {
- return nil, er.E(errr)
- }
-
- if json {
- // Delimit json messages by newlines (inspired by
- // grpc over rest chunking).
- if first {
- first = false
- } else {
- fmt.Println()
- }
-
- // Write raw json to stdout.
- printRespJSON(payment)
- } else {
- table := formatPayment(payment, aliases)
-
- // Clear all previously written lines and print the
- // updated table.
- clearLines(lastLineCount)
- fmt.Print(table)
-
- // Store the number of lines written for the next update
- // pass.
- lastLineCount = 0
- for _, b := range table {
- if b == '\n' {
- lastLineCount++
- }
- }
- }
-
- // Terminate loop if payments state is final.
- if payment.Status != lnrpc.Payment_IN_FLIGHT {
- return payment, nil
- }
- }
-}
-
-// aliasCache allows cached retrieval of node aliases.
-type aliasCache struct {
- cache map[string]string
- client lnrpc.LightningClient
-}
-
-func newAliasCache(client lnrpc.LightningClient) *aliasCache {
- return &aliasCache{
- client: client,
- cache: make(map[string]string),
- }
-}
-
-// get returns a node alias either from cache or freshly requested from lnd.
-func (a *aliasCache) get(pubkey string) string {
- alias, ok := a.cache[pubkey]
- if ok {
- return alias
- }
-
- // Request node info.
- resp, err := a.client.GetNodeInfo(
- context.Background(),
- &lnrpc.NodeInfoRequest{
- PubKey: pubkey,
- },
- )
- if err != nil {
- // If no info is available, use the
- // pubkey as identifier.
- alias = pubkey[:6]
- } else {
- alias = resp.Node.Alias
- }
- a.cache[pubkey] = alias
-
- return alias
-}
-
-// formatMsat formats msat amounts as fractional sats.
-func formatMsat(amt int64) string {
- return strconv.FormatFloat(float64(amt)/1000.0, 'f', -1, 64)
-}
-
-// formatPayment formats the payment state as an ascii table.
-func formatPayment(payment *lnrpc.Payment, aliases *aliasCache) string {
- t := table.NewWriter()
-
- // Build table header.
- t.AppendHeader(table.Row{
- "HTLC_STATE", "ATTEMPT_TIME", "RESOLVE_TIME", "RECEIVER_AMT",
- "FEE", "TIMELOCK", "CHAN_OUT", "ROUTE",
- })
- t.SetColumnConfigs([]table.ColumnConfig{
- {Name: "ATTEMPT_TIME", Align: text.AlignRight},
- {Name: "RESOLVE_TIME", Align: text.AlignRight},
- {Name: "CHAN_OUT", Align: text.AlignLeft,
- AlignHeader: text.AlignLeft},
- })
-
- // Add all htlcs as rows.
- createTime := time.Unix(0, payment.CreationTimeNs)
- var totalPaid, totalFees int64
- for _, htlc := range payment.Htlcs {
- formatTime := func(timeNs int64) string {
- if timeNs == 0 {
- return "-"
- }
- resolveTime := time.Unix(0, timeNs)
- resolveTimeDiff := resolveTime.Sub(createTime)
- resolveTimeMs := resolveTimeDiff / time.Millisecond
- return fmt.Sprintf(
- "%.3f", float64(resolveTimeMs)/1000.0,
- )
- }
-
- attemptTime := formatTime(htlc.AttemptTimeNs)
- resolveTime := formatTime(htlc.ResolveTimeNs)
-
- route := htlc.Route
- lastHop := route.Hops[len(route.Hops)-1]
-
- hops := []string{}
- for _, h := range route.Hops {
- alias := aliases.get(h.PubKey)
- hops = append(hops, alias)
- }
-
- state := htlc.Status.String()
- if htlc.Failure != nil {
- state = fmt.Sprintf(
- "%v @ %v",
- htlc.Failure.Code,
- htlc.Failure.FailureSourceIndex,
- )
- }
-
- t.AppendRow([]interface{}{
- state, attemptTime, resolveTime,
- formatMsat(lastHop.AmtToForwardMsat),
- formatMsat(route.TotalFeesMsat),
- route.TotalTimeLock, route.Hops[0].ChanId,
- strings.Join(hops, "->")},
- )
-
- if htlc.Status == lnrpc.HTLCAttempt_SUCCEEDED {
- totalPaid += lastHop.AmtToForwardMsat
- totalFees += route.TotalFeesMsat
- }
- }
-
- // Render table.
- b := &bytes.Buffer{}
- t.SetOutputMirror(b)
- t.Render()
-
- // Add additional payment-level data.
- fmt.Fprintf(b, "Amount + fee: %v + %v sat\n",
- formatMsat(totalPaid), formatMsat(totalFees))
- fmt.Fprintf(b, "Payment hash: %v\n", payment.PaymentHash)
- fmt.Fprintf(b, "Payment status: %v", payment.Status)
- switch payment.Status {
- case lnrpc.Payment_SUCCEEDED:
- fmt.Fprintf(b, ", preimage: %v", payment.PaymentPreimage)
- case lnrpc.Payment_FAILED:
- fmt.Fprintf(b, ", reason: %v", payment.FailureReason)
- }
- fmt.Fprintf(b, "\n")
-
- return b.String()
-}
-
-var payInvoiceCommand = cli.Command{
- Name: "payinvoice",
- Category: "Payments",
- Usage: "Pay an invoice over lightning.",
- ArgsUsage: "pay_req",
- Flags: append(paymentFlags(),
- cli.Int64Flag{
- Name: "amt",
- Usage: "(optional) number of satoshis to fulfill the " +
- "invoice",
- },
- ),
- Action: actionDecorator(payInvoice),
-}
-
-func payInvoice(ctx *cli.Context) er.R {
- args := ctx.Args()
-
- var payReq string
- switch {
- case ctx.IsSet("pay_req"):
- payReq = ctx.String("pay_req")
- case args.Present():
- payReq = args.First()
- default:
- return er.Errorf("pay_req argument missing")
- }
-
- req := &routerrpc.SendPaymentRequest{
- PaymentRequest: payReq,
- Amt: ctx.Int64("amt"),
- DestCustomRecords: make(map[uint64][]byte),
- }
-
- return sendPaymentRequest(ctx, req)
-}
-
-var sendToRouteCommand = cli.Command{
- Name: "sendtoroute",
- Category: "Payments",
- Usage: "Send a payment over a predefined route.",
- Description: `
- Send a payment over Lightning using a specific route. One must specify
- the route to attempt and the payment hash. This command can even
- be chained with the response to queryroutes or buildroute. This command
- can be used to implement channel rebalancing by crafting a self-route,
- or even atomic swaps using a self-route that crosses multiple chains.
-
- There are three ways to specify a route:
- * using the --routes parameter to manually specify a JSON encoded
- route in the format of the return value of queryroutes or
- buildroute:
- (lncli sendtoroute --payment_hash= --routes=)
-
- * passing the route as a positional argument:
- (lncli sendtoroute --payment_hash=pay_hash )
-
- * or reading in the route from stdin, which can allow chaining the
- response from queryroutes or buildroute, or even read in a file
- with a pre-computed route:
- (lncli queryroutes --args.. | lncli sendtoroute --payment_hash= -
-
- notice the '-' at the end, which signals that lncli should read
- the route in from stdin
- `,
- Flags: []cli.Flag{
- cli.StringFlag{
- Name: "payment_hash, pay_hash",
- Usage: "the hash to use within the payment's HTLC",
- },
- cli.StringFlag{
- Name: "routes, r",
- Usage: "a json array string in the format of the response " +
- "of queryroutes that denotes which routes to use",
- },
- },
- Action: sendToRoute,
-}
-
-func sendToRoute(ctx *cli.Context) er.R {
- // Show command help if no arguments provided.
- if ctx.NArg() == 0 && ctx.NumFlags() == 0 {
- _ = cli.ShowCommandHelp(ctx, "sendtoroute")
- return nil
- }
-
- args := ctx.Args()
-
- var (
- rHash []byte
- err er.R
- )
- switch {
- case ctx.IsSet("payment_hash"):
- rHash, err = util.DecodeHex(ctx.String("payment_hash"))
- case args.Present():
- rHash, err = util.DecodeHex(args.First())
-
- args = args.Tail()
- default:
- return er.Errorf("payment hash argument missing")
- }
-
- if err != nil {
- return err
- }
-
- if len(rHash) != 32 {
- return er.Errorf("payment hash must be exactly 32 "+
- "bytes, is instead %d", len(rHash))
- }
-
- var jsonRoutes string
- switch {
- // The user is specifying the routes explicitly via the key word
- // argument.
- case ctx.IsSet("routes"):
- jsonRoutes = ctx.String("routes")
-
- // The user is specifying the routes as a positional argument.
- case args.Present() && args.First() != "-":
- jsonRoutes = args.First()
-
- // The user is signalling that we should read stdin in order to parse
- // the set of target routes.
- case args.Present() && args.First() == "-":
- b, err := ioutil.ReadAll(os.Stdin)
- if err != nil {
- return er.E(err)
- }
- if len(b) == 0 {
- return er.Errorf("queryroutes output is empty")
- }
-
- jsonRoutes = string(b)
- }
-
- // Try to parse the provided json both in the legacy QueryRoutes format
- // that contains a list of routes and the single route BuildRoute
- // format.
- var route *lnrpc.Route
- routes := &lnrpc.QueryRoutesResponse{}
- errr := jsonpb.UnmarshalString(jsonRoutes, routes)
- if errr == nil {
- if len(routes.Routes) == 0 {
- return er.Errorf("no routes provided")
- }
-
- if len(routes.Routes) != 1 {
- return er.Errorf("expected a single route, but got %v",
- len(routes.Routes))
- }
-
- route = routes.Routes[0]
- } else {
- routes := &routerrpc.BuildRouteResponse{}
- errr = jsonpb.UnmarshalString(jsonRoutes, routes)
- if errr != nil {
- return er.Errorf("unable to unmarshal json string "+
- "from incoming array of routes: %v", errr)
- }
-
- route = routes.Route
- }
-
- req := &routerrpc.SendToRouteRequest{
- PaymentHash: rHash,
- Route: route,
- }
-
- return sendToRouteRequest(ctx, req)
-}
-
-func sendToRouteRequest(ctx *cli.Context, req *routerrpc.SendToRouteRequest) er.R {
- conn := getClientConn(ctx, false)
- defer conn.Close()
-
- client := routerrpc.NewRouterClient(conn)
-
- resp, errr := client.SendToRouteV2(context.Background(), req)
- if errr != nil {
- return er.E(errr)
- }
-
- printRespJSON(resp)
-
- return nil
-}
-
-// ESC is the ASCII code for escape character
-const ESC = 27
-
-// clearCode defines a terminal escape code to clear the currently line and move
-// the cursor up.
-var clearCode = fmt.Sprintf("%c[%dA%c[2K", ESC, 1, ESC)
-
-// clearLines erases the last count lines in the terminal window.
-func clearLines(count int) {
- _, _ = fmt.Print(strings.Repeat(clearCode, count))
-}
diff --git a/lnd/cmd/lncli/cmd_profile.go b/lnd/cmd/lncli/cmd_profile.go
deleted file mode 100644
index f550cb09..00000000
--- a/lnd/cmd/lncli/cmd_profile.go
+++ /dev/null
@@ -1,450 +0,0 @@
-package main
-
-import (
- "fmt"
- "io/ioutil"
- "os"
- "path"
- "strings"
-
- "github.com/pkt-cash/pktd/btcutil"
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/lnd/lncfg"
- "github.com/urfave/cli"
- "gopkg.in/macaroon.v2"
-)
-
-var (
- // defaultLncliDir is the default directory to store the profile file
- // in. This defaults to:
- // C:\Users\\AppData\Local\Lncli\ on Windows
- // ~/.lncli/ on Linux
- // ~/Library/Application Support/Lncli/ on MacOS
- defaultLncliDir = btcutil.AppDataDir("lncli", false)
-
- // defaultProfileFile is the full, absolute path of the profile file.
- defaultProfileFile = path.Join(defaultLncliDir, "profiles.json")
-)
-
-var profileSubCommand = cli.Command{
- Name: "profile",
- Category: "Profiles",
- Usage: "Create and manage lncli profiles",
- Description: `
- Profiles for lncli are an easy and comfortable way to manage multiple
- nodes from the command line by storing node specific parameters like RPC
- host, network, TLS certificate path or macaroons in a named profile.
-
- To use a predefined profile, just use the '--profile=myprofile' (or
- short version '-p=myprofile') with any lncli command.
-
- A default profile can also be defined, lncli will then always use the
- connection/node parameters from that profile instead of the default
- values.
-
- WARNING: Setting a default profile changes the default behavior of
- lncli! To disable the use of the default profile for a single command,
- set '--profile= '.
-
- The profiles are stored in a file called profiles.json in the user's
- home directory, for example:
- C:\Users\\AppData\Local\Lncli\profiles.json on Windows
- ~/.lncli/profiles.json on Linux
- ~/Library/Application Support/Lncli/profiles.json on MacOS
- `,
- Subcommands: []cli.Command{
- profileListCommand,
- profileAddCommand,
- profileRemoveCommand,
- profileSetDefaultCommand,
- profileUnsetDefaultCommand,
- profileAddMacaroonCommand,
- },
-}
-
-var profileListCommand = cli.Command{
- Name: "list",
- Usage: "Lists all lncli profiles",
- Action: profileList,
-}
-
-func profileList(_ *cli.Context) er.R {
- f, err := loadProfileFile(defaultProfileFile)
- if err != nil {
- return err
- }
-
- printJSON(f)
- return nil
-}
-
-var profileAddCommand = cli.Command{
- Name: "add",
- Usage: "Add a new profile",
- ArgsUsage: "name",
- Description: `
- Add a new named profile to the main profiles.json. All global options
- (see 'lncli --help') passed into this command are stored in that named
- profile.
- `,
- Flags: []cli.Flag{
- cli.StringFlag{
- Name: "name",
- Usage: "the name of the new profile",
- },
- cli.BoolFlag{
- Name: "default",
- Usage: "set the new profile to be the default profile",
- },
- },
- Action: profileAdd,
-}
-
-func profileAdd(ctx *cli.Context) er.R {
- if ctx.NArg() == 0 && ctx.NumFlags() == 0 {
- return er.E(cli.ShowCommandHelp(ctx, "add"))
- }
-
- // Load the default profile file or create a new one if it doesn't exist
- // yet.
- f, err := loadProfileFile(defaultProfileFile)
- switch {
- case errNoProfileFile.Is(err):
- f = &profileFile{}
- _ = os.MkdirAll(path.Dir(defaultProfileFile), 0700)
-
- case err != nil:
- return err
- }
-
- // Create a profile struct from all the global options.
- profile, err := profileFromContext(ctx, true, false)
- if err != nil {
- return er.Errorf("could not load global options: %v", err)
- }
-
- // Finally, all that's left is to get the profile name from either
- // positional argument or flag.
- args := ctx.Args()
- switch {
- case ctx.IsSet("name"):
- profile.Name = ctx.String("name")
- case args.Present():
- profile.Name = args.First()
- default:
- return er.Errorf("name argument missing")
- }
-
- // Is there already a profile with that name?
- for _, p := range f.Profiles {
- if p.Name == profile.Name {
- return er.Errorf("a profile with the name %s already "+
- "exists", profile.Name)
- }
- }
-
- // Do we need to update the default entry to be this one?
- if ctx.Bool("default") {
- f.Default = profile.Name
- }
-
- // All done, store the updated profile file.
- f.Profiles = append(f.Profiles, profile)
- if err = saveProfileFile(defaultProfileFile, f); err != nil {
- return er.Errorf("error writing profile file %s: %v",
- defaultProfileFile, err)
- }
-
- fmt.Printf("Profile %s added to file %s.\n", profile.Name,
- defaultProfileFile)
- return nil
-}
-
-var profileRemoveCommand = cli.Command{
- Name: "remove",
- Usage: "Remove a profile",
- ArgsUsage: "name",
- Description: `Remove the specified profile from the profile file.`,
- Flags: []cli.Flag{
- cli.StringFlag{
- Name: "name",
- Usage: "the name of the profile to delete",
- },
- },
- Action: profileRemove,
-}
-
-func profileRemove(ctx *cli.Context) er.R {
- if ctx.NArg() == 0 && ctx.NumFlags() == 0 {
- return er.E(cli.ShowCommandHelp(ctx, "remove"))
- }
-
- // Load the default profile file.
- f, err := loadProfileFile(defaultProfileFile)
- if err != nil {
- return er.Errorf("could not load profile file: %v", err)
- }
-
- // Get the profile name from either positional argument or flag.
- var (
- args = ctx.Args()
- name string
- found = false
- )
- switch {
- case ctx.IsSet("name"):
- name = ctx.String("name")
- case args.Present():
- name = args.First()
- default:
- return er.Errorf("name argument missing")
- }
-
- // Create a copy of all profiles but don't include the one to delete.
- newProfiles := make([]*profileEntry, 0, len(f.Profiles)-1)
- for _, p := range f.Profiles {
- // Skip the one we want to delete.
- if p.Name == name {
- found = true
-
- if p.Name == f.Default {
- fmt.Println("Warning: removing default profile.")
- }
- continue
- }
-
- // Keep all others.
- newProfiles = append(newProfiles, p)
- }
-
- // If what we were looking for didn't exist in the first place, there's
- // no need for updating the file.
- if !found {
- return er.Errorf("profile with name %s not found in file",
- name)
- }
-
- // Great, everything updated, now let's save the file.
- f.Profiles = newProfiles
- return saveProfileFile(defaultProfileFile, f)
-}
-
-var profileSetDefaultCommand = cli.Command{
- Name: "setdefault",
- Usage: "Set the default profile",
- ArgsUsage: "name",
- Description: `
- Set a specified profile to be used as the default profile.
-
- WARNING: Setting a default profile changes the default behavior of
- lncli! To disable the use of the default profile for a single command,
- set '--profile= '.
- `,
- Flags: []cli.Flag{
- cli.StringFlag{
- Name: "name",
- Usage: "the name of the profile to set as default",
- },
- },
- Action: profileSetDefault,
-}
-
-func profileSetDefault(ctx *cli.Context) er.R {
- if ctx.NArg() == 0 && ctx.NumFlags() == 0 {
- return er.E(cli.ShowCommandHelp(ctx, "setdefault"))
- }
-
- // Load the default profile file.
- f, err := loadProfileFile(defaultProfileFile)
- if err != nil {
- return er.Errorf("could not load profile file: %v", err)
- }
-
- // Get the profile name from either positional argument or flag.
- var (
- args = ctx.Args()
- name string
- found = false
- )
- switch {
- case ctx.IsSet("name"):
- name = ctx.String("name")
- case args.Present():
- name = args.First()
- default:
- return er.Errorf("name argument missing")
- }
-
- // Make sure the new default profile actually exists.
- for _, p := range f.Profiles {
- if p.Name == name {
- found = true
- f.Default = p.Name
-
- break
- }
- }
-
- // If the default profile doesn't exist, there's no need for updating
- // the file.
- if !found {
- return er.Errorf("profile with name %s not found in file",
- name)
- }
-
- // Great, everything updated, now let's save the file.
- return saveProfileFile(defaultProfileFile, f)
-}
-
-var profileUnsetDefaultCommand = cli.Command{
- Name: "unsetdefault",
- Usage: "Unsets the default profile",
- Description: `
- Disables the use of a default profile and restores lncli to its original
- behavior.
- `,
- Action: profileUnsetDefault,
-}
-
-func profileUnsetDefault(_ *cli.Context) er.R {
- // Load the default profile file.
- f, err := loadProfileFile(defaultProfileFile)
- if err != nil {
- return er.Errorf("could not load profile file: %v", err)
- }
-
- // Save the file with the flag disabled.
- f.Default = ""
- return saveProfileFile(defaultProfileFile, f)
-}
-
-var profileAddMacaroonCommand = cli.Command{
- Name: "addmacaroon",
- Usage: "Add a macaroon to a profile's macaroon jar",
- ArgsUsage: "macaroon-name",
- Description: `
- Add an additional macaroon specified by the global option --macaroonpath
- to an existing profile's macaroon jar.
-
- If no profile is selected, the macaroon is added to the default profile
- (if one exists). To add a macaroon to a specific profile, use the global
- --profile=myprofile option.
-
- If multiple macaroons exist in a profile's macaroon jar, the one to use
- can be specified with the global option --macfromjar=xyz.
- `,
- Flags: []cli.Flag{
- cli.StringFlag{
- Name: "name",
- Usage: "the name of the macaroon",
- },
- cli.BoolFlag{
- Name: "default",
- Usage: "set the new macaroon to be the default " +
- "macaroon in the jar",
- },
- },
- Action: profileAddMacaroon,
-}
-
-func profileAddMacaroon(ctx *cli.Context) er.R {
- if ctx.NArg() == 0 && ctx.NumFlags() == 0 {
- return er.E(cli.ShowCommandHelp(ctx, "addmacaroon"))
- }
-
- // Load the default profile file or create a new one if it doesn't exist
- // yet.
- f, err := loadProfileFile(defaultProfileFile)
- if err != nil {
- return er.Errorf("could not load profile file: %v", err)
- }
-
- // Finally, all that's left is to get the profile name from either
- // positional argument or flag.
- var (
- args = ctx.Args()
- profileName string
- macName string
- )
- switch {
- case ctx.IsSet("name"):
- macName = ctx.String("name")
- case args.Present():
- macName = args.First()
- default:
- return er.Errorf("name argument missing")
- }
-
- // Make sure the user actually set a macaroon path to use.
- if !ctx.GlobalIsSet("macaroonpath") {
- return er.Errorf("macaroonpath global option missing")
- }
-
- // Find out which profile we should add the macaroon. The global flag
- // takes precedence over the default profile.
- if f.Default != "" {
- profileName = f.Default
- }
- if ctx.GlobalIsSet("profile") {
- profileName = ctx.GlobalString("profile")
- }
- if len(strings.TrimSpace(profileName)) == 0 {
- return er.Errorf("no profile specified and no default " +
- "profile exists")
- }
-
- // Is there a profile with that name?
- var selectedProfile *profileEntry
- for _, p := range f.Profiles {
- if p.Name == profileName {
- selectedProfile = p
- break
- }
- }
- if selectedProfile == nil {
- return er.Errorf("profile with name %s not found", profileName)
- }
-
- // Does a macaroon with that name already exist?
- for _, m := range selectedProfile.Macaroons.Jar {
- if m.Name == macName {
- return er.Errorf("a macaroon with the name %s "+
- "already exists", macName)
- }
- }
-
- // Do we need to update the default entry to be this one?
- if ctx.Bool("default") {
- selectedProfile.Macaroons.Default = macName
- }
-
- // Now load and possibly encrypt the macaroon file.
- macPath := lncfg.CleanAndExpandPath(ctx.GlobalString("macaroonpath"))
- macBytes, errr := ioutil.ReadFile(macPath)
- if errr != nil {
- return er.Errorf("unable to read macaroon path: %v", errr)
- }
- mac := &macaroon.Macaroon{}
- if errr = mac.UnmarshalBinary(macBytes); errr != nil {
- return er.Errorf("unable to decode macaroon: %v", errr)
- }
- macEntry := &macaroonEntry{
- Name: macName,
- }
- if err = macEntry.storeMacaroon(mac, nil); err != nil {
- return er.Errorf("unable to store macaroon: %v", err)
- }
-
- // All done, store the updated profile file.
- selectedProfile.Macaroons.Jar = append(
- selectedProfile.Macaroons.Jar, macEntry,
- )
- if err = saveProfileFile(defaultProfileFile, f); err != nil {
- return er.Errorf("error writing profile file %s: %v",
- defaultProfileFile, err)
- }
-
- fmt.Printf("Macaroon %s added to profile %s in file %s.\n", macName,
- selectedProfile.Name, defaultProfileFile)
- return nil
-}
diff --git a/lnd/cmd/lncli/cmd_query_mission_control.go b/lnd/cmd/lncli/cmd_query_mission_control.go
deleted file mode 100644
index 822abcf1..00000000
--- a/lnd/cmd/lncli/cmd_query_mission_control.go
+++ /dev/null
@@ -1,35 +0,0 @@
-package main
-
-import (
- "context"
-
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/lnd/lnrpc/routerrpc"
-
- "github.com/urfave/cli"
-)
-
-var queryMissionControlCommand = cli.Command{
- Name: "querymc",
- Category: "Payments",
- Usage: "Query the internal mission control state.",
- Action: actionDecorator(queryMissionControl),
-}
-
-func queryMissionControl(ctx *cli.Context) er.R {
- conn := getClientConn(ctx, false)
- defer conn.Close()
-
- client := routerrpc.NewRouterClient(conn)
-
- req := &routerrpc.QueryMissionControlRequest{}
- rpcCtx := context.Background()
- snapshot, errr := client.QueryMissionControl(rpcCtx, req)
- if errr != nil {
- return er.E(errr)
- }
-
- printRespJSON(snapshot)
-
- return nil
-}
diff --git a/lnd/cmd/lncli/cmd_query_probability.go b/lnd/cmd/lncli/cmd_query_probability.go
deleted file mode 100644
index 37db1359..00000000
--- a/lnd/cmd/lncli/cmd_query_probability.go
+++ /dev/null
@@ -1,68 +0,0 @@
-package main
-
-import (
- "context"
- "strconv"
-
- "github.com/pkt-cash/pktd/btcutil"
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/lnd/lnrpc/routerrpc"
- "github.com/pkt-cash/pktd/lnd/lnwire"
- "github.com/pkt-cash/pktd/lnd/routing/route"
- "github.com/urfave/cli"
-)
-
-var queryProbCommand = cli.Command{
- Name: "queryprob",
- Category: "Payments",
- Usage: "Estimate a success probability.",
- ArgsUsage: "from-node to-node amt",
- Action: actionDecorator(queryProb),
-}
-
-func queryProb(ctx *cli.Context) er.R {
- args := ctx.Args()
-
- if len(args) != 3 {
- return er.E(cli.ShowCommandHelp(ctx, "queryprob"))
- }
-
- fromNode, err := route.NewVertexFromStr(args.Get(0))
- if err != nil {
- return er.Errorf("invalid from node key: %v", err)
- }
-
- toNode, err := route.NewVertexFromStr(args.Get(1))
- if err != nil {
- return er.Errorf("invalid to node key: %v", err)
- }
-
- amtSat, errr := strconv.ParseUint(args.Get(2), 10, 64)
- if errr != nil {
- return er.Errorf("invalid amt: %v", errr)
- }
-
- amtMsat := lnwire.NewMSatFromSatoshis(
- btcutil.Amount(amtSat),
- )
-
- conn := getClientConn(ctx, false)
- defer conn.Close()
-
- client := routerrpc.NewRouterClient(conn)
-
- req := &routerrpc.QueryProbabilityRequest{
- FromNode: fromNode[:],
- ToNode: toNode[:],
- AmtMsat: int64(amtMsat),
- }
- rpcCtx := context.Background()
- response, errr := client.QueryProbability(rpcCtx, req)
- if errr != nil {
- return er.E(errr)
- }
-
- printRespJSON(response)
-
- return nil
-}
diff --git a/lnd/cmd/lncli/cmd_reset_mission_control.go b/lnd/cmd/lncli/cmd_reset_mission_control.go
deleted file mode 100644
index 95b02d77..00000000
--- a/lnd/cmd/lncli/cmd_reset_mission_control.go
+++ /dev/null
@@ -1,29 +0,0 @@
-package main
-
-import (
- "context"
-
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/lnd/lnrpc/routerrpc"
-
- "github.com/urfave/cli"
-)
-
-var resetMissionControlCommand = cli.Command{
- Name: "resetmc",
- Category: "Payments",
- Usage: "Reset internal mission control state.",
- Action: actionDecorator(resetMissionControl),
-}
-
-func resetMissionControl(ctx *cli.Context) er.R {
- conn := getClientConn(ctx, false)
- defer conn.Close()
-
- client := routerrpc.NewRouterClient(conn)
-
- req := &routerrpc.ResetMissionControlRequest{}
- rpcCtx := context.Background()
- _, errr := client.ResetMissionControl(rpcCtx, req)
- return er.E(errr)
-}
diff --git a/lnd/cmd/lncli/cmd_version.go b/lnd/cmd/lncli/cmd_version.go
deleted file mode 100644
index 399676fc..00000000
--- a/lnd/cmd/lncli/cmd_version.go
+++ /dev/null
@@ -1,51 +0,0 @@
-package main
-
-import (
- "context"
- "fmt"
-
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/lnd/lnrpc/lnclipb"
- "github.com/pkt-cash/pktd/lnd/lnrpc/verrpc"
- "github.com/pkt-cash/pktd/pktconfig/version"
- "github.com/urfave/cli"
-)
-
-var versionCommand = cli.Command{
- Name: "version",
- Usage: "Display lncli and lnd version info.",
- Description: `
- Returns version information about both lncli and lnd. If lncli is unable
- to connect to lnd, the command fails but still prints the lncli version.
- `,
- Action: actionDecorator(v),
-}
-
-func v(ctx *cli.Context) er.R {
- conn := getClientConn(ctx, false)
- defer conn.Close()
-
- versions := &lnclipb.VersionResponse{
- Lncli: &verrpc.Version{
- Version: version.Version(),
- AppMajor: uint32(version.AppMajorVersion()),
- AppMinor: uint32(version.AppMinorVersion()),
- AppPatch: uint32(version.AppPatchVersion()),
- AppPreRelease: fmt.Sprintf("%v", version.IsPrerelease()),
- },
- }
-
- client := verrpc.NewVersionerClient(conn)
-
- ctxb := context.Background()
- lndVersion, err := client.GetVersion(ctxb, &verrpc.VersionRequest{})
- if err != nil {
- printRespJSON(versions)
- return er.Errorf("unable fetch version from lnd: %v", err)
- }
- versions.Lnd = lndVersion
-
- printRespJSON(versions)
-
- return nil
-}
diff --git a/lnd/cmd/lncli/cmd_walletunlocker.go b/lnd/cmd/lncli/cmd_walletunlocker.go
deleted file mode 100644
index 850ac8a1..00000000
--- a/lnd/cmd/lncli/cmd_walletunlocker.go
+++ /dev/null
@@ -1,663 +0,0 @@
-package main
-
-import (
- "bufio"
- "bytes"
- "context"
- "encoding/hex"
- "fmt"
- "io/ioutil"
- "os"
- "strconv"
- "strings"
-
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/lnd/lncfg"
- "github.com/pkt-cash/pktd/lnd/lnrpc"
- "github.com/pkt-cash/pktd/lnd/walletunlocker"
- "github.com/pkt-cash/pktd/pktwallet/wallet/seedwords"
- "github.com/urfave/cli"
-)
-
-var (
- statelessInitFlag = cli.BoolFlag{
- Name: "stateless_init",
- Usage: "do not create any macaroon files in the file " +
- "system of the daemon",
- }
- saveToFlag = cli.StringFlag{
- Name: "save_to",
- Usage: "save returned admin macaroon to this file",
- }
-)
-
-var createCommand = cli.Command{
- Name: "create",
- Category: "Startup",
- Usage: "Initialize a wallet when starting lnd for the first time.",
- Description: `
- The create command is used to initialize an lnd wallet from scratch for
- the very first time. This is interactive command with one required
- argument (the password), and one optional argument (the mnemonic
- passphrase).
-
- The first argument (the password) is required and MUST be greater than
- 8 characters. This will be used to encrypt the wallet within lnd. This
- MUST be remembered as it will be required to fully start up the daemon.
-
- The second argument is an optional 24-word mnemonic derived from BIP
- 39. If provided, then the internal wallet will use the seed derived
- from this mnemonic to generate all keys.
-
- This command returns a 24-word seed in the scenario that NO mnemonic
- was provided by the user. This should be written down as it can be used
- to potentially recover all on-chain funds, and most off-chain funds as
- well.
-
- If the --stateless_init flag is set, no macaroon files are created by
- the daemon. Instead, the binary serialized admin macaroon is returned
- in the answer. This answer MUST be stored somewhere, otherwise all
- access to the RPC server will be lost and the wallet must be recreated
- to re-gain access.
- If the --save_to parameter is set, the macaroon is saved to this file,
- otherwise it is printed to standard out.
-
- Finally, it's also possible to use this command and a set of static
- channel backups to trigger a recover attempt for the provided Static
- Channel Backups. Only one of the three parameters will be accepted. See
- the restorechanbackup command for further details w.r.t the format
- accepted.
- `,
- Flags: []cli.Flag{
- cli.StringFlag{
- Name: "single_backup",
- Usage: "a hex encoded single channel backup obtained " +
- "from exportchanbackup",
- },
- cli.StringFlag{
- Name: "multi_backup",
- Usage: "a hex encoded multi-channel backup obtained " +
- "from exportchanbackup",
- },
- cli.StringFlag{
- Name: "multi_file",
- Usage: "the path to a multi-channel back up file",
- },
- statelessInitFlag,
- saveToFlag,
- },
- Action: actionDecorator(create),
-}
-
-// monowidthColumns takes a set of words, and the number of desired columns,
-// and returns a new set of words that have had white space appended to the
-// word in order to create a mono-width column.
-func monowidthColumns(words []string, ncols int) []string {
- // Determine max size of words in each column.
- colWidths := make([]int, ncols)
- for i, word := range words {
- col := i % ncols
- curWidth := colWidths[col]
- if len(word) > curWidth {
- colWidths[col] = len(word)
- }
- }
-
- // Append whitespace to each word to make columns mono-width.
- finalWords := make([]string, len(words))
- for i, word := range words {
- col := i % ncols
- width := colWidths[col]
-
- diff := width - len(word)
- finalWords[i] = word + strings.Repeat(" ", diff)
- }
-
- return finalWords
-}
-
-func create(ctx *cli.Context) er.R {
- ctxb := context.Background()
- client, cleanUp := getWalletUnlockerClient(ctx)
- defer cleanUp()
-
- var (
- chanBackups *lnrpc.ChanBackupSnapshot
-
- // We use var restoreSCB to track if we will be including an SCB
- // recovery in the init wallet request.
- restoreSCB = false
- )
-
- backups, err := parseChanBackups(ctx)
-
- // We'll check to see if the user provided any static channel backups (SCB),
- // if so, we will warn the user that SCB recovery closes all open channels
- // and ask them to confirm their intention.
- // If the user agrees, we'll add the SCB recovery onto the final init wallet
- // request.
- switch {
- // parseChanBackups returns an errMissingBackup error (which we ignore) if
- // the user did not request a SCB recovery.
- case errMissingChanBackup.Is(err):
-
- // Passed an invalid channel backup file.
- case err != nil:
- return er.Errorf("unable to parse chan backups: %v", err)
-
- // We have an SCB recovery option with a valid backup file.
- default:
-
- warningLoop:
- for {
-
- fmt.Println()
- fmt.Printf("WARNING: You are attempting to restore from a " +
- "static channel backup (SCB) file.\nThis action will CLOSE " +
- "all currently open channels, and you will pay on-chain fees." +
- "\n\nAre you sure you want to recover funds from a" +
- " static channel backup? (Enter y/n): ")
-
- reader := bufio.NewReader(os.Stdin)
- answer, err := reader.ReadString('\n')
- if err != nil {
- return er.E(err)
- }
-
- answer = strings.TrimSpace(answer)
- answer = strings.ToLower(answer)
-
- switch answer {
- case "y":
- restoreSCB = true
- break warningLoop
- case "n":
- fmt.Println("Aborting SCB recovery")
- return nil
- }
- }
- }
-
- // Proceed with SCB recovery.
- if restoreSCB {
- fmt.Println("Static Channel Backup (SCB) recovery selected!")
- if backups != nil {
- switch {
- case backups.GetChanBackups() != nil:
- singleBackup := backups.GetChanBackups()
- chanBackups = &lnrpc.ChanBackupSnapshot{
- SingleChanBackups: singleBackup,
- }
-
- case backups.GetMultiChanBackup() != nil:
- multiBackup := backups.GetMultiChanBackup()
- chanBackups = &lnrpc.ChanBackupSnapshot{
- MultiChanBackup: &lnrpc.MultiChanBackup{
- MultiChanBackup: multiBackup,
- },
- }
- }
- }
- }
-
- // Should the daemon be initialized stateless? Then we expect an answer
- // with the admin macaroon later. Because the --save_to is related to
- // stateless init, it doesn't make sense to be set on its own.
- statelessInit := ctx.Bool(statelessInitFlag.Name)
- if !statelessInit && ctx.IsSet(saveToFlag.Name) {
- return er.Errorf("cannot set save_to parameter without " +
- "stateless_init")
- }
-
- walletPassword, err := capturePassword(
- "Input wallet password: ", false, walletunlocker.ValidatePassword,
- )
- if err != nil {
- return err
- }
-
- // Next, we'll see if the user has 24-word mnemonic they want to use to
- // derive a seed within the wallet.
- var (
- hasMnemonic bool
- )
-
-mnemonicCheck:
- for {
- fmt.Println()
- fmt.Printf("Do you have an existing Pktwallet seed " +
- "you want to use? (Enter y/n): ")
-
- reader := bufio.NewReader(os.Stdin)
- answer, errr := reader.ReadString('\n')
- if errr != nil {
- return er.E(errr)
- }
-
- fmt.Println()
-
- answer = strings.TrimSpace(answer)
- answer = strings.ToLower(answer)
-
- switch answer {
- case "y":
- hasMnemonic = true
- break mnemonicCheck
- case "n":
- hasMnemonic = false
- break mnemonicCheck
- }
- }
-
- // If the user *does* have an existing seed they want to use, then
- // we'll read that in directly from the terminal.
- var (
- cipherSeedMnemonic []string
- aezeedPass []byte
- recoveryWindow int32
- )
- if hasMnemonic {
- fmt.Printf("Input your 15-word Pktwallet seed separated by spaces: ")
- reader := bufio.NewReader(os.Stdin)
- mnemonic, errr := reader.ReadString('\n')
- if errr != nil {
- return er.E(errr)
- }
-
- // We'll trim off extra spaces, and ensure the mnemonic is all
- // lower case, then populate our request.
- mnemonic = strings.TrimSpace(mnemonic)
- mnemonic = strings.ToLower(mnemonic)
-
- cipherSeedMnemonic = strings.Split(mnemonic, " ")
-
- fmt.Println()
-
- if len(cipherSeedMnemonic) != 15 {
- return er.Errorf("wrong cipher seed mnemonic "+
- "length: got %v words, expecting %v words",
- len(cipherSeedMnemonic), 15)
- }
-
- seedEnc, err := seedwords.SeedFromWords(mnemonic)
- if err != nil {
- return err
- }
- if seedEnc.NeedsPassphrase() {
- aezeedPass, err = readPassword("This seed is encrypted " +
- "with a passphrase please enter it now: ")
- }
-
- /// This should be automatic
- // for {
- // fmt.Println()
- // fmt.Printf("Input an optional address look-ahead "+
- // "used to scan for used keys (default %d): ",
- // defaultRecoveryWindow)
-
- // reader := bufio.NewReader(os.Stdin)
- // answer, errr := reader.ReadString('\n')
- // if errr != nil {
- // return er.E(errr)
- // }
-
- // fmt.Println()
-
- // answer = strings.TrimSpace(answer)
-
- // if len(answer) == 0 {
- // recoveryWindow = defaultRecoveryWindow
- // break
- // }
-
- // lookAhead, err := strconv.Atoi(answer)
- // if err != nil {
- // fmt.Printf("Unable to parse recovery "+
- // "window: %v\n", err)
- // continue
- // }
-
- // recoveryWindow = int32(lookAhead)
- // break
- // }
- } else {
- // Otherwise, if the user doesn't have a mnemonic that they
- // want to use, we'll generate a fresh one with the GenSeed
- // command.
- fmt.Println("Your cipher seed can optionally be encrypted.")
-
- instruction := "Input your passphrase if you wish to encrypt it " +
- "(or press enter to proceed without a cipher seed " +
- "passphrase): "
- aezeedPass, err = capturePassword(
- instruction, true, func(_ []byte) er.R { return nil },
- )
- if err != nil {
- return err
- }
-
- fmt.Println()
- fmt.Println("Generating fresh cipher seed...")
- fmt.Println()
-
- genSeedReq := &lnrpc.GenSeedRequest{
- AezeedPassphrase: aezeedPass,
- }
- seedResp, err := client.GenSeed(ctxb, genSeedReq)
- if err != nil {
- return er.Errorf("unable to generate seed: %v", err)
- }
-
- cipherSeedMnemonic = seedResp.CipherSeedMnemonic
- }
-
- // Before we initialize the wallet, we'll display the cipher seed to
- // the user so they can write it down.
-
- fmt.Println("!!!YOU MUST WRITE DOWN THIS SEED AND YOUR PASSWORD TO BE ABLE TO " +
- "RESTORE THE WALLET!!!")
- fmt.Println()
-
- fmt.Println("---------------BEGIN LND CIPHER SEED---------------")
-
- fmt.Printf("%v\n", strings.Join(cipherSeedMnemonic, " "))
-
- fmt.Println("---------------END LND CIPHER SEED-----------------")
-
- fmt.Println("\n!!!YOU MUST WRITE DOWN THIS SEED AND YOUR PASSWORD TO BE ABLE TO " +
- "RESTORE THE WALLET!!!")
-
- // With either the user's prior cipher seed, or a newly generated one,
- // we'll go ahead and initialize the wallet.
- req := &lnrpc.InitWalletRequest{
- WalletPassword: walletPassword,
- CipherSeedMnemonic: cipherSeedMnemonic,
- AezeedPassphrase: aezeedPass,
- RecoveryWindow: recoveryWindow,
- ChannelBackups: chanBackups,
- StatelessInit: statelessInit,
- }
- response, errr := client.InitWallet(ctxb, req)
- if errr != nil {
- return er.E(errr)
- }
-
- fmt.Println("\npld successfully initialized!")
-
- if statelessInit {
- return storeOrPrintAdminMac(ctx, response.AdminMacaroon)
- }
-
- return nil
-}
-
-// capturePassword returns a password value that has been entered twice by the
-// user, to ensure that the user knows what password they have entered. The user
-// will be prompted to retry until the passwords match. If the optional param is
-// true, the function may return an empty byte array if the user opts against
-// using a password.
-func capturePassword(instruction string, optional bool,
- validate func([]byte) er.R) ([]byte, er.R) {
-
- for {
- password, err := readPassword(instruction)
- if err != nil {
- return nil, err
- }
-
- // Do not require users to repeat password if
- // it is optional and they are not using one.
- if len(password) == 0 && optional {
- return nil, nil
- }
-
- // If the password provided is not valid, restart
- // password capture process from the beginning.
- if err := validate(password); err != nil {
- fmt.Println(err.String())
- fmt.Println()
- continue
- }
-
- passwordConfirmed, err := readPassword("Confirm password: ")
- if err != nil {
- return nil, err
- }
-
- if bytes.Equal(password, passwordConfirmed) {
- return password, nil
- }
-
- fmt.Println("Passwords don't match, please try again")
- fmt.Println()
- }
-}
-
-var unlockCommand = cli.Command{
- Name: "unlock",
- Category: "Startup",
- Usage: "Unlock an encrypted wallet at startup.",
- Description: `
- The unlock command is used to decrypt lnd's wallet state in order to
- start up. This command MUST be run after booting up lnd before it's
- able to carry out its duties. An exception is if a user is running with
- --noseedbackup, then a default passphrase will be used.
-
- If the --stateless_init flag is set, no macaroon files are created by
- the daemon. This should be set for every unlock if the daemon was
- initially initialized stateless. Otherwise the daemon will create
- unencrypted macaroon files which could leak information to the system
- that the daemon runs on.
- `,
- Flags: []cli.Flag{
- cli.IntFlag{
- Name: "recovery_window",
- Usage: "address lookahead to resume recovery rescan, " +
- "value should be non-zero -- To recover all " +
- "funds, this should be greater than the " +
- "maximum number of consecutive, unused " +
- "addresses ever generated by the wallet.",
- },
- cli.BoolFlag{
- Name: "stdin",
- Usage: "read password from standard input instead of " +
- "prompting for it. THIS IS CONSIDERED TO " +
- "BE DANGEROUS if the password is located in " +
- "a file that can be read by another user. " +
- "This flag should only be used in " +
- "combination with some sort of password " +
- "manager or secrets vault.",
- },
- statelessInitFlag,
- },
- Action: actionDecorator(unlock),
-}
-
-func unlock(ctx *cli.Context) er.R {
- ctxb := context.Background()
- client, cleanUp := getWalletUnlockerClient(ctx)
- defer cleanUp()
-
- var (
- pw []byte
- errr error
- err er.R
- )
- switch {
- // Read the password from standard in as if it were a file. This should
- // only be used if the password is piped into lncli from some sort of
- // password manager. If the user types the password instead, it will be
- // echoed in the console.
- case ctx.IsSet("stdin"):
- reader := bufio.NewReader(os.Stdin)
- pw, errr = reader.ReadBytes('\n')
-
- // Remove carriage return and newline characters.
- pw = bytes.Trim(pw, "\r\n")
-
- // Read the password from a terminal by default. This requires the
- // terminal to be a real tty and will fail if a string is piped into
- // lncli.
- default:
- pw, err = readPassword("Input wallet password: ")
- }
- if err != nil {
- return err
- }
- if errr != nil {
- return er.E(errr)
- }
-
- args := ctx.Args()
-
- // Parse the optional recovery window if it is specified. By default,
- // the recovery window will be 0, indicating no lookahead should be
- // used.
- var recoveryWindow int32
- switch {
- case ctx.IsSet("recovery_window"):
- recoveryWindow = int32(ctx.Int64("recovery_window"))
- case args.Present():
- window, errr := strconv.ParseInt(args.First(), 10, 64)
- if errr != nil {
- return er.E(errr)
- }
- recoveryWindow = int32(window)
- }
-
- req := &lnrpc.UnlockWalletRequest{
- WalletPassword: pw,
- RecoveryWindow: recoveryWindow,
- StatelessInit: ctx.Bool(statelessInitFlag.Name),
- }
- _, errr = client.UnlockWallet(ctxb, req)
- if errr != nil {
- return er.E(errr)
- }
-
- fmt.Println("\nlnd successfully unlocked!")
-
- // TODO(roasbeef): add ability to accept hex single and multi backups
-
- return nil
-}
-
-var changePasswordCommand = cli.Command{
- Name: "changepassword",
- Category: "Startup",
- Usage: "Change an encrypted wallet's password at startup.",
- Description: `
- The changepassword command is used to Change lnd's encrypted wallet's
- password. It will automatically unlock the daemon if the password change
- is successful.
-
- If one did not specify a password for their wallet (running lnd with
- --noseedbackup), one must restart their daemon without
- --noseedbackup and use this command. The "current password" field
- should be left empty.
-
- If the daemon was originally initialized stateless, then the
- --stateless_init flag needs to be set for the change password request
- as well! Otherwise the daemon will generate unencrypted macaroon files
- in its file system again and possibly leak sensitive information.
- Changing the password will by default not change the macaroon root key
- (just re-encrypt the macaroon database with the new password). So all
- macaroons will still be valid.
- If one wants to make sure that all previously created macaroons are
- invalidated, a new macaroon root key can be generated by using the
- --new_mac_root_key flag.
-
- After a successful password change with the --stateless_init flag set,
- the current or new admin macaroon is returned binary serialized in the
- answer. This answer MUST then be stored somewhere, otherwise
- all access to the RPC server will be lost and the wallet must be re-
- created to re-gain access. If the --save_to parameter is set, the
- macaroon is saved to this file, otherwise it is printed to standard out.
- `,
- Flags: []cli.Flag{
- statelessInitFlag,
- saveToFlag,
- cli.BoolFlag{
- Name: "new_mac_root_key",
- Usage: "rotate the macaroon root key resulting in " +
- "all previously created macaroons to be " +
- "invalidated",
- },
- },
- Action: actionDecorator(changePassword),
-}
-
-func changePassword(ctx *cli.Context) er.R {
- ctxb := context.Background()
- client, cleanUp := getWalletUnlockerClient(ctx)
- defer cleanUp()
-
- currentPw, err := readPassword("Input current wallet password: ")
- if err != nil {
- return err
- }
-
- newPw, err := readPassword("Input new wallet password: ")
- if err != nil {
- return err
- }
-
- confirmPw, err := readPassword("Confirm new wallet password: ")
- if err != nil {
- return err
- }
-
- if !bytes.Equal(newPw, confirmPw) {
- return er.Errorf("passwords don't match")
- }
-
- // Should the daemon be initialized stateless? Then we expect an answer
- // with the admin macaroon later. Because the --save_to is related to
- // stateless init, it doesn't make sense to be set on its own.
- statelessInit := ctx.Bool(statelessInitFlag.Name)
- if !statelessInit && ctx.IsSet(saveToFlag.Name) {
- return er.Errorf("cannot set save_to parameter without " +
- "stateless_init")
- }
-
- req := &lnrpc.ChangePasswordRequest{
- CurrentPassword: currentPw,
- NewPassword: newPw,
- StatelessInit: statelessInit,
- NewMacaroonRootKey: ctx.Bool("new_mac_root_key"),
- }
-
- response, errr := client.ChangePassword(ctxb, req)
- if errr != nil {
- return er.E(errr)
- }
-
- if statelessInit {
- return storeOrPrintAdminMac(ctx, response.AdminMacaroon)
- }
-
- return nil
-}
-
-// storeOrPrintAdminMac either stores the admin macaroon to a file specified or
-// prints it to standard out, depending on the user flags set.
-func storeOrPrintAdminMac(ctx *cli.Context, adminMac []byte) er.R {
- // The user specified the optional --save_to parameter. We'll save the
- // macaroon to that file.
- if ctx.IsSet("save_to") {
- macSavePath := lncfg.CleanAndExpandPath(ctx.String("save_to"))
- err := ioutil.WriteFile(macSavePath, adminMac, 0644)
- if err != nil {
- _ = os.Remove(macSavePath)
- return er.E(err)
- }
- fmt.Printf("Admin macaroon saved to %s\n", macSavePath)
- return nil
- }
-
- // Otherwise we just print it. The user MUST store this macaroon
- // somewhere so we either save it to a provided file path or just print
- // it to standard output.
- fmt.Printf("Admin macaroon: %s\n", hex.EncodeToString(adminMac))
- return nil
-}
diff --git a/lnd/cmd/lncli/commands.go b/lnd/cmd/lncli/commands.go
deleted file mode 100644
index 2ccd8941..00000000
--- a/lnd/cmd/lncli/commands.go
+++ /dev/null
@@ -1,2844 +0,0 @@
-package main
-
-import (
- "bufio"
- "bytes"
- "context"
- "fmt"
- "io"
- "io/ioutil"
- "math"
- "os"
- "strconv"
- "strings"
- "sync"
- "time"
-
- "github.com/lightninglabs/protobuf-hex-display/json"
- "github.com/lightninglabs/protobuf-hex-display/jsonpb"
- "github.com/lightninglabs/protobuf-hex-display/proto"
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/btcutil/util"
- "github.com/pkt-cash/pktd/chaincfg/chainhash"
- "github.com/pkt-cash/pktd/lnd/lnrpc"
- "github.com/pkt-cash/pktd/lnd/routing/route"
- "github.com/pkt-cash/pktd/wire"
- "github.com/urfave/cli"
- "google.golang.org/grpc/codes"
- "google.golang.org/grpc/status"
-)
-
-// TODO(roasbeef): cli logic for supporting both positional and unix style
-// arguments.
-
-// TODO(roasbeef): expose all fee conf targets
-
-const defaultRecoveryWindow int32 = 2500
-
-const (
- defaultUtxoMinConf = 1
-)
-
-func printJSON(resp interface{}) {
- b, err := json.Marshal(resp)
- if err != nil {
- fatal(er.E(err))
- }
-
- var out bytes.Buffer
- json.Indent(&out, b, "", "\t")
- out.WriteString("\n")
- out.WriteTo(os.Stdout)
-}
-
-func printRespJSON(resp proto.Message) {
- jsonMarshaler := &jsonpb.Marshaler{
- EmitDefaults: true,
- OrigName: true,
- Indent: " ",
- }
-
- jsonStr, err := jsonMarshaler.MarshalToString(resp)
- if err != nil {
- fmt.Println("unable to decode response: ", err)
- return
- }
-
- fmt.Println(jsonStr)
-}
-
-// actionDecorator is used to add additional information and error handling
-// to command actions.
-func actionDecorator(f func(*cli.Context) er.R) func(*cli.Context) er.R {
- return func(c *cli.Context) er.R {
- if err := f(c); err != nil {
- s, ok := status.FromError(er.Wrapped(err))
-
- // If it's a command for the UnlockerService (like
- // 'create' or 'unlock') but the wallet is already
- // unlocked, then these methods aren't recognized any
- // more because this service is shut down after
- // successful unlock. That's why the code
- // 'Unimplemented' means something different for these
- // two commands.
- if s.Code() == codes.Unimplemented &&
- (c.Command.Name == "create" ||
- c.Command.Name == "unlock") {
- return er.Errorf("Wallet is already unlocked")
- }
-
- // lnd might be active, but not possible to contact
- // using RPC if the wallet is encrypted. If we get
- // error code Unimplemented, it means that lnd is
- // running, but the RPC server is not active yet (only
- // WalletUnlocker server active) and most likely this
- // is because of an encrypted wallet.
- // exclude getinfo in order to work even when wallet is locked
- if ok && s.Code() == codes.Unimplemented && c.Command.Name != "getinfo" {
- return er.Errorf("Wallet is encrypted. " +
- "Please unlock using 'lncli unlock', " +
- "or set password using 'lncli create'" +
- " if this is the first time starting " +
- "lnd.")
- }
- return err
- }
- return nil
- }
-}
-
-var newAddressCommand = cli.Command{
- Name: "newaddress",
- Category: "Wallet",
- Usage: "Generates a new address.",
- ArgsUsage: "address-type",
- Description: `
- Generate a wallet new address. Address-types has to be one of:
- - p2wkh: Pay to witness key hash
- - np2wkh: Pay to nested witness key hash`,
- Action: actionDecorator(newAddress),
-}
-
-func newAddress(ctx *cli.Context) er.R {
- client, cleanUp := getClient(ctx)
- defer cleanUp()
-
- stringAddrType := ctx.Args().First()
-
- // Map the string encoded address type, to the concrete typed address
- // type enum. An unrecognized address type will result in an error.
- var addrType lnrpc.AddressType
- switch stringAddrType { // TODO(roasbeef): make them ints on the cli?
- case "p2wkh":
- addrType = lnrpc.AddressType_WITNESS_PUBKEY_HASH
- case "np2wkh":
- addrType = lnrpc.AddressType_NESTED_PUBKEY_HASH
- default:
- return er.Errorf("invalid address type %v, support address type "+
- "are: p2wkh and np2wkh", stringAddrType)
- }
-
- ctxb := context.Background()
- addr, err := client.NewAddress(ctxb, &lnrpc.NewAddressRequest{
- Type: addrType,
- })
- if err != nil {
- return er.E(err)
- }
-
- printRespJSON(addr)
- return nil
-}
-
-var estimateFeeCommand = cli.Command{
- Name: "estimatefee",
- Category: "On-chain",
- Usage: "Get fee estimates for sending bitcoin on-chain to multiple addresses.",
- ArgsUsage: "send-json-string [--conf_target=N]",
- Description: `
- Get fee estimates for sending a transaction paying the specified amount(s) to the passed address(es).
-
- The send-json-string' param decodes addresses and the amount to send respectively in the following format:
-
- '{"ExampleAddr": NumCoinsInSatoshis, "SecondAddr": NumCoins}'
- `,
- Flags: []cli.Flag{
- cli.Int64Flag{
- Name: "conf_target",
- Usage: "(optional) the number of blocks that the transaction *should* " +
- "confirm in",
- },
- },
- Action: actionDecorator(estimateFees),
-}
-
-func estimateFees(ctx *cli.Context) er.R {
- var amountToAddr map[string]int64
-
- jsonMap := ctx.Args().First()
- if err := json.Unmarshal([]byte(jsonMap), &amountToAddr); err != nil {
- return er.E(err)
- }
-
- ctxb := context.Background()
- client, cleanUp := getClient(ctx)
- defer cleanUp()
-
- resp, err := client.EstimateFee(ctxb, &lnrpc.EstimateFeeRequest{
- AddrToAmount: amountToAddr,
- TargetConf: int32(ctx.Int64("conf_target")),
- })
- if err != nil {
- return er.E(err)
- }
-
- printRespJSON(resp)
- return nil
-}
-
-var txLabelFlag = cli.StringFlag{
- Name: "label",
- Usage: "(optional) a label for the transaction",
-}
-
-var sendCoinsCommand = cli.Command{
- Name: "sendcoins",
- Category: "On-chain",
- Usage: "Send bitcoin on-chain to an address.",
- ArgsUsage: "addr amt",
- Description: `
- Send amt coins in satoshis to the base58 or bech32 encoded bitcoin address addr.
-
- Fees used when sending the transaction can be specified via the --conf_target, or
- --sat_per_byte optional flags.
-
- Positional arguments and flags can be used interchangeably but not at the same time!
- `,
- Flags: []cli.Flag{
- cli.StringFlag{
- Name: "addr",
- Usage: "the base58 or bech32 encoded bitcoin address to send coins " +
- "to on-chain",
- },
- cli.BoolFlag{
- Name: "sweepall",
- Usage: "if set, then the amount field will be ignored, " +
- "and the wallet will attempt to sweep all " +
- "outputs within the wallet to the target " +
- "address",
- },
- cli.Int64Flag{
- Name: "amt",
- Usage: "the number of bitcoin denominated in satoshis to send",
- },
- cli.Int64Flag{
- Name: "conf_target",
- Usage: "(optional) the number of blocks that the " +
- "transaction *should* confirm in, will be " +
- "used for fee estimation",
- },
- cli.Int64Flag{
- Name: "sat_per_byte",
- Usage: "(optional) a manual fee expressed in " +
- "sat/byte that should be used when crafting " +
- "the transaction",
- },
- cli.Uint64Flag{
- Name: "min_confs",
- Usage: "(optional) the minimum number of confirmations " +
- "each one of your outputs used for the transaction " +
- "must satisfy",
- Value: defaultUtxoMinConf,
- },
- txLabelFlag,
- },
- Action: actionDecorator(sendCoins),
-}
-
-func sendCoins(ctx *cli.Context) er.R {
- var (
- addr string
- amt int64
- err error
- )
- args := ctx.Args()
-
- if ctx.NArg() == 0 && ctx.NumFlags() == 0 {
- cli.ShowCommandHelp(ctx, "sendcoins")
- return nil
- }
-
- if ctx.IsSet("conf_target") && ctx.IsSet("sat_per_byte") {
- return er.Errorf("either conf_target or sat_per_byte should be " +
- "set, but not both")
- }
-
- switch {
- case ctx.IsSet("addr"):
- addr = ctx.String("addr")
- case args.Present():
- addr = args.First()
- args = args.Tail()
- default:
- return er.Errorf("Address argument missing")
- }
-
- switch {
- case ctx.IsSet("amt"):
- amt = ctx.Int64("amt")
- case args.Present():
- amt, err = strconv.ParseInt(args.First(), 10, 64)
- case !ctx.Bool("sweepall"):
- return er.Errorf("Amount argument missing")
- }
- if err != nil {
- return er.Errorf("unable to decode amount: %v", err)
- }
-
- if amt != 0 && ctx.Bool("sweepall") {
- return er.Errorf("amount cannot be set if attempting to " +
- "sweep all coins out of the wallet")
- }
-
- ctxb := context.Background()
- client, cleanUp := getClient(ctx)
- defer cleanUp()
-
- minConfs := int32(ctx.Uint64("min_confs"))
- req := &lnrpc.SendCoinsRequest{
- Addr: addr,
- Amount: amt,
- TargetConf: int32(ctx.Int64("conf_target")),
- SatPerByte: ctx.Int64("sat_per_byte"),
- SendAll: ctx.Bool("sweepall"),
- Label: ctx.String(txLabelFlag.Name),
- MinConfs: minConfs,
- SpendUnconfirmed: minConfs == 0,
- }
- txid, err := client.SendCoins(ctxb, req)
- if err != nil {
- return er.E(err)
- }
-
- printRespJSON(txid)
- return nil
-}
-
-var listUnspentCommand = cli.Command{
- Name: "listunspent",
- Category: "On-chain",
- Usage: "List utxos available for spending.",
- ArgsUsage: "[min-confs [max-confs]] [--unconfirmed_only]",
- Description: `
- For each spendable utxo currently in the wallet, with at least min_confs
- confirmations, and at most max_confs confirmations, lists the txid,
- index, amount, address, address type, scriptPubkey and number of
- confirmations. Use --min_confs=0 to include unconfirmed coins. To list
- all coins with at least min_confs confirmations, omit the second
- argument or flag '--max_confs'. To list all confirmed and unconfirmed
- coins, no arguments are required. To see only unconfirmed coins, use
- '--unconfirmed_only' with '--min_confs' and '--max_confs' set to zero or
- not present.
- `,
- Flags: []cli.Flag{
- cli.Int64Flag{
- Name: "min_confs",
- Usage: "the minimum number of confirmations for a utxo",
- },
- cli.Int64Flag{
- Name: "max_confs",
- Usage: "the maximum number of confirmations for a utxo",
- },
- cli.BoolFlag{
- Name: "unconfirmed_only",
- Usage: "when min_confs and max_confs are zero, " +
- "setting false implicitly overrides max_confs " +
- "to be MaxInt32, otherwise max_confs remains " +
- "zero. An error is returned if the value is " +
- "true and both min_confs and max_confs are " +
- "non-zero. (default: false)",
- },
- },
- Action: actionDecorator(listUnspent),
-}
-
-func listUnspent(ctx *cli.Context) er.R {
- var (
- minConfirms int64
- maxConfirms int64
- err error
- )
- args := ctx.Args()
-
- if ctx.IsSet("max_confs") && !ctx.IsSet("min_confs") {
- return er.Errorf("max_confs cannot be set without " +
- "min_confs being set")
- }
-
- switch {
- case ctx.IsSet("min_confs"):
- minConfirms = ctx.Int64("min_confs")
- case args.Present():
- minConfirms, err = strconv.ParseInt(args.First(), 10, 64)
- if err != nil {
- cli.ShowCommandHelp(ctx, "listunspent")
- return nil
- }
- args = args.Tail()
- }
-
- switch {
- case ctx.IsSet("max_confs"):
- maxConfirms = ctx.Int64("max_confs")
- case args.Present():
- maxConfirms, err = strconv.ParseInt(args.First(), 10, 64)
- if err != nil {
- cli.ShowCommandHelp(ctx, "listunspent")
- return nil
- }
- args = args.Tail()
- }
-
- unconfirmedOnly := ctx.Bool("unconfirmed_only")
-
- // Force minConfirms and maxConfirms to be zero if unconfirmedOnly is
- // true.
- if unconfirmedOnly && (minConfirms != 0 || maxConfirms != 0) {
- cli.ShowCommandHelp(ctx, "listunspent")
- return nil
- }
-
- // When unconfirmedOnly is inactive, we will override maxConfirms to be
- // a MaxInt32 to return all confirmed and unconfirmed utxos.
- if maxConfirms == 0 && !unconfirmedOnly {
- maxConfirms = math.MaxInt32
- }
-
- ctxb := context.Background()
- client, cleanUp := getClient(ctx)
- defer cleanUp()
-
- req := &lnrpc.ListUnspentRequest{
- MinConfs: int32(minConfirms),
- MaxConfs: int32(maxConfirms),
- }
- resp, err := client.ListUnspent(ctxb, req)
- if err != nil {
- return er.E(err)
- }
-
- // Parse the response into the final json object that will be printed
- // to stdout. At the moment, this filters out the raw txid bytes from
- // each utxo's outpoint and only prints the txid string.
- var listUnspentResp = struct {
- Utxos []*Utxo `json:"utxos"`
- }{
- Utxos: make([]*Utxo, 0, len(resp.Utxos)),
- }
- for _, protoUtxo := range resp.Utxos {
- utxo := NewUtxoFromProto(protoUtxo)
- listUnspentResp.Utxos = append(listUnspentResp.Utxos, utxo)
- }
-
- printJSON(listUnspentResp)
-
- return nil
-}
-
-var sendManyCommand = cli.Command{
- Name: "sendmany",
- Category: "On-chain",
- Usage: "Send bitcoin on-chain to multiple addresses.",
- ArgsUsage: "send-json-string [--conf_target=N] [--sat_per_byte=P]",
- Description: `
- Create and broadcast a transaction paying the specified amount(s) to the passed address(es).
-
- The send-json-string' param decodes addresses and the amount to send
- respectively in the following format:
-
- '{"ExampleAddr": NumCoinsInSatoshis, "SecondAddr": NumCoins}'
- `,
- Flags: []cli.Flag{
- cli.Int64Flag{
- Name: "conf_target",
- Usage: "(optional) the number of blocks that the transaction *should* " +
- "confirm in, will be used for fee estimation",
- },
- cli.Int64Flag{
- Name: "sat_per_byte",
- Usage: "(optional) a manual fee expressed in sat/byte that should be " +
- "used when crafting the transaction",
- },
- cli.Uint64Flag{
- Name: "min_confs",
- Usage: "(optional) the minimum number of confirmations " +
- "each one of your outputs used for the transaction " +
- "must satisfy",
- Value: defaultUtxoMinConf,
- },
- txLabelFlag,
- },
- Action: actionDecorator(sendMany),
-}
-
-func sendMany(ctx *cli.Context) er.R {
- var amountToAddr map[string]int64
-
- jsonMap := ctx.Args().First()
- if err := json.Unmarshal([]byte(jsonMap), &amountToAddr); err != nil {
- return er.E(err)
- }
-
- if ctx.IsSet("conf_target") && ctx.IsSet("sat_per_byte") {
- return er.Errorf("either conf_target or sat_per_byte should be " +
- "set, but not both")
- }
-
- ctxb := context.Background()
- client, cleanUp := getClient(ctx)
- defer cleanUp()
-
- minConfs := int32(ctx.Uint64("min_confs"))
- txid, err := client.SendMany(ctxb, &lnrpc.SendManyRequest{
- AddrToAmount: amountToAddr,
- TargetConf: int32(ctx.Int64("conf_target")),
- SatPerByte: ctx.Int64("sat_per_byte"),
- Label: ctx.String(txLabelFlag.Name),
- MinConfs: minConfs,
- SpendUnconfirmed: minConfs == 0,
- })
- if err != nil {
- return er.E(err)
- }
-
- printRespJSON(txid)
- return nil
-}
-
-var connectCommand = cli.Command{
- Name: "connect",
- Category: "Peers",
- Usage: "Connect to a remote lnd peer.",
- ArgsUsage: "@host",
- Description: `
- Connect to a peer using its and host.
-
- A custom timeout on the connection is supported. For instance, to timeout
- the connection request in 30 seconds, use the following:
-
- lncli connect @host --timeout 30s
- `,
- Flags: []cli.Flag{
- cli.BoolFlag{
- Name: "perm",
- Usage: "If set, the daemon will attempt to persistently " +
- "connect to the target peer.\n" +
- " If not, the call will be synchronous.",
- },
- cli.DurationFlag{
- Name: "timeout",
- Usage: "The connection timeout value for current request. " +
- "Valid uints are {ms, s, m, h}.\n" +
- "If not set, the global connection " +
- "timeout value (default to 120s) is used.",
- },
- },
- Action: actionDecorator(connectPeer),
-}
-
-func connectPeer(ctx *cli.Context) er.R {
- ctxb := context.Background()
- client, cleanUp := getClient(ctx)
- defer cleanUp()
-
- targetAddress := ctx.Args().First()
- splitAddr := strings.Split(targetAddress, "@")
- if len(splitAddr) != 2 {
- return er.Errorf("target address expected in format: " +
- "pubkey@host:port")
- }
-
- addr := &lnrpc.LightningAddress{
- Pubkey: splitAddr[0],
- Host: splitAddr[1],
- }
- req := &lnrpc.ConnectPeerRequest{
- Addr: addr,
- Perm: ctx.Bool("perm"),
- Timeout: uint64(ctx.Duration("timeout").Seconds()),
- }
-
- lnid, err := client.ConnectPeer(ctxb, req)
- if err != nil {
- return er.E(err)
- }
-
- printRespJSON(lnid)
- return nil
-}
-
-var disconnectCommand = cli.Command{
- Name: "disconnect",
- Category: "Peers",
- Usage: "Disconnect a remote lnd peer identified by public key.",
- ArgsUsage: "",
- Flags: []cli.Flag{
- cli.StringFlag{
- Name: "node_key",
- Usage: "The hex-encoded compressed public key of the peer " +
- "to disconnect from",
- },
- },
- Action: actionDecorator(disconnectPeer),
-}
-
-func disconnectPeer(ctx *cli.Context) er.R {
- ctxb := context.Background()
- client, cleanUp := getClient(ctx)
- defer cleanUp()
-
- var pubKey string
- switch {
- case ctx.IsSet("node_key"):
- pubKey = ctx.String("node_key")
- case ctx.Args().Present():
- pubKey = ctx.Args().First()
- default:
- return er.Errorf("must specify target public key")
- }
-
- req := &lnrpc.DisconnectPeerRequest{
- PubKey: pubKey,
- }
-
- lnid, err := client.DisconnectPeer(ctxb, req)
- if err != nil {
- return er.E(err)
- }
-
- printRespJSON(lnid)
- return nil
-}
-
-// TODO(roasbeef): also allow short relative channel ID.
-
-var closeChannelCommand = cli.Command{
- Name: "closechannel",
- Category: "Channels",
- Usage: "Close an existing channel.",
- Description: `
- Close an existing channel. The channel can be closed either cooperatively,
- or unilaterally (--force).
-
- A unilateral channel closure means that the latest commitment
- transaction will be broadcast to the network. As a result, any settled
- funds will be time locked for a few blocks before they can be spent.
-
- In the case of a cooperative closure, one can manually set the fee to
- be used for the closing transaction via either the --conf_target or
- --sat_per_byte arguments. This will be the starting value used during
- fee negotiation. This is optional.
-
- In the case of a cooperative closure, one can manually set the address
- to deliver funds to upon closure. This is optional, and may only be used
- if an upfront shutdown address has not already been set. If neither are
- set the funds will be delivered to a new wallet address.
-
- To view which funding_txids/output_indexes can be used for a channel close,
- see the channel_point values within the listchannels command output.
- The format for a channel_point is 'funding_txid:output_index'.`,
- ArgsUsage: "funding_txid [output_index]",
- Flags: []cli.Flag{
- cli.StringFlag{
- Name: "funding_txid",
- Usage: "the txid of the channel's funding transaction",
- },
- cli.IntFlag{
- Name: "output_index",
- Usage: "the output index for the funding output of the funding " +
- "transaction",
- },
- cli.BoolFlag{
- Name: "force",
- Usage: "attempt an uncooperative closure",
- },
- cli.BoolFlag{
- Name: "block",
- Usage: "block until the channel is closed",
- },
- cli.Int64Flag{
- Name: "conf_target",
- Usage: "(optional) the number of blocks that the " +
- "transaction *should* confirm in, will be " +
- "used for fee estimation",
- },
- cli.Int64Flag{
- Name: "sat_per_byte",
- Usage: "(optional) a manual fee expressed in " +
- "sat/byte that should be used when crafting " +
- "the transaction",
- },
- cli.StringFlag{
- Name: "delivery_addr",
- Usage: "(optional) an address to deliver funds " +
- "upon cooperative channel closing, may only " +
- "be used if an upfront shutdown address is not " +
- "already set",
- },
- },
- Action: actionDecorator(closeChannel),
-}
-
-func closeChannel(ctx *cli.Context) er.R {
- client, cleanUp := getClient(ctx)
- defer cleanUp()
-
- // Show command help if no arguments and flags were provided.
- if ctx.NArg() == 0 && ctx.NumFlags() == 0 {
- cli.ShowCommandHelp(ctx, "closechannel")
- return nil
- }
-
- channelPoint, err := parseChannelPoint(ctx)
- if err != nil {
- return err
- }
-
- // TODO(roasbeef): implement time deadline within server
- req := &lnrpc.CloseChannelRequest{
- ChannelPoint: channelPoint,
- Force: ctx.Bool("force"),
- TargetConf: int32(ctx.Int64("conf_target")),
- SatPerByte: ctx.Int64("sat_per_byte"),
- DeliveryAddress: ctx.String("delivery_addr"),
- }
-
- // After parsing the request, we'll spin up a goroutine that will
- // retrieve the closing transaction ID when attempting to close the
- // channel. We do this to because `executeChannelClose` can block, so we
- // would like to present the closing transaction ID to the user as soon
- // as it is broadcasted.
- var wg sync.WaitGroup
- txidChan := make(chan string, 1)
-
- wg.Add(1)
- go func() {
- defer wg.Done()
-
- printJSON(struct {
- ClosingTxid string `json:"closing_txid"`
- }{
- ClosingTxid: <-txidChan,
- })
- }()
-
- err = executeChannelClose(client, req, txidChan, ctx.Bool("block"))
- if err != nil {
- return err
- }
-
- // In the case that the user did not provide the `block` flag, then we
- // need to wait for the goroutine to be done to prevent it from being
- // destroyed when exiting before printing the closing transaction ID.
- wg.Wait()
-
- return nil
-}
-
-// executeChannelClose attempts to close the channel from a request. The closing
-// transaction ID is sent through `txidChan` as soon as it is broadcasted to the
-// network. The block boolean is used to determine if we should block until the
-// closing transaction receives all of its required confirmations.
-func executeChannelClose(client lnrpc.LightningClient, req *lnrpc.CloseChannelRequest,
- txidChan chan<- string, block bool) er.R {
-
- stream, err := client.CloseChannel(context.Background(), req)
- if err != nil {
- return er.E(err)
- }
-
- for {
- resp, err := stream.Recv()
- if err == io.EOF {
- return nil
- } else if err != nil {
- return er.E(err)
- }
-
- switch update := resp.Update.(type) {
- case *lnrpc.CloseStatusUpdate_ClosePending:
- closingHash := update.ClosePending.Txid
- txid, err := chainhash.NewHash(closingHash)
- if err != nil {
- return err
- }
-
- txidChan <- txid.String()
-
- if !block {
- return nil
- }
- case *lnrpc.CloseStatusUpdate_ChanClose:
- return nil
- }
- }
-}
-
-var closeAllChannelsCommand = cli.Command{
- Name: "closeallchannels",
- Category: "Channels",
- Usage: "Close all existing channels.",
- Description: `
- Close all existing channels.
-
- Channels will be closed either cooperatively or unilaterally, depending
- on whether the channel is active or not. If the channel is inactive, any
- settled funds within it will be time locked for a few blocks before they
- can be spent.
-
- One can request to close inactive channels only by using the
- --inactive_only flag.
-
- By default, one is prompted for confirmation every time an inactive
- channel is requested to be closed. To avoid this, one can set the
- --force flag, which will only prompt for confirmation once for all
- inactive channels and proceed to close them.
-
- In the case of cooperative closures, one can manually set the fee to
- be used for the closing transactions via either the --conf_target or
- --sat_per_byte arguments. This will be the starting value used during
- fee negotiation. This is optional.`,
- Flags: []cli.Flag{
- cli.BoolFlag{
- Name: "inactive_only",
- Usage: "close inactive channels only",
- },
- cli.BoolFlag{
- Name: "force",
- Usage: "ask for confirmation once before attempting " +
- "to close existing channels",
- },
- cli.Int64Flag{
- Name: "conf_target",
- Usage: "(optional) the number of blocks that the " +
- "closing transactions *should* confirm in, will be " +
- "used for fee estimation",
- },
- cli.Int64Flag{
- Name: "sat_per_byte",
- Usage: "(optional) a manual fee expressed in " +
- "sat/byte that should be used when crafting " +
- "the closing transactions",
- },
- },
- Action: actionDecorator(closeAllChannels),
-}
-
-func closeAllChannels(ctx *cli.Context) er.R {
- client, cleanUp := getClient(ctx)
- defer cleanUp()
-
- listReq := &lnrpc.ListChannelsRequest{}
- openChannels, err := client.ListChannels(context.Background(), listReq)
- if err != nil {
- return er.Errorf("unable to fetch open channels: %v", err)
- }
-
- if len(openChannels.Channels) == 0 {
- return er.New("no open channels to close")
- }
-
- var channelsToClose []*lnrpc.Channel
-
- switch {
- case ctx.Bool("force") && ctx.Bool("inactive_only"):
- msg := "Unilaterally close all inactive channels? The funds " +
- "within these channels will be locked for some blocks " +
- "(CSV delay) before they can be spent. (yes/no): "
-
- confirmed := promptForConfirmation(msg)
-
- // We can safely exit if the user did not confirm.
- if !confirmed {
- return nil
- }
-
- // Go through the list of open channels and only add inactive
- // channels to the closing list.
- for _, channel := range openChannels.Channels {
- if !channel.GetActive() {
- channelsToClose = append(
- channelsToClose, channel,
- )
- }
- }
- case ctx.Bool("force"):
- msg := "Close all active and inactive channels? Inactive " +
- "channels will be closed unilaterally, so funds " +
- "within them will be locked for a few blocks (CSV " +
- "delay) before they can be spent. (yes/no): "
-
- confirmed := promptForConfirmation(msg)
-
- // We can safely exit if the user did not confirm.
- if !confirmed {
- return nil
- }
-
- channelsToClose = openChannels.Channels
- default:
- // Go through the list of open channels and determine which
- // should be added to the closing list.
- for _, channel := range openChannels.Channels {
- // If the channel is inactive, we'll attempt to
- // unilaterally close the channel, so we should prompt
- // the user for confirmation beforehand.
- if !channel.GetActive() {
- msg := fmt.Sprintf("Unilaterally close channel "+
- "with node %s and channel point %s? "+
- "The closing transaction will need %d "+
- "confirmations before the funds can be "+
- "spent. (yes/no): ", channel.RemotePubkey,
- channel.ChannelPoint, channel.LocalConstraints.CsvDelay)
-
- confirmed := promptForConfirmation(msg)
-
- if confirmed {
- channelsToClose = append(
- channelsToClose, channel,
- )
- }
- } else if !ctx.Bool("inactive_only") {
- // Otherwise, we'll only add active channels if
- // we were not requested to close inactive
- // channels only.
- channelsToClose = append(
- channelsToClose, channel,
- )
- }
- }
- }
-
- // result defines the result of closing a channel. The closing
- // transaction ID is populated if a channel is successfully closed.
- // Otherwise, the error that prevented closing the channel is populated.
- type result struct {
- RemotePubKey string `json:"remote_pub_key"`
- ChannelPoint string `json:"channel_point"`
- ClosingTxid string `json:"closing_txid"`
- FailErr string `json:"error"`
- }
-
- // Launch each channel closure in a goroutine in order to execute them
- // in parallel. Once they're all executed, we will print the results as
- // they come.
- resultChan := make(chan result, len(channelsToClose))
- for _, channel := range channelsToClose {
- go func(channel *lnrpc.Channel) {
- res := result{}
- res.RemotePubKey = channel.RemotePubkey
- res.ChannelPoint = channel.ChannelPoint
- defer func() {
- resultChan <- res
- }()
-
- // Parse the channel point in order to create the close
- // channel request.
- s := strings.Split(res.ChannelPoint, ":")
- if len(s) != 2 {
- res.FailErr = "expected channel point with " +
- "format txid:index"
- return
- }
- index, errr := strconv.ParseUint(s[1], 10, 32)
- if errr != nil {
- res.FailErr = fmt.Sprintf("unable to parse "+
- "channel point output index: %v", errr)
- return
- }
-
- req := &lnrpc.CloseChannelRequest{
- ChannelPoint: &lnrpc.ChannelPoint{
- FundingTxid: &lnrpc.ChannelPoint_FundingTxidStr{
- FundingTxidStr: s[0],
- },
- OutputIndex: uint32(index),
- },
- Force: !channel.GetActive(),
- TargetConf: int32(ctx.Int64("conf_target")),
- SatPerByte: ctx.Int64("sat_per_byte"),
- }
-
- txidChan := make(chan string, 1)
- err := executeChannelClose(client, req, txidChan, false)
- if err != nil {
- res.FailErr = fmt.Sprintf("unable to close "+
- "channel: %v", err)
- return
- }
-
- res.ClosingTxid = <-txidChan
- }(channel)
- }
-
- for range channelsToClose {
- res := <-resultChan
- printJSON(res)
- }
-
- return nil
-}
-
-// promptForConfirmation continuously prompts the user for the message until
-// receiving a response of "yes" or "no" and returns their answer as a bool.
-func promptForConfirmation(msg string) bool {
- reader := bufio.NewReader(os.Stdin)
-
- for {
- fmt.Print(msg)
-
- answer, err := reader.ReadString('\n')
- if err != nil {
- return false
- }
-
- answer = strings.ToLower(strings.TrimSpace(answer))
-
- switch {
- case answer == "yes":
- return true
- case answer == "no":
- return false
- default:
- continue
- }
- }
-}
-
-var abandonChannelCommand = cli.Command{
- Name: "abandonchannel",
- Category: "Channels",
- Usage: "Abandons an existing channel.",
- Description: `
- Removes all channel state from the database except for a close
- summary. This method can be used to get rid of permanently unusable
- channels due to bugs fixed in newer versions of lnd.
-
- Only available when lnd is built in debug mode.
-
- To view which funding_txids/output_indexes can be used for this command,
- see the channel_point values within the listchannels command output.
- The format for a channel_point is 'funding_txid:output_index'.`,
- ArgsUsage: "funding_txid [output_index]",
- Flags: []cli.Flag{
- cli.StringFlag{
- Name: "funding_txid",
- Usage: "the txid of the channel's funding transaction",
- },
- cli.IntFlag{
- Name: "output_index",
- Usage: "the output index for the funding output of the funding " +
- "transaction",
- },
- },
- Action: actionDecorator(abandonChannel),
-}
-
-func abandonChannel(ctx *cli.Context) er.R {
- ctxb := context.Background()
-
- client, cleanUp := getClient(ctx)
- defer cleanUp()
-
- // Show command help if no arguments and flags were provided.
- if ctx.NArg() == 0 && ctx.NumFlags() == 0 {
- cli.ShowCommandHelp(ctx, "abandonchannel")
- return nil
- }
-
- channelPoint, err := parseChannelPoint(ctx)
- if err != nil {
- return err
- }
-
- req := &lnrpc.AbandonChannelRequest{
- ChannelPoint: channelPoint,
- }
-
- resp, errr := client.AbandonChannel(ctxb, req)
- if errr != nil {
- return er.E(errr)
- }
-
- printRespJSON(resp)
- return nil
-}
-
-// parseChannelPoint parses a funding txid and output index from the command
-// line. Both named options as well as unnamed parameters are supported.
-func parseChannelPoint(ctx *cli.Context) (*lnrpc.ChannelPoint, er.R) {
- channelPoint := &lnrpc.ChannelPoint{}
-
- args := ctx.Args()
-
- switch {
- case ctx.IsSet("funding_txid"):
- channelPoint.FundingTxid = &lnrpc.ChannelPoint_FundingTxidStr{
- FundingTxidStr: ctx.String("funding_txid"),
- }
- case args.Present():
- channelPoint.FundingTxid = &lnrpc.ChannelPoint_FundingTxidStr{
- FundingTxidStr: args.First(),
- }
- args = args.Tail()
- default:
- return nil, er.Errorf("funding txid argument missing")
- }
-
- switch {
- case ctx.IsSet("output_index"):
- channelPoint.OutputIndex = uint32(ctx.Int("output_index"))
- case args.Present():
- index, err := strconv.ParseUint(args.First(), 10, 32)
- if err != nil {
- return nil, er.Errorf("unable to decode output index: %v", err)
- }
- channelPoint.OutputIndex = uint32(index)
- default:
- channelPoint.OutputIndex = 0
- }
-
- return channelPoint, nil
-}
-
-var listPeersCommand = cli.Command{
- Name: "listpeers",
- Category: "Peers",
- Usage: "List all active, currently connected peers.",
- Flags: []cli.Flag{
- cli.BoolFlag{
- Name: "list_errors",
- Usage: "list a full set of most recent errors for the peer",
- },
- },
- Action: actionDecorator(listPeers),
-}
-
-func listPeers(ctx *cli.Context) er.R {
- ctxb := context.Background()
- client, cleanUp := getClient(ctx)
- defer cleanUp()
-
- // By default, we display a single error on the cli. If the user
- // specifically requests a full error set, then we will provide it.
- req := &lnrpc.ListPeersRequest{
- LatestError: !ctx.IsSet("list_errors"),
- }
- resp, err := client.ListPeers(ctxb, req)
- if err != nil {
- return er.E(err)
- }
-
- printRespJSON(resp)
- return nil
-}
-
-var walletBalanceCommand = cli.Command{
- Name: "walletbalance",
- Category: "Wallet",
- Usage: "Compute and display the wallet's current balance.",
- Action: actionDecorator(walletBalance),
-}
-
-func walletBalance(ctx *cli.Context) er.R {
- ctxb := context.Background()
- client, cleanUp := getClient(ctx)
- defer cleanUp()
-
- req := &lnrpc.WalletBalanceRequest{}
- resp, err := client.WalletBalance(ctxb, req)
- if err != nil {
- return er.E(err)
- }
-
- printRespJSON(resp)
- return nil
-}
-
-var getAddressBalancesCommand = cli.Command{
- Name: "getaddressbalances",
- Category: "Wallet",
- Description: `
- Get the balance for each active address, including:
- * The amount which is immediately spendable
- * The amount which is unconfirmed (specify minconf to decide how many confirms are needed)
- * The amount which is a mining reward that has not yet matured`,
- Flags: []cli.Flag{
- cli.IntFlag{
- Name: "minconf",
- Usage: "The minimum required confirms for a transaction to be considered confirmed",
- },
- cli.BoolFlag{
- Name: "show_zero_balance",
- Usage: "Show addresses which are active in the wallet but have no known coins",
- },
- },
- Usage: "Compute and display balances for each address in the wallet.",
- Action: actionDecorator(getAddressBalances),
-}
-
-func getAddressBalances(ctx *cli.Context) er.R {
- ctxb := context.Background()
- client, cleanUp := getClient(ctx)
- defer cleanUp()
-
- minconf := 1
- if ctx.IsSet("minconf") {
- minconf = ctx.Int("minconf")
- }
-
- req := &lnrpc.GetAddressBalancesRequest{
- Minconf: int32(minconf),
- Showzerobalance: ctx.IsSet("show_zero_balance"),
- }
- resp, err := client.GetAddressBalances(ctxb, req)
- if err != nil {
- return er.E(err)
- }
-
- printRespJSON(resp)
- return nil
-}
-
-var channelBalanceCommand = cli.Command{
- Name: "channelbalance",
- Category: "Channels",
- Usage: "Returns the sum of the total available channel balance across " +
- "all open channels.",
- Action: actionDecorator(channelBalance),
-}
-
-func channelBalance(ctx *cli.Context) er.R {
- ctxb := context.Background()
- client, cleanUp := getClient(ctx)
- defer cleanUp()
-
- req := &lnrpc.ChannelBalanceRequest{}
- resp, err := client.ChannelBalance(ctxb, req)
- if err != nil {
- return er.E(err)
- }
-
- printRespJSON(resp)
- return nil
-}
-
-var getInfoCommand = cli.Command{
- Name: "getinfo",
- Usage: "Returns basic information related to the active daemon.",
- Action: actionDecorator(getInfo),
-}
-
-func getInfo(ctx *cli.Context) er.R {
- ctxb := context.Background()
- client, cleanUp := getClient(ctx)
- defer cleanUp()
- inforeq := &lnrpc.GetInfoRequest{}
- inforesp, infoerr := client.GetInfo(ctxb, inforeq)
- if infoerr != nil {
- inforesp = nil
- }
- // call getinfo2 from metaservice hat will return some info even when wallet is locked
- metaclient, cleanUpMeta := getMetaServiceClient(ctx)
- defer cleanUpMeta()
- info2req := &lnrpc.GetInfo2Request{
- InfoResponse: inforesp,
- }
- info2resp, info2err := metaclient.GetInfo2(ctxb, info2req)
- if info2err != nil {
- return er.E(info2err)
- }
-
- printRespJSON(info2resp)
- return nil
-}
-
-var getRecoveryInfoCommand = cli.Command{
- Name: "getrecoveryinfo",
- Usage: "Display information about an ongoing recovery attempt.",
- Action: actionDecorator(getRecoveryInfo),
-}
-
-func getRecoveryInfo(ctx *cli.Context) er.R {
- ctxb := context.Background()
- client, cleanUp := getClient(ctx)
- defer cleanUp()
-
- req := &lnrpc.GetRecoveryInfoRequest{}
- resp, err := client.GetRecoveryInfo(ctxb, req)
- if err != nil {
- return er.E(err)
- }
-
- printRespJSON(resp)
- return nil
-}
-
-var pendingChannelsCommand = cli.Command{
- Name: "pendingchannels",
- Category: "Channels",
- Usage: "Display information pertaining to pending channels.",
- Action: actionDecorator(pendingChannels),
-}
-
-func pendingChannels(ctx *cli.Context) er.R {
- ctxb := context.Background()
- client, cleanUp := getClient(ctx)
- defer cleanUp()
-
- req := &lnrpc.PendingChannelsRequest{}
- resp, err := client.PendingChannels(ctxb, req)
- if err != nil {
- return er.E(err)
- }
-
- printRespJSON(resp)
-
- return nil
-}
-
-var listChannelsCommand = cli.Command{
- Name: "listchannels",
- Category: "Channels",
- Usage: "List all open channels.",
- Flags: []cli.Flag{
- cli.BoolFlag{
- Name: "active_only",
- Usage: "only list channels which are currently active",
- },
- cli.BoolFlag{
- Name: "inactive_only",
- Usage: "only list channels which are currently inactive",
- },
- cli.BoolFlag{
- Name: "public_only",
- Usage: "only list channels which are currently public",
- },
- cli.BoolFlag{
- Name: "private_only",
- Usage: "only list channels which are currently private",
- },
- cli.StringFlag{
- Name: "peer",
- Usage: "(optional) only display channels with a " +
- "particular peer, accepts 66-byte, " +
- "hex-encoded pubkeys",
- },
- },
- Action: actionDecorator(listChannels),
-}
-
-func listChannels(ctx *cli.Context) er.R {
- ctxb := context.Background()
- client, cleanUp := getClient(ctx)
- defer cleanUp()
-
- peer := ctx.String("peer")
-
- // If the user requested channels with a particular key, parse the
- // provided pubkey.
- var peerKey []byte
- if len(peer) > 0 {
- pk, err := route.NewVertexFromStr(peer)
- if err != nil {
- return er.Errorf("invalid --peer pubkey: %v", err)
- }
-
- peerKey = pk[:]
- }
-
- req := &lnrpc.ListChannelsRequest{
- ActiveOnly: ctx.Bool("active_only"),
- InactiveOnly: ctx.Bool("inactive_only"),
- PublicOnly: ctx.Bool("public_only"),
- PrivateOnly: ctx.Bool("private_only"),
- Peer: peerKey,
- }
-
- resp, err := client.ListChannels(ctxb, req)
- if err != nil {
- return er.E(err)
- }
-
- printRespJSON(resp)
-
- return nil
-}
-
-var closedChannelsCommand = cli.Command{
- Name: "closedchannels",
- Category: "Channels",
- Usage: "List all closed channels.",
- Flags: []cli.Flag{
- cli.BoolFlag{
- Name: "cooperative",
- Usage: "list channels that were closed cooperatively",
- },
- cli.BoolFlag{
- Name: "local_force",
- Usage: "list channels that were force-closed " +
- "by the local node",
- },
- cli.BoolFlag{
- Name: "remote_force",
- Usage: "list channels that were force-closed " +
- "by the remote node",
- },
- cli.BoolFlag{
- Name: "breach",
- Usage: "list channels for which the remote node " +
- "attempted to broadcast a prior " +
- "revoked channel state",
- },
- cli.BoolFlag{
- Name: "funding_canceled",
- Usage: "list channels that were never fully opened",
- },
- cli.BoolFlag{
- Name: "abandoned",
- Usage: "list channels that were abandoned by " +
- "the local node",
- },
- },
- Action: actionDecorator(closedChannels),
-}
-
-func closedChannels(ctx *cli.Context) er.R {
- ctxb := context.Background()
- client, cleanUp := getClient(ctx)
- defer cleanUp()
-
- req := &lnrpc.ClosedChannelsRequest{
- Cooperative: ctx.Bool("cooperative"),
- LocalForce: ctx.Bool("local_force"),
- RemoteForce: ctx.Bool("remote_force"),
- Breach: ctx.Bool("breach"),
- FundingCanceled: ctx.Bool("funding_canceled"),
- Abandoned: ctx.Bool("abandoned"),
- }
-
- resp, err := client.ClosedChannels(ctxb, req)
- if err != nil {
- return er.E(err)
- }
-
- printRespJSON(resp)
-
- return nil
-}
-
-var describeGraphCommand = cli.Command{
- Name: "describegraph",
- Category: "Graph",
- Description: "Prints a human readable version of the known channel " +
- "graph from the PoV of the node",
- Usage: "Describe the network graph.",
- Flags: []cli.Flag{
- cli.BoolFlag{
- Name: "include_unannounced",
- Usage: "If set, unannounced channels will be included in the " +
- "graph. Unannounced channels are both private channels, and " +
- "public channels that are not yet announced to the network.",
- },
- },
- Action: actionDecorator(describeGraph),
-}
-
-func describeGraph(ctx *cli.Context) er.R {
- client, cleanUp := getClient(ctx)
- defer cleanUp()
-
- req := &lnrpc.ChannelGraphRequest{
- IncludeUnannounced: ctx.Bool("include_unannounced"),
- }
-
- graph, err := client.DescribeGraph(context.Background(), req)
- if err != nil {
- return er.E(err)
- }
-
- printRespJSON(graph)
- return nil
-}
-
-var getNodeMetricsCommand = cli.Command{
- Name: "getnodemetrics",
- Category: "Graph",
- Description: "Prints out node metrics calculated from the current graph",
- Usage: "Get node metrics.",
- Action: actionDecorator(getNodeMetrics),
-}
-
-func getNodeMetrics(ctx *cli.Context) er.R {
- client, cleanUp := getClient(ctx)
- defer cleanUp()
-
- req := &lnrpc.NodeMetricsRequest{
- Types: []lnrpc.NodeMetricType{lnrpc.NodeMetricType_BETWEENNESS_CENTRALITY},
- }
-
- nodeMetrics, err := client.GetNodeMetrics(context.Background(), req)
- if err != nil {
- return er.E(err)
- }
-
- printRespJSON(nodeMetrics)
- return nil
-}
-
-var listPaymentsCommand = cli.Command{
- Name: "listpayments",
- Category: "Payments",
- Usage: "List all outgoing payments.",
- Description: "This command enables the retrieval of payments stored " +
- "in the database. Pagination is supported by the usage of " +
- "index_offset in combination with the paginate_forwards flag. " +
- "Reversed pagination is enabled by default to receive " +
- "current payments first. Pagination can be resumed by using " +
- "the returned last_index_offset (for forwards order), or " +
- "first_index_offset (for reversed order) as the offset_index. ",
- Flags: []cli.Flag{
- cli.BoolFlag{
- Name: "include_incomplete",
- Usage: "if set to true, payments still in flight (or " +
- "failed) will be returned as well, keeping" +
- "indices for payments the same as without " +
- "the flag",
- },
- cli.UintFlag{
- Name: "index_offset",
- Usage: "The index of a payment that will be used as " +
- "either the start (in forwards mode) or end " +
- "(in reverse mode) of a query to determine " +
- "which payments should be returned in the " +
- "response, where the index_offset is " +
- "excluded. If index_offset is set to zero in " +
- "reversed mode, the query will end with the " +
- "last payment made.",
- },
- cli.UintFlag{
- Name: "max_payments",
- Usage: "the max number of payments to return, by " +
- "default, all completed payments are returned",
- },
- cli.BoolFlag{
- Name: "paginate_forwards",
- Usage: "if set, payments succeeding the " +
- "index_offset will be returned, allowing " +
- "forwards pagination",
- },
- },
- Action: actionDecorator(listPayments),
-}
-
-func listPayments(ctx *cli.Context) er.R {
- client, cleanUp := getClient(ctx)
- defer cleanUp()
-
- req := &lnrpc.ListPaymentsRequest{
- IncludeIncomplete: ctx.Bool("include_incomplete"),
- IndexOffset: uint64(ctx.Uint("index_offset")),
- MaxPayments: uint64(ctx.Uint("max_payments")),
- Reversed: !ctx.Bool("paginate_forwards"),
- }
-
- payments, err := client.ListPayments(context.Background(), req)
- if err != nil {
- return er.E(err)
- }
-
- printRespJSON(payments)
- return nil
-}
-
-var getChanInfoCommand = cli.Command{
- Name: "getchaninfo",
- Category: "Graph",
- Usage: "Get the state of a channel.",
- Description: "Prints out the latest authenticated state for a " +
- "particular channel",
- ArgsUsage: "chan_id",
- Flags: []cli.Flag{
- cli.Int64Flag{
- Name: "chan_id",
- Usage: "the 8-byte compact channel ID to query for",
- },
- },
- Action: actionDecorator(getChanInfo),
-}
-
-func getChanInfo(ctx *cli.Context) er.R {
- ctxb := context.Background()
- client, cleanUp := getClient(ctx)
- defer cleanUp()
-
- var (
- chanID int64
- err error
- )
-
- switch {
- case ctx.IsSet("chan_id"):
- chanID = ctx.Int64("chan_id")
- case ctx.Args().Present():
- chanID, err = strconv.ParseInt(ctx.Args().First(), 10, 64)
- if err != nil {
- return er.Errorf("error parsing chan_id: %s", err)
- }
- default:
- return er.Errorf("chan_id argument missing")
- }
-
- req := &lnrpc.ChanInfoRequest{
- ChanId: uint64(chanID),
- }
-
- chanInfo, err := client.GetChanInfo(ctxb, req)
- if err != nil {
- return er.E(err)
- }
-
- printRespJSON(chanInfo)
- return nil
-}
-
-var getNodeInfoCommand = cli.Command{
- Name: "getnodeinfo",
- Category: "Graph",
- Usage: "Get information on a specific node.",
- Description: "Prints out the latest authenticated node state for an " +
- "advertised node",
- Flags: []cli.Flag{
- cli.StringFlag{
- Name: "pub_key",
- Usage: "the 33-byte hex-encoded compressed public of the target " +
- "node",
- },
- cli.BoolFlag{
- Name: "include_channels",
- Usage: "if true, will return all known channels " +
- "associated with the node",
- },
- },
- Action: actionDecorator(getNodeInfo),
-}
-
-func getNodeInfo(ctx *cli.Context) er.R {
- ctxb := context.Background()
- client, cleanUp := getClient(ctx)
- defer cleanUp()
-
- args := ctx.Args()
-
- var pubKey string
- switch {
- case ctx.IsSet("pub_key"):
- pubKey = ctx.String("pub_key")
- case args.Present():
- pubKey = args.First()
- default:
- return er.Errorf("pub_key argument missing")
- }
-
- req := &lnrpc.NodeInfoRequest{
- PubKey: pubKey,
- IncludeChannels: ctx.Bool("include_channels"),
- }
-
- nodeInfo, err := client.GetNodeInfo(ctxb, req)
- if err != nil {
- return er.E(err)
- }
-
- printRespJSON(nodeInfo)
- return nil
-}
-
-var queryRoutesCommand = cli.Command{
- Name: "queryroutes",
- Category: "Payments",
- Usage: "Query a route to a destination.",
- Description: "Queries the channel router for a potential path to the destination that has sufficient flow for the amount including fees",
- ArgsUsage: "dest amt",
- Flags: []cli.Flag{
- cli.StringFlag{
- Name: "dest",
- Usage: "the 33-byte hex-encoded public key for the payment " +
- "destination",
- },
- cli.Int64Flag{
- Name: "amt",
- Usage: "the amount to send expressed in satoshis",
- },
- cli.Int64Flag{
- Name: "fee_limit",
- Usage: "maximum fee allowed in satoshis when sending " +
- "the payment",
- },
- cli.Int64Flag{
- Name: "fee_limit_percent",
- Usage: "percentage of the payment's amount used as the " +
- "maximum fee allowed when sending the payment",
- },
- cli.Int64Flag{
- Name: "final_cltv_delta",
- Usage: "(optional) number of blocks the last hop has to reveal " +
- "the preimage",
- },
- cli.BoolFlag{
- Name: "use_mc",
- Usage: "use mission control probabilities",
- },
- cli.Uint64Flag{
- Name: "outgoing_chanid",
- Usage: "(optional) the channel id of the channel " +
- "that must be taken to the first hop",
- },
- cltvLimitFlag,
- },
- Action: actionDecorator(queryRoutes),
-}
-
-func queryRoutes(ctx *cli.Context) er.R {
- ctxb := context.Background()
- client, cleanUp := getClient(ctx)
- defer cleanUp()
-
- var (
- dest string
- amt int64
- err er.R
- errr error
- )
-
- args := ctx.Args()
-
- switch {
- case ctx.IsSet("dest"):
- dest = ctx.String("dest")
- case args.Present():
- dest = args.First()
- args = args.Tail()
- default:
- return er.Errorf("dest argument missing")
- }
-
- switch {
- case ctx.IsSet("amt"):
- amt = ctx.Int64("amt")
- case args.Present():
- amt, errr = strconv.ParseInt(args.First(), 10, 64)
- if errr != nil {
- return er.Errorf("unable to decode amt argument: %v", errr)
- }
- default:
- return er.Errorf("amt argument missing")
- }
-
- feeLimit, err := retrieveFeeLimitLegacy(ctx)
- if err != nil {
- return err
- }
-
- req := &lnrpc.QueryRoutesRequest{
- PubKey: dest,
- Amt: amt,
- FeeLimit: feeLimit,
- FinalCltvDelta: int32(ctx.Int("final_cltv_delta")),
- UseMissionControl: ctx.Bool("use_mc"),
- CltvLimit: uint32(ctx.Uint64(cltvLimitFlag.Name)),
- OutgoingChanId: ctx.Uint64("outgoing_chanid"),
- }
-
- route, errr := client.QueryRoutes(ctxb, req)
- if errr != nil {
- return er.E(errr)
- }
-
- printRespJSON(route)
- return nil
-}
-
-// retrieveFeeLimitLegacy retrieves the fee limit based on the different fee
-// limit flags passed. This function will eventually disappear in favor of
-// retrieveFeeLimit and the new payment rpc.
-func retrieveFeeLimitLegacy(ctx *cli.Context) (*lnrpc.FeeLimit, er.R) {
- switch {
- case ctx.IsSet("fee_limit") && ctx.IsSet("fee_limit_percent"):
- return nil, er.Errorf("either fee_limit or fee_limit_percent " +
- "can be set, but not both")
- case ctx.IsSet("fee_limit"):
- return &lnrpc.FeeLimit{
- Limit: &lnrpc.FeeLimit_Fixed{
- Fixed: ctx.Int64("fee_limit"),
- },
- }, nil
- case ctx.IsSet("fee_limit_percent"):
- feeLimitPercent := ctx.Int64("fee_limit_percent")
- if feeLimitPercent < 0 {
- return nil, er.New("negative fee limit percentage " +
- "provided")
- }
- return &lnrpc.FeeLimit{
- Limit: &lnrpc.FeeLimit_Percent{
- Percent: feeLimitPercent,
- },
- }, nil
- }
-
- // Since the fee limit flags aren't required, we don't return an error
- // if they're not set.
- return nil, nil
-}
-
-var getNetworkInfoCommand = cli.Command{
- Name: "getnetworkinfo",
- Category: "Channels",
- Usage: "Get statistical information about the current " +
- "state of the network.",
- Description: "Returns a set of statistics pertaining to the known " +
- "channel graph",
- Action: actionDecorator(getNetworkInfo),
-}
-
-func getNetworkInfo(ctx *cli.Context) er.R {
- ctxb := context.Background()
- client, cleanUp := getClient(ctx)
- defer cleanUp()
-
- req := &lnrpc.NetworkInfoRequest{}
-
- netInfo, err := client.GetNetworkInfo(ctxb, req)
- if err != nil {
- return er.E(err)
- }
-
- printRespJSON(netInfo)
- return nil
-}
-
-var debugLevelCommand = cli.Command{
- Name: "debuglevel",
- Usage: "Set the debug level.",
- Description: `Logging level for all subsystems {trace, debug, info, warn, error, critical, off}
- You may also specify =,=,... to set the log level for individual subsystems
-
- Use show to list available subsystems`,
- Flags: []cli.Flag{
- cli.BoolFlag{
- Name: "show",
- Usage: "if true, then the list of available sub-systems will be printed out",
- },
- cli.StringFlag{
- Name: "level",
- Usage: "the level specification to target either a coarse logging level, or granular set of specific sub-systems with logging levels for each",
- },
- },
- Action: actionDecorator(debugLevel),
-}
-
-func debugLevel(ctx *cli.Context) er.R {
- ctxb := context.Background()
- client, cleanUp := getClient(ctx)
- defer cleanUp()
- req := &lnrpc.DebugLevelRequest{
- Show: ctx.Bool("show"),
- LevelSpec: ctx.String("level"),
- }
-
- resp, err := client.DebugLevel(ctxb, req)
- if err != nil {
- return er.E(err)
- }
-
- printRespJSON(resp)
- return nil
-}
-
-var listChainTxnsCommand = cli.Command{
- Name: "listchaintxns",
- Category: "On-chain",
- Usage: "List transactions from the wallet.",
- Flags: []cli.Flag{
- cli.Int64Flag{
- Name: "start_height",
- Usage: "the block height from which to list " +
- "transactions, inclusive",
- },
- cli.Int64Flag{
- Name: "end_height",
- Usage: "the block height until which to list " +
- "transactions, inclusive, to get transactions " +
- "until the chain tip, including unconfirmed, " +
- "set this value to -1",
- },
- },
- Description: `
- List all transactions an address of the wallet was involved in.
-
- This call will return a list of wallet related transactions that paid
- to an address our wallet controls, or spent utxos that we held. The
- start_height and end_height flags can be used to specify an inclusive
- block range over which to query for transactions. If the end_height is
- less than the start_height, transactions will be queried in reverse.
- To get all transactions until the chain tip, including unconfirmed
- transactions (identifiable with BlockHeight=0), set end_height to -1.
- By default, this call will get all transactions our wallet was involved
- in, including unconfirmed transactions.
-`,
- Action: actionDecorator(listChainTxns),
-}
-
-func listChainTxns(ctx *cli.Context) er.R {
- ctxb := context.Background()
- client, cleanUp := getClient(ctx)
- defer cleanUp()
-
- req := &lnrpc.GetTransactionsRequest{}
-
- if ctx.IsSet("start_height") {
- req.StartHeight = int32(ctx.Int64("start_height"))
- }
- if ctx.IsSet("end_height") {
- req.EndHeight = int32(ctx.Int64("end_height"))
- }
-
- resp, err := client.GetTransactions(ctxb, req)
- if err != nil {
- return er.E(err)
- }
-
- printRespJSON(resp)
- return nil
-}
-
-var stopCommand = cli.Command{
- Name: "stop",
- Usage: "Stop and shutdown the daemon.",
- Description: `
- Gracefully stop all daemon subsystems before stopping the daemon itself.
- This is equivalent to stopping it using CTRL-C.`,
- Action: actionDecorator(stopDaemon),
-}
-
-func stopDaemon(ctx *cli.Context) er.R {
- ctxb := context.Background()
- client, cleanUp := getClient(ctx)
- defer cleanUp()
-
- _, err := client.StopDaemon(ctxb, &lnrpc.StopRequest{})
- if err != nil {
- return er.E(err)
- }
-
- return nil
-}
-
-var signMessageCommand = cli.Command{
- Name: "signmessage",
- Category: "Wallet",
- Usage: "Sign a message with the node's private key.",
- ArgsUsage: "msg",
- Description: `
- Sign msg with the resident node's private key.
- Returns the signature as a zbase32 string.
-
- Positional arguments and flags can be used interchangeably but not at the same time!`,
- Flags: []cli.Flag{
- cli.StringFlag{
- Name: "msg",
- Usage: "the message to sign",
- },
- },
- Action: actionDecorator(signMessage),
-}
-
-func signMessage(ctx *cli.Context) er.R {
- ctxb := context.Background()
- client, cleanUp := getClient(ctx)
- defer cleanUp()
-
- var msg []byte
-
- switch {
- case ctx.IsSet("msg"):
- msg = []byte(ctx.String("msg"))
- case ctx.Args().Present():
- msg = []byte(ctx.Args().First())
- default:
- return er.Errorf("msg argument missing")
- }
-
- resp, err := client.SignMessage(ctxb, &lnrpc.SignMessageRequest{Msg: msg})
- if err != nil {
- return er.E(err)
- }
-
- printRespJSON(resp)
- return nil
-}
-
-var verifyMessageCommand = cli.Command{
- Name: "verifymessage",
- Category: "Wallet",
- Usage: "Verify a message signed with the signature.",
- ArgsUsage: "msg signature",
- Description: `
- Verify that the message was signed with a properly-formed signature
- The signature must be zbase32 encoded and signed with the private key of
- an active node in the resident node's channel database.
-
- Positional arguments and flags can be used interchangeably but not at the same time!`,
- Flags: []cli.Flag{
- cli.StringFlag{
- Name: "msg",
- Usage: "the message to verify",
- },
- cli.StringFlag{
- Name: "sig",
- Usage: "the zbase32 encoded signature of the message",
- },
- },
- Action: actionDecorator(verifyMessage),
-}
-
-func verifyMessage(ctx *cli.Context) er.R {
- ctxb := context.Background()
- client, cleanUp := getClient(ctx)
- defer cleanUp()
-
- var (
- msg []byte
- sig string
- )
-
- args := ctx.Args()
-
- switch {
- case ctx.IsSet("msg"):
- msg = []byte(ctx.String("msg"))
- case args.Present():
- msg = []byte(ctx.Args().First())
- args = args.Tail()
- default:
- return er.Errorf("msg argument missing")
- }
-
- switch {
- case ctx.IsSet("sig"):
- sig = ctx.String("sig")
- case args.Present():
- sig = args.First()
- default:
- return er.Errorf("signature argument missing")
- }
-
- req := &lnrpc.VerifyMessageRequest{Msg: msg, Signature: sig}
- resp, err := client.VerifyMessage(ctxb, req)
- if err != nil {
- return er.E(err)
- }
-
- printRespJSON(resp)
- return nil
-}
-
-var feeReportCommand = cli.Command{
- Name: "feereport",
- Category: "Channels",
- Usage: "Display the current fee policies of all active channels.",
- Description: `
- Returns the current fee policies of all active channels.
- Fee policies can be updated using the updatechanpolicy command.`,
- Action: actionDecorator(feeReport),
-}
-
-func feeReport(ctx *cli.Context) er.R {
- ctxb := context.Background()
- client, cleanUp := getClient(ctx)
- defer cleanUp()
-
- req := &lnrpc.FeeReportRequest{}
- resp, err := client.FeeReport(ctxb, req)
- if err != nil {
- return er.E(err)
- }
-
- printRespJSON(resp)
- return nil
-}
-
-var updateChannelPolicyCommand = cli.Command{
- Name: "updatechanpolicy",
- Category: "Channels",
- Usage: "Update the channel policy for all channels, or a single " +
- "channel.",
- ArgsUsage: "base_fee_msat fee_rate time_lock_delta " +
- "[--max_htlc_msat=N] [channel_point]",
- Description: `
- Updates the channel policy for all channels, or just a particular channel
- identified by its channel point. The update will be committed, and
- broadcast to the rest of the network within the next batch.
- Channel points are encoded as: funding_txid:output_index`,
- Flags: []cli.Flag{
- cli.Int64Flag{
- Name: "base_fee_msat",
- Usage: "the base fee in milli-satoshis that will " +
- "be charged for each forwarded HTLC, regardless " +
- "of payment size",
- },
- cli.StringFlag{
- Name: "fee_rate",
- Usage: "the fee rate that will be charged " +
- "proportionally based on the value of each " +
- "forwarded HTLC, the lowest possible rate is 0 " +
- "with a granularity of 0.000001 (millionths)",
- },
- cli.Int64Flag{
- Name: "time_lock_delta",
- Usage: "the CLTV delta that will be applied to all " +
- "forwarded HTLCs",
- },
- cli.Uint64Flag{
- Name: "min_htlc_msat",
- Usage: "if set, the min HTLC size that will be applied " +
- "to all forwarded HTLCs. If unset, the min HTLC " +
- "is left unchanged.",
- },
- cli.Uint64Flag{
- Name: "max_htlc_msat",
- Usage: "if set, the max HTLC size that will be applied " +
- "to all forwarded HTLCs. If unset, the max HTLC " +
- "is left unchanged.",
- },
- cli.StringFlag{
- Name: "chan_point",
- Usage: "The channel whose fee policy should be " +
- "updated, if nil the policies for all channels " +
- "will be updated. Takes the form of: txid:output_index",
- },
- },
- Action: actionDecorator(updateChannelPolicy),
-}
-
-func parseChanPoint(s string) (*lnrpc.ChannelPoint, er.R) {
- split := strings.Split(s, ":")
- if len(split) != 2 {
- return nil, er.Errorf("expecting chan_point to be in format of: " +
- "txid:index")
- }
-
- index, errr := strconv.ParseInt(split[1], 10, 32)
- if errr != nil {
- return nil, er.Errorf("unable to decode output index: %v", errr)
- }
-
- txid, err := chainhash.NewHashFromStr(split[0])
- if err != nil {
- return nil, er.Errorf("unable to parse hex string: %v", err)
- }
-
- return &lnrpc.ChannelPoint{
- FundingTxid: &lnrpc.ChannelPoint_FundingTxidBytes{
- FundingTxidBytes: txid[:],
- },
- OutputIndex: uint32(index),
- }, nil
-}
-
-func updateChannelPolicy(ctx *cli.Context) er.R {
- ctxb := context.Background()
- client, cleanUp := getClient(ctx)
- defer cleanUp()
-
- var (
- baseFee int64
- feeRate float64
- timeLockDelta int64
- err er.R
- errr error
- )
- args := ctx.Args()
-
- switch {
- case ctx.IsSet("base_fee_msat"):
- baseFee = ctx.Int64("base_fee_msat")
- case args.Present():
- baseFee, errr = strconv.ParseInt(args.First(), 10, 64)
- if errr != nil {
- return er.Errorf("unable to decode base_fee_msat: %v", errr)
- }
- args = args.Tail()
- default:
- return er.Errorf("base_fee_msat argument missing")
- }
-
- switch {
- case ctx.IsSet("fee_rate"):
- feeRate = ctx.Float64("fee_rate")
- case args.Present():
- feeRate, errr = strconv.ParseFloat(args.First(), 64)
- if errr != nil {
- return er.Errorf("unable to decode fee_rate: %v", errr)
- }
-
- args = args.Tail()
- default:
- return er.Errorf("fee_rate argument missing")
- }
-
- switch {
- case ctx.IsSet("time_lock_delta"):
- timeLockDelta = ctx.Int64("time_lock_delta")
- case args.Present():
- timeLockDelta, errr = strconv.ParseInt(args.First(), 10, 64)
- if errr != nil {
- return er.Errorf("unable to decode time_lock_delta: %v",
- errr)
- }
-
- args = args.Tail()
- default:
- return er.Errorf("time_lock_delta argument missing")
- }
-
- var (
- chanPoint *lnrpc.ChannelPoint
- chanPointStr string
- )
-
- switch {
- case ctx.IsSet("chan_point"):
- chanPointStr = ctx.String("chan_point")
- case args.Present():
- chanPointStr = args.First()
- }
-
- if chanPointStr != "" {
- chanPoint, err = parseChanPoint(chanPointStr)
- if err != nil {
- return er.Errorf("unable to parse chan point: %v", err)
- }
- }
-
- req := &lnrpc.PolicyUpdateRequest{
- BaseFeeMsat: baseFee,
- FeeRate: feeRate,
- TimeLockDelta: uint32(timeLockDelta),
- MaxHtlcMsat: ctx.Uint64("max_htlc_msat"),
- }
-
- if ctx.IsSet("min_htlc_msat") {
- req.MinHtlcMsat = ctx.Uint64("min_htlc_msat")
- req.MinHtlcMsatSpecified = true
- }
-
- if chanPoint != nil {
- req.Scope = &lnrpc.PolicyUpdateRequest_ChanPoint{
- ChanPoint: chanPoint,
- }
- } else {
- req.Scope = &lnrpc.PolicyUpdateRequest_Global{
- Global: true,
- }
- }
-
- resp, errr := client.UpdateChannelPolicy(ctxb, req)
- if errr != nil {
- return er.E(errr)
- }
-
- printRespJSON(resp)
- return nil
-}
-
-var forwardingHistoryCommand = cli.Command{
- Name: "fwdinghistory",
- Category: "Payments",
- Usage: "Query the history of all forwarded HTLCs.",
- ArgsUsage: "start_time [end_time] [index_offset] [max_events]",
- Description: `
- Query the HTLC switch's internal forwarding log for all completed
- payment circuits (HTLCs) over a particular time range (--start_time and
- --end_time). The start and end times are meant to be expressed in
- seconds since the Unix epoch.
- Alternatively negative time ranges can be used, e.g. "-3d". Supports
- s(seconds), m(minutes), h(ours), d(ays), w(eeks), M(onths), y(ears).
- Month equals 30.44 days, year equals 365.25 days.
- If --start_time isn't provided, then 24 hours ago is used. If
- --end_time isn't provided, then the current time is used.
-
- The max number of events returned is 50k. The default number is 100,
- callers can use the --max_events param to modify this value.
-
- Finally, callers can skip a series of events using the --index_offset
- parameter. Each response will contain the offset index of the last
- entry. Using this callers can manually paginate within a time slice.
- `,
- Flags: []cli.Flag{
- cli.StringFlag{
- Name: "start_time",
- Usage: "the starting time for the query " +
- `as unix timestamp or relative e.g. "-1w"`,
- },
- cli.StringFlag{
- Name: "end_time",
- Usage: "the end time for the query " +
- `as unix timestamp or relative e.g. "-1w"`,
- },
- cli.Int64Flag{
- Name: "index_offset",
- Usage: "the number of events to skip",
- },
- cli.Int64Flag{
- Name: "max_events",
- Usage: "the max number of events to return",
- },
- },
- Action: actionDecorator(forwardingHistory),
-}
-
-func forwardingHistory(ctx *cli.Context) er.R {
- ctxb := context.Background()
- client, cleanUp := getClient(ctx)
- defer cleanUp()
-
- var (
- startTime, endTime uint64
- indexOffset, maxEvents uint32
- err er.R
- )
- args := ctx.Args()
- now := time.Now()
-
- switch {
- case ctx.IsSet("start_time"):
- startTime, err = parseTime(ctx.String("start_time"), now)
- case args.Present():
- startTime, err = parseTime(args.First(), now)
- args = args.Tail()
- default:
- now := time.Now()
- startTime = uint64(now.Add(-time.Hour * 24).Unix())
- }
- if err != nil {
- return er.Errorf("unable to decode start_time: %v", err)
- }
-
- switch {
- case ctx.IsSet("end_time"):
- endTime, err = parseTime(ctx.String("end_time"), now)
- case args.Present():
- endTime, err = parseTime(args.First(), now)
- args = args.Tail()
- default:
- endTime = uint64(now.Unix())
- }
- if err != nil {
- return er.Errorf("unable to decode end_time: %v", err)
- }
-
- switch {
- case ctx.IsSet("index_offset"):
- indexOffset = uint32(ctx.Int64("index_offset"))
- case args.Present():
- i, err := strconv.ParseInt(args.First(), 10, 64)
- if err != nil {
- return er.Errorf("unable to decode index_offset: %v", err)
- }
- indexOffset = uint32(i)
- args = args.Tail()
- }
-
- switch {
- case ctx.IsSet("max_events"):
- maxEvents = uint32(ctx.Int64("max_events"))
- case args.Present():
- m, err := strconv.ParseInt(args.First(), 10, 64)
- if err != nil {
- return er.Errorf("unable to decode max_events: %v", err)
- }
- maxEvents = uint32(m)
- args = args.Tail()
- }
-
- req := &lnrpc.ForwardingHistoryRequest{
- StartTime: startTime,
- EndTime: endTime,
- IndexOffset: indexOffset,
- NumMaxEvents: maxEvents,
- }
- resp, errr := client.ForwardingHistory(ctxb, req)
- if errr != nil {
- return er.E(errr)
- }
-
- printRespJSON(resp)
- return nil
-}
-
-var exportChanBackupCommand = cli.Command{
- Name: "exportchanbackup",
- Category: "Channels",
- Usage: "Obtain a static channel back up for a selected channels, " +
- "or all known channels",
- ArgsUsage: "[chan_point] [--all] [--output_file]",
- Description: `
- This command allows a user to export a Static Channel Backup (SCB) for
- a selected channel. SCB's are encrypted backups of a channel's initial
- state that are encrypted with a key derived from the seed of a user. In
- the case of partial or complete data loss, the SCB will allow the user
- to reclaim settled funds in the channel at its final state. The
- exported channel backups can be restored at a later time using the
- restorechanbackup command.
-
- This command will return one of two types of channel backups depending
- on the set of passed arguments:
-
- * If a target channel point is specified, then a single channel
- backup containing only the information for that channel will be
- returned.
-
- * If the --all flag is passed, then a multi-channel backup will be
- returned. A multi backup is a single encrypted blob (displayed in
- hex encoding) that contains several channels in a single cipher
- text.
-
- Both of the backup types can be restored using the restorechanbackup
- command.
- `,
- Flags: []cli.Flag{
- cli.StringFlag{
- Name: "chan_point",
- Usage: "the target channel to obtain an SCB for",
- },
- cli.BoolFlag{
- Name: "all",
- Usage: "if specified, then a multi backup of all " +
- "active channels will be returned",
- },
- cli.StringFlag{
- Name: "output_file",
- Usage: `
- if specified, then rather than printing a JSON output
- of the static channel backup, a serialized version of
- the backup (either Single or Multi) will be written to
- the target file, this is the same format used by lnd in
- its channels.backup file `,
- },
- },
- Action: actionDecorator(exportChanBackup),
-}
-
-func exportChanBackup(ctx *cli.Context) er.R {
- ctxb := context.Background()
- client, cleanUp := getClient(ctx)
- defer cleanUp()
-
- // Show command help if no arguments provided
- if ctx.NArg() == 0 && ctx.NumFlags() == 0 {
- cli.ShowCommandHelp(ctx, "exportchanbackup")
- return nil
- }
-
- var (
- err error
- chanPointStr string
- )
- args := ctx.Args()
-
- switch {
- case ctx.IsSet("chan_point"):
- chanPointStr = ctx.String("chan_point")
-
- case args.Present():
- chanPointStr = args.First()
-
- case !ctx.IsSet("all"):
- return er.Errorf("must specify chan_point if --all isn't set")
- }
-
- if chanPointStr != "" {
- chanPointRPC, err := parseChanPoint(chanPointStr)
- if err != nil {
- return err
- }
-
- chanBackup, errr := client.ExportChannelBackup(
- ctxb, &lnrpc.ExportChannelBackupRequest{
- ChanPoint: chanPointRPC,
- },
- )
- if errr != nil {
- return er.E(errr)
- }
-
- txid, err := chainhash.NewHash(
- chanPointRPC.GetFundingTxidBytes(),
- )
- if err != nil {
- return err
- }
-
- chanPoint := wire.OutPoint{
- Hash: *txid,
- Index: chanPointRPC.OutputIndex,
- }
-
- printJSON(struct {
- ChanPoint string `json:"chan_point"`
- ChanBackup []byte `json:"chan_backup"`
- }{
- ChanPoint: chanPoint.String(),
- ChanBackup: chanBackup.ChanBackup,
- })
- return nil
- }
-
- if !ctx.IsSet("all") {
- return er.Errorf("if a channel isn't specified, -all must be")
- }
-
- chanBackup, err := client.ExportAllChannelBackups(
- ctxb, &lnrpc.ChanBackupExportRequest{},
- )
- if err != nil {
- return er.E(err)
- }
-
- if ctx.IsSet("output_file") {
- return er.E(ioutil.WriteFile(
- ctx.String("output_file"),
- chanBackup.MultiChanBackup.MultiChanBackup,
- 0666,
- ))
- }
-
- // TODO(roasbeef): support for export | restore ?
-
- var chanPoints []string
- for _, chanPoint := range chanBackup.MultiChanBackup.ChanPoints {
- txid, err := chainhash.NewHash(chanPoint.GetFundingTxidBytes())
- if err != nil {
- return err
- }
-
- chanPoints = append(chanPoints, wire.OutPoint{
- Hash: *txid,
- Index: chanPoint.OutputIndex,
- }.String())
- }
-
- printRespJSON(chanBackup)
-
- return nil
-}
-
-var verifyChanBackupCommand = cli.Command{
- Name: "verifychanbackup",
- Category: "Channels",
- Usage: "Verify an existing channel backup",
- ArgsUsage: "[--single_backup] [--multi_backup] [--multi_file]",
- Description: `
- This command allows a user to verify an existing Single or Multi channel
- backup for integrity. This is useful when a user has a backup, but is
- unsure as to if it's valid or for the target node.
-
- The command will accept backups in one of three forms:
-
- * A single channel packed SCB, which can be obtained from
- exportchanbackup. This should be passed in hex encoded format.
-
- * A packed multi-channel SCB, which couples several individual
- static channel backups in single blob.
-
- * A file path which points to a packed multi-channel backup within a
- file, using the same format that lnd does in its channels.backup
- file.
- `,
- Flags: []cli.Flag{
- cli.StringFlag{
- Name: "single_backup",
- Usage: "a hex encoded single channel backup obtained " +
- "from exportchanbackup",
- },
- cli.StringFlag{
- Name: "multi_backup",
- Usage: "a hex encoded multi-channel backup obtained " +
- "from exportchanbackup",
- },
- cli.StringFlag{
- Name: "multi_file",
- Usage: "the path to a multi-channel back up file",
- },
- },
- Action: actionDecorator(verifyChanBackup),
-}
-
-func verifyChanBackup(ctx *cli.Context) er.R {
- ctxb := context.Background()
- client, cleanUp := getClient(ctx)
- defer cleanUp()
-
- // Show command help if no arguments provided
- if ctx.NArg() == 0 && ctx.NumFlags() == 0 {
- cli.ShowCommandHelp(ctx, "verifychanbackup")
- return nil
- }
-
- backups, err := parseChanBackups(ctx)
- if err != nil {
- return err
- }
-
- verifyReq := lnrpc.ChanBackupSnapshot{}
-
- if backups.GetChanBackups() != nil {
- verifyReq.SingleChanBackups = backups.GetChanBackups()
- }
- if backups.GetMultiChanBackup() != nil {
- verifyReq.MultiChanBackup = &lnrpc.MultiChanBackup{
- MultiChanBackup: backups.GetMultiChanBackup(),
- }
- }
-
- resp, errr := client.VerifyChanBackup(ctxb, &verifyReq)
- if errr != nil {
- return er.E(errr)
- }
-
- printRespJSON(resp)
- return nil
-}
-
-var restoreChanBackupCommand = cli.Command{
- Name: "restorechanbackup",
- Category: "Channels",
- Usage: "Restore an existing single or multi-channel static channel " +
- "backup",
- ArgsUsage: "[--single_backup] [--multi_backup] [--multi_file=",
- Description: `
- Allows a user to restore a Static Channel Backup (SCB) that was
- obtained either via the exportchanbackup command, or from lnd's
- automatically manged channels.backup file. This command should be used
- if a user is attempting to restore a channel due to data loss on a
- running node restored with the same seed as the node that created the
- channel. If successful, this command will allows the user to recover
- the settled funds stored in the recovered channels.
-
- The command will accept backups in one of three forms:
-
- * A single channel packed SCB, which can be obtained from
- exportchanbackup. This should be passed in hex encoded format.
-
- * A packed multi-channel SCB, which couples several individual
- static channel backups in single blob.
-
- * A file path which points to a packed multi-channel backup within a
- file, using the same format that lnd does in its channels.backup
- file.
- `,
- Flags: []cli.Flag{
- cli.StringFlag{
- Name: "single_backup",
- Usage: "a hex encoded single channel backup obtained " +
- "from exportchanbackup",
- },
- cli.StringFlag{
- Name: "multi_backup",
- Usage: "a hex encoded multi-channel backup obtained " +
- "from exportchanbackup",
- },
- cli.StringFlag{
- Name: "multi_file",
- Usage: "the path to a multi-channel back up file",
- },
- },
- Action: actionDecorator(restoreChanBackup),
-}
-
-// errMissingChanBackup is an error returned when we attempt to parse a channel
-// backup from a CLI command and it is missing.
-var errMissingChanBackup = er.GenericErrorType.CodeWithDetail("errMissingChanBackup",
- "missing channel backup")
-
-func parseChanBackups(ctx *cli.Context) (*lnrpc.RestoreChanBackupRequest, er.R) {
- switch {
- case ctx.IsSet("single_backup"):
- packedBackup, err := util.DecodeHex(
- ctx.String("single_backup"),
- )
- if err != nil {
- return nil, er.Errorf("unable to decode single packed "+
- "backup: %v", err)
- }
-
- return &lnrpc.RestoreChanBackupRequest{
- Backup: &lnrpc.RestoreChanBackupRequest_ChanBackups{
- ChanBackups: &lnrpc.ChannelBackups{
- ChanBackups: []*lnrpc.ChannelBackup{
- {
- ChanBackup: packedBackup,
- },
- },
- },
- },
- }, nil
-
- case ctx.IsSet("multi_backup"):
- packedMulti, err := util.DecodeHex(
- ctx.String("multi_backup"),
- )
- if err != nil {
- return nil, er.Errorf("unable to decode multi packed "+
- "backup: %v", err)
- }
-
- return &lnrpc.RestoreChanBackupRequest{
- Backup: &lnrpc.RestoreChanBackupRequest_MultiChanBackup{
- MultiChanBackup: packedMulti,
- },
- }, nil
-
- case ctx.IsSet("multi_file"):
- packedMulti, err := ioutil.ReadFile(ctx.String("multi_file"))
- if err != nil {
- return nil, er.Errorf("unable to decode multi packed "+
- "backup: %v", err)
- }
-
- return &lnrpc.RestoreChanBackupRequest{
- Backup: &lnrpc.RestoreChanBackupRequest_MultiChanBackup{
- MultiChanBackup: packedMulti,
- },
- }, nil
-
- default:
- return nil, errMissingChanBackup.Default()
- }
-}
-
-func restoreChanBackup(ctx *cli.Context) er.R {
- ctxb := context.Background()
- client, cleanUp := getClient(ctx)
- defer cleanUp()
-
- // Show command help if no arguments provided
- if ctx.NArg() == 0 && ctx.NumFlags() == 0 {
- cli.ShowCommandHelp(ctx, "restorechanbackup")
- return nil
- }
-
- var req lnrpc.RestoreChanBackupRequest
-
- backups, err := parseChanBackups(ctx)
- if err != nil {
- return err
- }
-
- req.Backup = backups.Backup
-
- _, errr := client.RestoreChannelBackups(ctxb, &req)
- if errr != nil {
- return er.Errorf("unable to restore chan backups: %v", errr)
- }
-
- return nil
-}
-
-var resyncCommand = cli.Command{
- Name: "resync",
- Category: "Wallet",
- Usage: "Scan over the chain to find any transactions which may not have been recorded in the wallet's database",
- ArgsUsage: "",
- Description: `Scan over the chain to find any transactions which may not have been recorded in the wallet's database`,
- Flags: []cli.Flag{
- cli.Int64Flag{
- Name: "fromHeight",
- Usage: "Start re-syncing to the chain from specified height, default or -1 will use the height of the chain when the wallet was created",
- },
- cli.Int64Flag{
- Name: "toHeight",
- Usage: "Stop resyncing when this height is reached, default or -1 will use the tip of the chain",
- },
- cli.StringFlag{
- Name: "addresses",
- Usage: "If specified, the wallet will ONLY scan the chain for these addresses, not others. If dropdb is specified then it will scan all addresses including these",
- },
- cli.BoolFlag{
- Name: "dropDB",
- Usage: "Clean most of the data out of the wallet transaction store, this is not a real resync, it just drops the wallet and then lets it begin working again",
- },
- },
- Action: actionDecorator(resync),
-}
-
-func resync(ctx *cli.Context) er.R {
- ctxb := context.Background()
- client, cleanUp := getClient(ctx)
- defer cleanUp()
- fh := int32(-1)
- if ctx.IsSet("fromHeight") {
- fh = int32(ctx.Int64("fromHeight"))
- }
- th := int32(-1)
- if ctx.IsSet("toHeight") {
- th = int32(ctx.Int64("toHeight"))
- }
- var a []string
- if ctx.IsSet("addresses") {
- a = ctx.StringSlice("addresses")
- }
- drop := false
- if ctx.IsSet("dropDB") {
- drop = ctx.Bool("dropDB")
- }
- req := &lnrpc.ReSyncChainRequest{
- FromHeight: fh,
- ToHeight: th,
- Addresses: a,
- DropDb: drop,
- }
-
- resp, err := client.ReSync(ctxb, req)
- if err != nil {
- return er.E(err)
- }
-
- printRespJSON(resp)
- return nil
-}
-
-var stopresyncCommand = cli.Command{
- Name: "stopresync",
- Category: "Wallet",
- Usage: "Stop a re-synchronization job before it's completion",
- ArgsUsage: "",
- Description: `Stop a re-synchronization job before it's completion`,
- Action: actionDecorator(stopresync),
-}
-
-func stopresync(ctx *cli.Context) er.R {
- ctxb := context.Background()
- client, cleanUp := getClient(ctx)
- defer cleanUp()
-
- var req lnrpc.StopReSyncRequest
-
- resp, err := client.StopReSync(ctxb, &req)
- if err != nil {
- return er.E(err)
- }
- printRespJSON(resp)
- return nil
-}
diff --git a/lnd/cmd/lncli/invoicesrpc_active.go b/lnd/cmd/lncli/invoicesrpc_active.go
deleted file mode 100644
index ac51f570..00000000
--- a/lnd/cmd/lncli/invoicesrpc_active.go
+++ /dev/null
@@ -1,260 +0,0 @@
-// +build invoicesrpc
-
-package main
-
-import (
- "context"
- "encoding/hex"
- "fmt"
-
- "strconv"
-
- "github.com/pkt-cash/pktd/lnd/lnrpc/invoicesrpc"
- "github.com/urfave/cli"
-)
-
-// invoicesCommands will return nil for non-invoicesrpc builds.
-func invoicesCommands() []cli.Command {
- return []cli.Command{
- cancelInvoiceCommand,
- addHoldInvoiceCommand,
- settleInvoiceCommand,
- }
-}
-
-func getInvoicesClient(ctx *cli.Context) (invoicesrpc.InvoicesClient, func()) {
- conn := getClientConn(ctx, false)
-
- cleanUp := func() {
- conn.Close()
- }
-
- return invoicesrpc.NewInvoicesClient(conn), cleanUp
-}
-
-var settleInvoiceCommand = cli.Command{
- Name: "settleinvoice",
- Category: "Invoices",
- Usage: "Reveal a preimage and use it to settle the corresponding invoice.",
- Description: `
- Todo.`,
- ArgsUsage: "preimage",
- Flags: []cli.Flag{
- cli.StringFlag{
- Name: "preimage",
- Usage: "the hex-encoded preimage (32 byte) which will " +
- "allow settling an incoming HTLC payable to this " +
- "preimage.",
- },
- },
- Action: actionDecorator(settleInvoice),
-}
-
-func settleInvoice(ctx *cli.Context) er.R {
- var (
- preimage []byte
- err error
- )
-
- client, cleanUp := getInvoicesClient(ctx)
- defer cleanUp()
-
- args := ctx.Args()
-
- switch {
- case ctx.IsSet("preimage"):
- preimage, err = util.DecodeHex(ctx.String("preimage"))
- case args.Present():
- preimage, err = util.DecodeHex(args.First())
- }
-
- if err != nil {
- return er.Errorf("unable to parse preimage: %v", err)
- }
-
- invoice := &invoicesrpc.SettleInvoiceMsg{
- Preimage: preimage,
- }
-
- resp, err := client.SettleInvoice(context.Background(), invoice)
- if err != nil {
- return err
- }
-
- printRespJSON(resp)
-
- return nil
-}
-
-var cancelInvoiceCommand = cli.Command{
- Name: "cancelinvoice",
- Category: "Invoices",
- Usage: "Cancels a (hold) invoice",
- Description: `
- Todo.`,
- ArgsUsage: "paymenthash",
- Flags: []cli.Flag{
- cli.StringFlag{
- Name: "paymenthash",
- Usage: "the hex-encoded payment hash (32 byte) for which the " +
- "corresponding invoice will be canceled.",
- },
- },
- Action: actionDecorator(cancelInvoice),
-}
-
-func cancelInvoice(ctx *cli.Context) er.R {
- var (
- paymentHash []byte
- err error
- )
-
- client, cleanUp := getInvoicesClient(ctx)
- defer cleanUp()
-
- args := ctx.Args()
-
- switch {
- case ctx.IsSet("paymenthash"):
- paymentHash, err = util.DecodeHex(ctx.String("paymenthash"))
- case args.Present():
- paymentHash, err = util.DecodeHex(args.First())
- }
-
- if err != nil {
- return er.Errorf("unable to parse preimage: %v", err)
- }
-
- invoice := &invoicesrpc.CancelInvoiceMsg{
- PaymentHash: paymentHash,
- }
-
- resp, err := client.CancelInvoice(context.Background(), invoice)
- if err != nil {
- return err
- }
-
- printRespJSON(resp)
-
- return nil
-}
-
-var addHoldInvoiceCommand = cli.Command{
- Name: "addholdinvoice",
- Category: "Invoices",
- Usage: "Add a new hold invoice.",
- Description: `
- Add a new invoice, expressing intent for a future payment.
-
- Invoices without an amount can be created by not supplying any
- parameters or providing an amount of 0. These invoices allow the payee
- to specify the amount of satoshis they wish to send.`,
- ArgsUsage: "hash [amt]",
- Flags: []cli.Flag{
- cli.StringFlag{
- Name: "memo",
- Usage: "a description of the payment to attach along " +
- "with the invoice (default=\"\")",
- },
- cli.Int64Flag{
- Name: "amt",
- Usage: "the amt of satoshis in this invoice",
- },
- cli.Int64Flag{
- Name: "amt_msat",
- Usage: "the amt of millisatoshis in this invoice",
- },
- cli.StringFlag{
- Name: "description_hash",
- Usage: "SHA-256 hash of the description of the payment. " +
- "Used if the purpose of payment cannot naturally " +
- "fit within the memo. If provided this will be " +
- "used instead of the description(memo) field in " +
- "the encoded invoice.",
- },
- cli.StringFlag{
- Name: "fallback_addr",
- Usage: "fallback on-chain address that can be used in " +
- "case the lightning payment fails",
- },
- cli.Int64Flag{
- Name: "expiry",
- Usage: "the invoice's expiry time in seconds. If not " +
- "specified, an expiry of 3600 seconds (1 hour) " +
- "is implied.",
- },
- cli.BoolTFlag{
- Name: "private",
- Usage: "encode routing hints in the invoice with " +
- "private channels in order to assist the " +
- "payer in reaching you",
- },
- },
- Action: actionDecorator(addHoldInvoice),
-}
-
-func addHoldInvoice(ctx *cli.Context) er.R {
- var (
- descHash []byte
- err error
- )
-
- client, cleanUp := getInvoicesClient(ctx)
- defer cleanUp()
-
- args := ctx.Args()
- if ctx.NArg() == 0 {
- cli.ShowCommandHelp(ctx, "addholdinvoice")
- return nil
- }
-
- hash, err := util.DecodeHex(args.First())
- if err != nil {
- return er.Errorf("unable to parse hash: %v", err)
- }
-
- args = args.Tail()
-
- amt := ctx.Int64("amt")
- amtMsat := ctx.Int64("amt_msat")
-
- if !ctx.IsSet("amt") && !ctx.IsSet("amt_msat") && args.Present() {
- amt, err = strconv.ParseInt(args.First(), 10, 64)
- if err != nil {
- return er.Errorf("unable to decode amt argument: %v", err)
- }
- }
-
- if err != nil {
- return er.Errorf("unable to parse preimage: %v", err)
- }
-
- descHash, err = util.DecodeHex(ctx.String("description_hash"))
- if err != nil {
- return er.Errorf("unable to parse description_hash: %v", err)
- }
-
- invoice := &invoicesrpc.AddHoldInvoiceRequest{
- Memo: ctx.String("memo"),
- Hash: hash,
- Value: amt,
- ValueMsat: amtMsat,
- DescriptionHash: descHash,
- FallbackAddr: ctx.String("fallback_addr"),
- Expiry: ctx.Int64("expiry"),
- Private: ctx.Bool("private"),
- }
-
- resp, err := client.AddHoldInvoice(context.Background(), invoice)
- if err != nil {
- return err
- }
-
- printJSON(struct {
- PayReq string `json:"pay_req"`
- }{
- PayReq: resp.PaymentRequest,
- })
-
- return nil
-}
diff --git a/lnd/cmd/lncli/invoicesrpc_default.go b/lnd/cmd/lncli/invoicesrpc_default.go
deleted file mode 100644
index 570dfa69..00000000
--- a/lnd/cmd/lncli/invoicesrpc_default.go
+++ /dev/null
@@ -1,10 +0,0 @@
-// +build !invoicesrpc
-
-package main
-
-import "github.com/urfave/cli"
-
-// invoicesCommands will return nil for non-invoicesrpc builds.
-func invoicesCommands() []cli.Command {
- return nil
-}
diff --git a/lnd/cmd/lncli/macaroon_jar.go b/lnd/cmd/lncli/macaroon_jar.go
deleted file mode 100644
index 2f04da25..00000000
--- a/lnd/cmd/lncli/macaroon_jar.go
+++ /dev/null
@@ -1,164 +0,0 @@
-package main
-
-import (
- "encoding/base64"
- "encoding/hex"
- "fmt"
- "strings"
-
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/btcutil/util"
- "github.com/pkt-cash/pktd/pktwallet/snacl"
- "gopkg.in/macaroon.v2"
-)
-
-const (
- encryptionPrefix = "snacl:"
-)
-
-// getPasswordFn is a function that asks the user to type a password after
-// presenting it the given prompt.
-type getPasswordFn func(prompt string) ([]byte, er.R)
-
-// macaroonJar is a struct that represents all macaroons of a profile.
-type macaroonJar struct {
- Default string `json:"default,omitempty"`
- Timeout int64 `json:"timeout,omitempty"`
- IP string `json:"ip,omitempty"`
- Jar []*macaroonEntry `json:"jar"`
-}
-
-// macaroonEntry is a struct that represents a single macaroon. Its content can
-// either be cleartext (hex encoded) or encrypted (snacl secretbox).
-type macaroonEntry struct {
- Name string `json:"name"`
- Data string `json:"data"`
-}
-
-// loadMacaroon returns the fully usable macaroon instance from the entry. This
-// detects whether the macaroon needs to be decrypted and does so if necessary.
-// An encrypted macaroon that needs to be decrypted will prompt for the user's
-// password by calling the provided password callback. Normally that should
-// result in the user being prompted for the password in the terminal.
-func (e *macaroonEntry) loadMacaroon(
- pwCallback getPasswordFn) (*macaroon.Macaroon, er.R) {
-
- if len(strings.TrimSpace(e.Data)) == 0 {
- return nil, er.Errorf("macaroon data is empty")
- }
-
- var (
- macBytes []byte
- err er.R
- )
-
- // Either decrypt or simply decode the macaroon data.
- if strings.HasPrefix(e.Data, encryptionPrefix) {
- parts := strings.Split(e.Data, ":")
- if len(parts) != 3 {
- return nil, er.Errorf("invalid encrypted macaroon " +
- "format, expected 'snacl::" +
- "'")
- }
-
- pw, err := pwCallback("Enter macaroon encryption password: ")
- if err != nil {
- return nil, er.Errorf("could not read password from "+
- "terminal: %v", err)
- }
-
- macBytes, err = decryptMacaroon(parts[1], parts[2], pw)
- if err != nil {
- return nil, er.Errorf("unable to decrypt macaroon: %v",
- err)
- }
- } else {
- macBytes, err = util.DecodeHex(e.Data)
- if err != nil {
- return nil, er.Errorf("unable to hex decode "+
- "macaroon: %v", err)
- }
- }
-
- // Parse the macaroon data into its native struct.
- mac := &macaroon.Macaroon{}
- if err := mac.UnmarshalBinary(macBytes); err != nil {
- return nil, er.Errorf("unable to decode macaroon: %v", err)
- }
- return mac, nil
-}
-
-// storeMacaroon stores a native macaroon instance to the entry. If a non-nil
-// password is provided, then the macaroon is encrypted with that password. If
-// not, the macaroon is stored as plain text.
-func (e *macaroonEntry) storeMacaroon(mac *macaroon.Macaroon, pw []byte) er.R {
- // First of all, make sure we can serialize the macaroon.
- macBytes, errr := mac.MarshalBinary()
- if errr != nil {
- return er.Errorf("unable to marshal macaroon: %v", errr)
- }
-
- if len(pw) == 0 {
- e.Data = hex.EncodeToString(macBytes)
- return nil
- }
-
- // The user did set a password. Let's derive an encryption key from it.
- key, err := snacl.NewSecretKey(
- &pw, snacl.DefaultN, snacl.DefaultR, snacl.DefaultP,
- )
- if err != nil {
- return er.Errorf("unable to create encryption key: %v", err)
- }
-
- // Encrypt the macaroon data with the derived key and store it in the
- // human readable format snacl::.
- encryptedMac, err := key.Encrypt(macBytes)
- if err != nil {
- return er.Errorf("unable to encrypt macaroon: %v", err)
- }
-
- keyB64 := base64.StdEncoding.EncodeToString(key.Marshal())
- dataB64 := base64.StdEncoding.EncodeToString(encryptedMac)
- e.Data = fmt.Sprintf("%s%s:%s", encryptionPrefix, keyB64, dataB64)
-
- return nil
-}
-
-// decryptMacaroon decrypts the cipher text macaroon by using the serialized
-// encryption key and the password.
-func decryptMacaroon(keyB64, dataB64 string, pw []byte) ([]byte, er.R) {
- // Base64 decode both the marshalled encryption key and macaroon data.
- keyData, errr := base64.StdEncoding.DecodeString(keyB64)
- if errr != nil {
- return nil, er.Errorf("could not base64 decode encryption "+
- "key: %v", errr)
- }
- encryptedMac, errr := base64.StdEncoding.DecodeString(dataB64)
- if errr != nil {
- return nil, er.Errorf("could not base64 decode macaroon "+
- "data: %v", errr)
- }
-
- // Unmarshal the encryption key and ask the user for the password.
- key := &snacl.SecretKey{}
- err := key.Unmarshal(keyData)
- if err != nil {
- return nil, er.Errorf("could not unmarshal encryption key: %v",
- err)
- }
-
- // Derive the final encryption key and then decrypt the macaroon with
- // it.
- err = key.DeriveKey(&pw)
- if err != nil {
- return nil, er.Errorf("could not derive encryption key, "+
- "possibly due to incorrect password: %v", err)
- }
- macBytes, err := key.Decrypt(encryptedMac)
- if err != nil {
- return nil, er.Errorf("could not decrypt macaroon data: %v",
- err)
- }
- return macBytes, nil
-}
diff --git a/lnd/cmd/lncli/macaroon_jar_test.go b/lnd/cmd/lncli/macaroon_jar_test.go
deleted file mode 100644
index 0bc840d5..00000000
--- a/lnd/cmd/lncli/macaroon_jar_test.go
+++ /dev/null
@@ -1,103 +0,0 @@
-package main
-
-import (
- "testing"
-
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/btcutil/util"
- "github.com/stretchr/testify/require"
- "gopkg.in/macaroon.v2"
-)
-
-var (
- dummyMacStr = "0201047465737402067788991234560000062052d26ed139ea5af8" +
- "3e675500c4ccb2471f62191b745bab820f129e5588a255d2"
- dummyMac, _ = util.DecodeHex(dummyMacStr)
- encryptedEntry = &macaroonEntry{
- Name: "encryptedMac",
- Data: "snacl:exX8xbUOb6Gih88ybL2jZGo+DBDPU2tYKkvo0eVVmbDGDoFP" +
- "zlv5xvqNK5eml0LKLcB8LdZRw43qXK1W2OLs/gBAAAAAAAAACAAA" +
- "AAAAAAABAAAAAAAAAA==:C8TN/aDOvSLiBCX+IdoPTx+UUWhVdGj" +
- "NQvbcaWp+KXQWqPfpRZpjJQ6B2PDx5mJxImcezJGPx8ShAqMdxWe" +
- "l2precU+1cOjk7HQFkYuu943eJ00s6JerAY+ssg==",
- }
- plaintextEntry = &macaroonEntry{
- Name: "plaintextMac",
- Data: dummyMacStr,
- }
-
- testPassword = []byte("S3curePazzw0rd")
- pwCallback = func(string) ([]byte, er.R) {
- return testPassword, nil
- }
- noPwCallback = func(string) ([]byte, er.R) {
- return nil, nil
- }
-)
-
-// TestMacaroonJarEncrypted tests that a macaroon can be stored and retrieved
-// safely by encrypting/decrypting it with a password.
-func TestMacaroonJarEncrypted(t *testing.T) {
- // Create a new macaroon entry from the dummy macaroon and encrypt it
- // with the test password.
- newEntry := &macaroonEntry{
- Name: "encryptedMac",
- }
- err := newEntry.storeMacaroon(toMacaroon(t, dummyMac), testPassword)
- util.RequireNoErr(t, err)
-
- // Now decrypt it again and make sure we get the same content back.
- mac, err := newEntry.loadMacaroon(pwCallback)
- util.RequireNoErr(t, err)
- macBytes, errr := mac.MarshalBinary()
- util.RequireNoErr(t, er.E(errr))
- require.Equal(t, dummyMac, macBytes)
-
- // The encrypted data of the entry we just created shouldn't be the
- // same as our test entry because of the salt snacl uses.
- require.NotEqual(t, encryptedEntry.Data, newEntry.Data)
-
- // Decrypt the hard coded test entry and make sure the decrypted content
- // matches our created entry.
- mac, err = encryptedEntry.loadMacaroon(pwCallback)
- util.RequireNoErr(t, err)
- macBytes, errr = mac.MarshalBinary()
- util.RequireNoErr(t, er.E(errr))
- require.Equal(t, dummyMac, macBytes)
-}
-
-// TestMacaroonJarPlaintext tests that a macaroon can be stored and retrieved
-// as plaintext as well.
-func TestMacaroonJarPlaintext(t *testing.T) {
- // Create a new macaroon entry from the dummy macaroon and encrypt it
- // with the test password.
- newEntry := &macaroonEntry{
- Name: "plaintextMac",
- }
- err := newEntry.storeMacaroon(toMacaroon(t, dummyMac), nil)
- util.RequireNoErr(t, err)
-
- // Now decrypt it again and make sure we get the same content back.
- mac, err := newEntry.loadMacaroon(noPwCallback)
- util.RequireNoErr(t, err)
- macBytes, errr := mac.MarshalBinary()
- util.RequireNoErr(t, er.E(errr))
- require.Equal(t, dummyMac, macBytes)
- require.Equal(t, plaintextEntry.Data, newEntry.Data)
-
- // Load the hard coded plaintext test entry and make sure the loaded
- // content matches our created entry.
- mac, err = plaintextEntry.loadMacaroon(noPwCallback)
- util.RequireNoErr(t, err)
- macBytes, errr = mac.MarshalBinary()
- util.RequireNoErr(t, er.E(errr))
- require.Equal(t, dummyMac, macBytes)
-}
-
-func toMacaroon(t *testing.T, macData []byte) *macaroon.Macaroon {
- mac := &macaroon.Macaroon{}
- errr := mac.UnmarshalBinary(macData)
- util.RequireNoErr(t, er.E(errr))
-
- return mac
-}
diff --git a/lnd/cmd/lncli/main.go b/lnd/cmd/lncli/main.go
deleted file mode 100644
index 62b783e0..00000000
--- a/lnd/cmd/lncli/main.go
+++ /dev/null
@@ -1,402 +0,0 @@
-// Copyright (c) 2013-2017 The btcsuite developers
-// Copyright (c) 2015-2016 The Decred developers
-// Copyright (C) 2015-2017 The Lightning Network Developers
-
-package main
-
-import (
- "crypto/tls"
- "fmt"
- "os"
- "path/filepath"
- "strings"
- "syscall"
-
- "github.com/pkt-cash/pktd/btcutil"
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/lnd/lncfg"
- "github.com/pkt-cash/pktd/lnd/lnrpc"
- "github.com/pkt-cash/pktd/lnd/macaroons"
- "github.com/pkt-cash/pktd/pktconfig/version"
- "github.com/urfave/cli"
-
- "golang.org/x/crypto/ssh/terminal"
- "google.golang.org/grpc"
- "google.golang.org/grpc/credentials"
-)
-
-const (
- defaultDataDir = "data"
- defaultChainSubDir = "chain"
- defaultTLSCertFilename = "tls.cert"
- defaultMacaroonFilename = "admin.macaroon"
- defaultRPCPort = "10009"
- defaultRPCHostPort = "localhost:" + defaultRPCPort
-)
-
-var (
- defaultLndDir = btcutil.AppDataDir("lnd", false)
- defaultTLSCertPath = filepath.Join(defaultLndDir, defaultTLSCertFilename)
-
- // maxMsgRecvSize is the largest message our client will receive. We
- // set this to 200MiB atm.
- maxMsgRecvSize = grpc.MaxCallRecvMsgSize(1 * 1024 * 1024 * 200)
-)
-
-func fatal(err er.R) {
- fmt.Fprintf(os.Stderr, "[lncli] %v\n", err)
- os.Exit(1)
-}
-
-func getWalletUnlockerClient(ctx *cli.Context) (lnrpc.WalletUnlockerClient, func()) {
- conn := getClientConn(ctx, true)
-
- cleanUp := func() {
- conn.Close()
- }
-
- return lnrpc.NewWalletUnlockerClient(conn), cleanUp
-}
-
-func getMetaServiceClient(ctx *cli.Context) (lnrpc.MetaServiceClient, func()) {
- conn := getClientConn(ctx, true)
-
- cleanUp := func() {
- conn.Close()
- }
-
- return lnrpc.NewMetaServiceClient(conn), cleanUp
-}
-
-
-func getClient(ctx *cli.Context) (lnrpc.LightningClient, func()) {
- conn := getClientConn(ctx, false)
-
- cleanUp := func() {
- conn.Close()
- }
-
- return lnrpc.NewLightningClient(conn), cleanUp
-}
-
-func getClientConn(ctx *cli.Context, skipMacaroons bool) *grpc.ClientConn {
- // First, we'll get the selected stored profile or an ephemeral one
- // created from the global options in the CLI context.
- profile, err := getGlobalOptions(ctx, skipMacaroons)
- if err != nil {
- fatal(er.Errorf("could not load global options: %v", err))
- }
-
- // Load the specified TLS certificate.
- certPool, err := profile.cert()
- if err != nil {
- fatal(er.Errorf("could not create cert pool: %v", err))
- }
-
- var opts []grpc.DialOption
- if ctx.GlobalBool("notls") {
- opts = append(opts, grpc.WithInsecure())
- } else {
- // Build transport credentials from the certificate pool. If there is no
- // certificate pool, we expect the server to use a non-self-signed
- // certificate such as a certificate obtained from Let's Encrypt.
- var creds credentials.TransportCredentials
- if certPool != nil {
- creds = credentials.NewClientTLSFromCert(certPool, "")
- } else {
- // Fallback to the system pool. Using an empty tls config is an
- // alternative to x509.SystemCertPool(). That call is not
- // supported on Windows.
- creds = credentials.NewTLS(&tls.Config{})
- }
-
- // Create a dial options array.
- opts = append(opts, grpc.WithTransportCredentials(creds))
- }
-
- // Only process macaroon credentials if --no-macaroons isn't set and
- // if we're not skipping macaroon processing.
- if !profile.NoMacaroons && !skipMacaroons {
- // Find out which macaroon to load.
- macName := profile.Macaroons.Default
- if ctx.GlobalIsSet("macfromjar") {
- macName = ctx.GlobalString("macfromjar")
- }
- var macEntry *macaroonEntry
- for _, entry := range profile.Macaroons.Jar {
- if entry.Name == macName {
- macEntry = entry
- break
- }
- }
- if macEntry == nil {
- fatal(er.Errorf("macaroon with name '%s' not found "+
- "in profile", macName))
- }
-
- // Get and possibly decrypt the specified macaroon.
- //
- // TODO(guggero): Make it possible to cache the password so we
- // don't need to ask for it every time.
- mac, err := macEntry.loadMacaroon(readPassword)
- if err != nil {
- fatal(er.Errorf("could not load macaroon: %v", err))
- }
-
- macConstraints := []macaroons.Constraint{
- // We add a time-based constraint to prevent replay of the
- // macaroon. It's good for 60 seconds by default to make up for
- // any discrepancy between client and server clocks, but leaking
- // the macaroon before it becomes invalid makes it possible for
- // an attacker to reuse the macaroon. In addition, the validity
- // time of the macaroon is extended by the time the server clock
- // is behind the client clock, or shortened by the time the
- // server clock is ahead of the client clock (or invalid
- // altogether if, in the latter case, this time is more than 60
- // seconds).
- // TODO(aakselrod): add better anti-replay protection.
- macaroons.TimeoutConstraint(profile.Macaroons.Timeout),
-
- // Lock macaroon down to a specific IP address.
- macaroons.IPLockConstraint(profile.Macaroons.IP),
-
- // ... Add more constraints if needed.
- }
-
- // Apply constraints to the macaroon.
- constrainedMac, err := macaroons.AddConstraints(
- mac, macConstraints...,
- )
- if err != nil {
- fatal(err)
- }
-
- // Now we append the macaroon credentials to the dial options.
- cred := macaroons.NewMacaroonCredential(constrainedMac)
- opts = append(opts, grpc.WithPerRPCCredentials(cred))
- }
-
- // We need to use a custom dialer so we can also connect to unix sockets
- // and not just TCP addresses.
- genericDialer := lncfg.ClientAddressDialer(defaultRPCPort)
- opts = append(opts, grpc.WithContextDialer(genericDialer))
- opts = append(opts, grpc.WithDefaultCallOptions(maxMsgRecvSize))
-
- conn, errr := grpc.Dial(profile.RPCServer, opts...)
- if errr != nil {
- fatal(er.Errorf("unable to connect to RPC server: %v", errr))
- }
-
- return conn
-}
-
-// extractPathArgs parses the TLS certificate and macaroon paths from the
-// command.
-func extractPathArgs(ctx *cli.Context) (string, string, er.R) {
- // We'll start off by parsing the active chain and network. These are
- // needed to determine the correct path to the macaroon when not
- // specified.
- chain := strings.ToLower(ctx.GlobalString("chain"))
- switch chain {
- case "bitcoin", "litecoin", "pkt":
- default:
- return "", "", er.Errorf("unknown chain: %v", chain)
- }
-
- network := strings.ToLower(ctx.GlobalString("network"))
- switch network {
- case "mainnet", "testnet", "regtest", "simnet":
- default:
- return "", "", er.Errorf("unknown network: %v", network)
- }
-
- // We'll now fetch the lnddir so we can make a decision on how to
- // properly read the macaroons (if needed) and also the cert. This will
- // either be the default, or will have been overwritten by the end
- // user.
- lndDir := lncfg.CleanAndExpandPath(ctx.GlobalString("lnddir"))
-
- // If the macaroon path as been manually provided, then we'll only
- // target the specified file.
- var macPath string
- if ctx.GlobalString("macaroonpath") != "" {
- macPath = lncfg.CleanAndExpandPath(ctx.GlobalString("macaroonpath"))
- } else {
- // Otherwise, we'll go into the path:
- // lnddir/data/chain// in order to fetch the
- // macaroon that we need.
- macPath = filepath.Join(
- lndDir, defaultDataDir, defaultChainSubDir, chain,
- network, defaultMacaroonFilename,
- )
- }
-
- tlsCertPath := lncfg.CleanAndExpandPath(ctx.GlobalString("tlscertpath"))
-
- // If a custom lnd directory was set, we'll also check if custom paths
- // for the TLS cert and macaroon file were set as well. If not, we'll
- // override their paths so they can be found within the custom lnd
- // directory set. This allows us to set a custom lnd directory, along
- // with custom paths to the TLS cert and macaroon file.
- if lndDir != defaultLndDir {
- tlsCertPath = filepath.Join(lndDir, defaultTLSCertFilename)
- }
-
- return tlsCertPath, macPath, nil
-}
-
-func main() {
- app := cli.NewApp()
- app.Name = "lncli"
- app.Version = version.Version()
- app.Usage = "control plane for your Lightning Network Daemon (lnd)"
- app.Flags = []cli.Flag{
- cli.StringFlag{
- Name: "rpcserver",
- Value: defaultRPCHostPort,
- Usage: "The host:port of LN daemon.",
- },
- cli.StringFlag{
- Name: "lnddir",
- Value: defaultLndDir,
- Usage: "The path to lnd's base directory.",
- },
- cli.BoolFlag{
- Name: "notls",
- Usage: "Disable TLS, needed if --notls is passed to pld.",
- },
- cli.StringFlag{
- Name: "tlscertpath",
- Value: defaultTLSCertPath,
- Usage: "The path to lnd's TLS certificate.",
- },
- cli.StringFlag{
- Name: "chain, c",
- Usage: "The chain lnd is running on, e.g. pkt.",
- Value: "pkt",
- },
- cli.StringFlag{
- Name: "network, n",
- Usage: "The network lnd is running on, e.g. mainnet, " +
- "testnet, etc.",
- Value: "mainnet",
- },
- cli.BoolFlag{
- Name: "no-macaroons",
- Usage: "Disable macaroon authentication.",
- },
- cli.StringFlag{
- Name: "macaroonpath",
- Usage: "The path to macaroon file.",
- },
- cli.Int64Flag{
- Name: "macaroontimeout",
- Value: 60,
- Usage: "Anti-replay macaroon validity time in seconds.",
- },
- cli.StringFlag{
- Name: "macaroonip",
- Usage: "If set, lock macaroon to specific IP address.",
- },
- cli.StringFlag{
- Name: "profile, p",
- Usage: "Instead of reading settings from command " +
- "line parameters or using the default " +
- "profile, use a specific profile. If " +
- "a default profile is set, this flag can be " +
- "set to an empty string to disable reading " +
- "values from the profiles file.",
- },
- cli.StringFlag{
- Name: "macfromjar",
- Usage: "Use this macaroon from the profile's " +
- "macaroon jar instead of the default one. " +
- "Can only be used if profiles are defined.",
- },
- }
- app.Commands = []cli.Command{
- createCommand,
- unlockCommand,
- changePasswordCommand,
- newAddressCommand,
- estimateFeeCommand,
- sendManyCommand,
- sendCoinsCommand,
- listUnspentCommand,
- connectCommand,
- disconnectCommand,
- openChannelCommand,
- closeChannelCommand,
- closeAllChannelsCommand,
- abandonChannelCommand,
- listPeersCommand,
- walletBalanceCommand,
- getAddressBalancesCommand,
- channelBalanceCommand,
- getInfoCommand,
- getRecoveryInfoCommand,
- pendingChannelsCommand,
- sendPaymentCommand,
- payInvoiceCommand,
- sendToRouteCommand,
- addInvoiceCommand,
- lookupInvoiceCommand,
- listInvoicesCommand,
- listChannelsCommand,
- closedChannelsCommand,
- listPaymentsCommand,
- describeGraphCommand,
- getNodeMetricsCommand,
- getChanInfoCommand,
- getNodeInfoCommand,
- queryRoutesCommand,
- getNetworkInfoCommand,
- debugLevelCommand,
- decodePayReqCommand,
- listChainTxnsCommand,
- stopCommand,
- signMessageCommand,
- verifyMessageCommand,
- feeReportCommand,
- updateChannelPolicyCommand,
- forwardingHistoryCommand,
- exportChanBackupCommand,
- verifyChanBackupCommand,
- restoreChanBackupCommand,
- bakeMacaroonCommand,
- listMacaroonIDsCommand,
- deleteMacaroonIDCommand,
- listPermissionsCommand,
- printMacaroonCommand,
- trackPaymentCommand,
- versionCommand,
- profileSubCommand,
- resyncCommand,
- stopresyncCommand,
- }
-
- // Add any extra commands determined by build flags.
- app.Commands = append(app.Commands, autopilotCommands()...)
- app.Commands = append(app.Commands, invoicesCommands()...)
- app.Commands = append(app.Commands, routerCommands()...)
- app.Commands = append(app.Commands, walletCommands()...)
- app.Commands = append(app.Commands, watchtowerCommands()...)
- app.Commands = append(app.Commands, wtclientCommands()...)
-
- if err := app.Run(os.Args); err != nil {
- fatal(er.E(err))
- }
-}
-
-// readPassword reads a password from the terminal. This requires there to be an
-// actual TTY so passing in a password from stdin won't work.
-func readPassword(text string) ([]byte, er.R) {
- fmt.Print(text)
-
- // The variable syscall.Stdin is of a different type in the Windows API
- // that's why we need the explicit cast. And of course the linter
- // doesn't like it either.
- pw, err := terminal.ReadPassword(int(syscall.Stdin)) // nolint:unconvert
- fmt.Println()
- return pw, er.E(err)
-}
diff --git a/lnd/cmd/lncli/profile.go b/lnd/cmd/lncli/profile.go
deleted file mode 100644
index 2846bff9..00000000
--- a/lnd/cmd/lncli/profile.go
+++ /dev/null
@@ -1,258 +0,0 @@
-package main
-
-import (
- "bytes"
- "crypto/x509"
- "encoding/json"
- "io/ioutil"
- "path"
- "strings"
-
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/lnd/lncfg"
- "github.com/pkt-cash/pktd/lnd/lnrpc"
- "github.com/pkt-cash/pktd/lnd/walletunlocker"
- "github.com/urfave/cli"
- "gopkg.in/macaroon.v2"
-)
-
-var (
- errNoProfileFile = er.GenericErrorType.CodeWithDetail("errNoProfileFile",
- "no profile file found")
-)
-
-// profileEntry is a struct that represents all settings for one specific
-// profile.
-type profileEntry struct {
- Name string `json:"name"`
- RPCServer string `json:"rpcserver"`
- LndDir string `json:"lnddir"`
- Chain string `json:"chain"`
- Network string `json:"network"`
- NoMacaroons bool `json:"no-macaroons,omitempty"`
- TLSCert string `json:"tlscert"`
- Macaroons *macaroonJar `json:"macaroons"`
-}
-
-// cert returns the profile's TLS certificate as a x509 certificate pool.
-func (e *profileEntry) cert() (*x509.CertPool, er.R) {
- if e.TLSCert == "" {
- return nil, nil
- }
-
- cp := x509.NewCertPool()
- if !cp.AppendCertsFromPEM([]byte(e.TLSCert)) {
- return nil, er.Errorf("credentials: failed to append " +
- "certificate")
- }
- return cp, nil
-}
-
-// getGlobalOptions returns the global connection options. If a profile file
-// exists, these global options might be read from a predefined profile. If no
-// profile exists, the global options from the command line are returned as an
-// ephemeral profile entry.
-func getGlobalOptions(ctx *cli.Context, skipMacaroons bool) (*profileEntry, er.R) {
-
- var profileName string
-
- // Try to load the default profile file and depending on its existence
- // what profile to use.
- f, err := loadProfileFile(defaultProfileFile)
- switch {
- // The legacy case where no profile file exists and the user also didn't
- // request to use one. We only consider the global options here.
- case errNoProfileFile.Is(err) && !ctx.GlobalIsSet("profile"):
- return profileFromContext(ctx, false, skipMacaroons)
-
- // The file doesn't exist but the user specified an explicit profile.
- case errNoProfileFile.Is(err) && ctx.GlobalIsSet("profile"):
- return nil, er.Errorf("profile file %s does not exist",
- defaultProfileFile)
-
- // There is a file but we couldn't read/parse it.
- case err != nil:
- return nil, er.Errorf("could not read profile file %s: "+
- "%v", defaultProfileFile, err)
-
- // The user explicitly disabled the use of profiles for this command by
- // setting the flag to an empty string. We fall back to the default/old
- // behavior.
- case ctx.GlobalIsSet("profile") && ctx.GlobalString("profile") == "":
- return profileFromContext(ctx, false, skipMacaroons)
-
- // There is a file, but no default profile is specified. The user also
- // didn't specify a profile to use so we fall back to the default/old
- // behavior.
- case !ctx.GlobalIsSet("profile") && len(f.Default) == 0:
- return profileFromContext(ctx, false, skipMacaroons)
-
- // The user didn't specify a profile but there is a default one defined.
- case !ctx.GlobalIsSet("profile") && len(f.Default) > 0:
- profileName = f.Default
-
- // The user specified a specific profile to use.
- case ctx.GlobalIsSet("profile"):
- profileName = ctx.GlobalString("profile")
- }
-
- // If we got to here, we do have a profile file and know the name of the
- // profile to use. Now we just need to make sure it does exist.
- for _, prof := range f.Profiles {
- if prof.Name == profileName {
- return prof, nil
- }
- }
-
- return nil, er.Errorf("profile '%s' not found in file %s", profileName,
- defaultProfileFile)
-}
-
-// profileFromContext creates an ephemeral profile entry from the global options
-// set in the CLI context.
-func profileFromContext(ctx *cli.Context, store, skipMacaroons bool) (
- *profileEntry, er.R) {
-
- // Parse the paths of the cert and macaroon. This will validate the
- // chain and network value as well.
- tlsCertPath, macPath, err := extractPathArgs(ctx)
- if err != nil {
- return nil, err
- }
-
- // Load the certificate file now, if specified. We store it as plain PEM
- // directly.
- var tlsCert []byte
- if lnrpc.FileExists(tlsCertPath) {
- var err error
- tlsCert, err = ioutil.ReadFile(tlsCertPath)
- if err != nil {
- return nil, er.Errorf("could not load TLS cert file "+
- "%s: %v", tlsCertPath, err)
- }
- }
-
- entry := &profileEntry{
- RPCServer: ctx.GlobalString("rpcserver"),
- LndDir: lncfg.CleanAndExpandPath(ctx.GlobalString("lnddir")),
- Chain: ctx.GlobalString("chain"),
- Network: ctx.GlobalString("network"),
- NoMacaroons: ctx.GlobalBool("no-macaroons"),
- TLSCert: string(tlsCert),
- }
-
- // If we aren't using macaroons in general (flag --no-macaroons) or
- // don't need macaroons for this command (wallet unlocker), we can now
- // return already.
- if skipMacaroons || ctx.GlobalBool("no-macaroons") {
- return entry, nil
- }
-
- // Now load and possibly encrypt the macaroon file.
- macBytes, errr := ioutil.ReadFile(macPath)
- if errr != nil {
- return nil, er.Errorf("unable to read macaroon path (check "+
- "the network setting!): %v", errr)
- }
- mac := &macaroon.Macaroon{}
- if errr = mac.UnmarshalBinary(macBytes); errr != nil {
- return nil, er.Errorf("unable to decode macaroon: %v", errr)
- }
-
- var pw []byte
- if store {
- // Read a password from the terminal. If it's empty, we won't
- // encrypt the macaroon and store it plaintext.
- pw, err = capturePassword(
- "Enter password to encrypt macaroon with or leave "+
- "blank to store in plaintext: ", true,
- walletunlocker.ValidatePassword,
- )
- if err != nil {
- return nil, er.Errorf("unable to get encryption "+
- "password: %v", err)
- }
- }
- macEntry := &macaroonEntry{}
- if err = macEntry.storeMacaroon(mac, pw); err != nil {
- return nil, er.Errorf("unable to store macaroon: %v", err)
- }
-
- // We determine the name of the macaroon from the file itself but cut
- // off the ".macaroon" at the end.
- macEntry.Name = path.Base(macPath)
- if path.Ext(macEntry.Name) == "macaroon" {
- macEntry.Name = strings.TrimSuffix(macEntry.Name, ".macaroon")
- }
-
- // Now that we have the macaroon jar as well, let's return the entry
- // with all the values populated.
- entry.Macaroons = &macaroonJar{
- Default: macEntry.Name,
- Timeout: ctx.GlobalInt64("macaroontimeout"),
- IP: ctx.GlobalString("macaroonip"),
- Jar: []*macaroonEntry{macEntry},
- }
-
- return entry, nil
-}
-
-// loadProfileFile tries to load the file specified and JSON deserialize it into
-// the profile file struct.
-func loadProfileFile(file string) (*profileFile, er.R) {
- if !lnrpc.FileExists(file) {
- return nil, errNoProfileFile.Default()
- }
-
- content, errr := ioutil.ReadFile(file)
- if errr != nil {
- return nil, er.Errorf("could not load profile file %s: %v",
- file, errr)
- }
- f := &profileFile{}
- err := f.unmarshalJSON(content)
- if err != nil {
- return nil, er.Errorf("could not unmarshal profile file %s: "+
- "%v", file, err)
- }
- return f, nil
-}
-
-// saveProfileFile stores the given profile file struct in the specified file,
-// overwriting it if it already existed.
-func saveProfileFile(file string, f *profileFile) er.R {
- content, err := f.marshalJSON()
- if err != nil {
- return er.Errorf("could not marshal profile: %v", err)
- }
- return er.E(ioutil.WriteFile(file, content, 0644))
-}
-
-// profileFile is a struct that represents the whole content of a profile file.
-type profileFile struct {
- Default string `json:"default,omitempty"`
- Profiles []*profileEntry `json:"profiles"`
-}
-
-// unmarshalJSON tries to parse the given JSON and unmarshal it into the
-// receiving instance.
-func (f *profileFile) unmarshalJSON(content []byte) er.R {
- return er.E(json.Unmarshal(content, f))
-}
-
-// marshalJSON serializes the receiving instance to formatted/indented JSON.
-func (f *profileFile) marshalJSON() ([]byte, er.R) {
- b, err := json.Marshal(f)
- if err != nil {
- return nil, er.Errorf("error JSON marshalling profile: %v",
- err)
- }
-
- var out bytes.Buffer
- err = json.Indent(&out, b, "", " ")
- if err != nil {
- return nil, er.Errorf("error indenting profile JSON: %v", err)
- }
- out.WriteString("\n")
- return out.Bytes(), nil
-}
diff --git a/lnd/cmd/lncli/routerrpc.go b/lnd/cmd/lncli/routerrpc.go
deleted file mode 100644
index 819f66d2..00000000
--- a/lnd/cmd/lncli/routerrpc.go
+++ /dev/null
@@ -1,13 +0,0 @@
-package main
-
-import "github.com/urfave/cli"
-
-// routerCommands returns a list of routerrpc commands.
-func routerCommands() []cli.Command {
- return []cli.Command{
- queryMissionControlCommand,
- queryProbCommand,
- resetMissionControlCommand,
- buildRouteCommand,
- }
-}
diff --git a/lnd/cmd/lncli/types.go b/lnd/cmd/lncli/types.go
deleted file mode 100644
index 58ccd7fb..00000000
--- a/lnd/cmd/lncli/types.go
+++ /dev/null
@@ -1,68 +0,0 @@
-package main
-
-import (
- "encoding/hex"
- "fmt"
- "strconv"
- "strings"
-
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/chaincfg/chainhash"
- "github.com/pkt-cash/pktd/lnd/lnrpc"
-)
-
-// OutPoint displays an outpoint string in the form ":".
-type OutPoint string
-
-// NewOutPointFromProto formats the lnrpc.OutPoint into an OutPoint for display.
-func NewOutPointFromProto(op *lnrpc.OutPoint) OutPoint {
- var hash chainhash.Hash
- copy(hash[:], op.TxidBytes)
- return OutPoint(fmt.Sprintf("%v:%d", hash, op.OutputIndex))
-}
-
-// NewProtoOutPoint parses an OutPoint into its corresponding lnrpc.OutPoint
-// type.
-func NewProtoOutPoint(op string) (*lnrpc.OutPoint, er.R) {
- parts := strings.Split(op, ":")
- if len(parts) != 2 {
- return nil, er.New("outpoint should be of the form txid:index")
- }
- txid := parts[0]
- if hex.DecodedLen(len(txid)) != chainhash.HashSize {
- return nil, er.Errorf("invalid hex-encoded txid %v", txid)
- }
- outputIndex, err := strconv.Atoi(parts[1])
- if err != nil {
- return nil, er.Errorf("invalid output index: %v", err)
- }
- return &lnrpc.OutPoint{
- TxidStr: txid,
- OutputIndex: uint32(outputIndex),
- }, nil
-}
-
-// Utxo displays information about an unspent output, including its address,
-// amount, pkscript, and confirmations.
-type Utxo struct {
- Type lnrpc.AddressType `json:"address_type"`
- Address string `json:"address"`
- AmountSat int64 `json:"amount_sat"`
- PkScript string `json:"pk_script"`
- OutPoint OutPoint `json:"outpoint"`
- Confirmations int64 `json:"confirmations"`
-}
-
-// NewUtxoFromProto creates a display Utxo from the Utxo proto. This filters out
-// the raw txid bytes from the provided outpoint, which will otherwise be
-// printed in base64.
-func NewUtxoFromProto(utxo *lnrpc.Utxo) *Utxo {
- return &Utxo{
- Type: utxo.AddressType,
- Address: utxo.Address,
- AmountSat: utxo.AmountSat,
- PkScript: utxo.PkScript,
- OutPoint: NewOutPointFromProto(utxo.Outpoint),
- Confirmations: utxo.Confirmations,
- }
-}
diff --git a/lnd/cmd/lncli/walletrpc_active.go b/lnd/cmd/lncli/walletrpc_active.go
deleted file mode 100644
index 365cfaaf..00000000
--- a/lnd/cmd/lncli/walletrpc_active.go
+++ /dev/null
@@ -1,759 +0,0 @@
-// +build walletrpc
-
-package main
-
-import (
- "context"
- "encoding/base64"
- "encoding/hex"
- "encoding/json"
- "fmt"
- "sort"
-
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/chaincfg/chainhash"
- "github.com/pkt-cash/pktd/lnd/lnrpc"
- "github.com/pkt-cash/pktd/lnd/lnrpc/walletrpc"
- "github.com/urfave/cli"
-)
-
-var (
- // psbtCommand is a wallet subcommand that is responsible for PSBT
- // operations.
- psbtCommand = cli.Command{
- Name: "psbt",
- Usage: "Interact with partially signed bitcoin transactions " +
- "(PSBTs).",
- Subcommands: []cli.Command{
- fundPsbtCommand,
- finalizePsbtCommand,
- },
- }
-)
-
-// walletCommands will return the set of commands to enable for walletrpc
-// builds.
-func walletCommands() []cli.Command {
- return []cli.Command{
- {
- Name: "wallet",
- Category: "Wallet",
- Usage: "Interact with the wallet.",
- Description: "",
- Subcommands: []cli.Command{
- pendingSweepsCommand,
- bumpFeeCommand,
- bumpCloseFeeCommand,
- listSweepsCommand,
- labelTxCommand,
- releaseOutputCommand,
- psbtCommand,
- },
- },
- }
-}
-
-func getWalletClient(ctx *cli.Context) (walletrpc.WalletKitClient, func()) {
- conn := getClientConn(ctx, false)
- cleanUp := func() {
- conn.Close()
- }
- return walletrpc.NewWalletKitClient(conn), cleanUp
-}
-
-var pendingSweepsCommand = cli.Command{
- Name: "pendingsweeps",
- Usage: "List all outputs that are pending to be swept within lnd.",
- ArgsUsage: "",
- Description: `
- List all on-chain outputs that lnd is currently attempting to sweep
- within its central batching engine. Outputs with similar fee rates are
- batched together in order to sweep them within a single transaction.
- `,
- Flags: []cli.Flag{},
- Action: actionDecorator(pendingSweeps),
-}
-
-func pendingSweeps(ctx *cli.Context) er.R {
- ctxb := context.Background()
- client, cleanUp := getWalletClient(ctx)
- defer cleanUp()
-
- req := &walletrpc.PendingSweepsRequest{}
- resp, err := client.PendingSweeps(ctxb, req)
- if err != nil {
- return err
- }
-
- // Sort them in ascending fee rate order for display purposes.
- sort.Slice(resp.PendingSweeps, func(i, j int) bool {
- return resp.PendingSweeps[i].SatPerByte <
- resp.PendingSweeps[j].SatPerByte
- })
-
- var pendingSweepsResp = struct {
- PendingSweeps []*PendingSweep `json:"pending_sweeps"`
- }{
- PendingSweeps: make([]*PendingSweep, 0, len(resp.PendingSweeps)),
- }
-
- for _, protoPendingSweep := range resp.PendingSweeps {
- pendingSweep := NewPendingSweepFromProto(protoPendingSweep)
- pendingSweepsResp.PendingSweeps = append(
- pendingSweepsResp.PendingSweeps, pendingSweep,
- )
- }
-
- printJSON(pendingSweepsResp)
-
- return nil
-}
-
-var bumpFeeCommand = cli.Command{
- Name: "bumpfee",
- Usage: "Bumps the fee of an arbitrary input/transaction.",
- ArgsUsage: "outpoint",
- Description: `
- This command takes a different approach than bitcoind's bumpfee command.
- lnd has a central batching engine in which inputs with similar fee rates
- are batched together to save on transaction fees. Due to this, we cannot
- rely on bumping the fee on a specific transaction, since transactions
- can change at any point with the addition of new inputs. The list of
- inputs that currently exist within lnd's central batching engine can be
- retrieved through lncli pendingsweeps.
-
- When bumping the fee of an input that currently exists within lnd's
- central batching engine, a higher fee transaction will be created that
- replaces the lower fee transaction through the Replace-By-Fee (RBF)
- policy.
-
- This command also serves useful when wanting to perform a
- Child-Pays-For-Parent (CPFP), where the child transaction pays for its
- parent's fee. This can be done by specifying an outpoint within the low
- fee transaction that is under the control of the wallet.
-
- A fee preference must be provided, either through the conf_target or
- sat_per_byte parameters.
-
- Note that this command currently doesn't perform any validation checks
- on the fee preference being provided. For now, the responsibility of
- ensuring that the new fee preference is sufficient is delegated to the
- user.
-
- The force flag enables sweeping of inputs that are negatively yielding.
- Normally it does not make sense to lose money on sweeping, unless a
- parent transaction needs to get confirmed and there is only a small
- output available to attach the child transaction to.
- `,
- Flags: []cli.Flag{
- cli.Uint64Flag{
- Name: "conf_target",
- Usage: "the number of blocks that the output should " +
- "be swept on-chain within",
- },
- cli.Uint64Flag{
- Name: "sat_per_byte",
- Usage: "a manual fee expressed in sat/byte that " +
- "should be used when sweeping the output",
- },
- cli.BoolFlag{
- Name: "force",
- Usage: "sweep even if the yield is negative",
- },
- },
- Action: actionDecorator(bumpFee),
-}
-
-func bumpFee(ctx *cli.Context) er.R {
- // Display the command's help message if we do not have the expected
- // number of arguments/flags.
- if ctx.NArg() != 1 {
- return er.E(cli.ShowCommandHelp(ctx, "bumpfee"))
- }
-
- // Validate and parse the relevant arguments/flags.
- protoOutPoint, err := NewProtoOutPoint(ctx.Args().Get(0))
- if err != nil {
- return err
- }
-
- client, cleanUp := getWalletClient(ctx)
- defer cleanUp()
-
- resp, err := client.BumpFee(context.Background(), &walletrpc.BumpFeeRequest{
- Outpoint: protoOutPoint,
- TargetConf: uint32(ctx.Uint64("conf_target")),
- SatPerByte: uint32(ctx.Uint64("sat_per_byte")),
- Force: ctx.Bool("force"),
- })
- if err != nil {
- return err
- }
-
- printRespJSON(resp)
-
- return nil
-}
-
-var bumpCloseFeeCommand = cli.Command{
- Name: "bumpclosefee",
- Usage: "Bumps the fee of a channel closing transaction.",
- ArgsUsage: "channel_point",
- Description: `
- This command allows the fee of a channel closing transaction to be
- increased by using the child-pays-for-parent mechanism. It will instruct
- the sweeper to sweep the anchor outputs of transactions in the set
- of valid commitments for the specified channel at the requested fee
- rate or confirmation target.
- `,
- Flags: []cli.Flag{
- cli.Uint64Flag{
- Name: "conf_target",
- Usage: "the number of blocks that the output should " +
- "be swept on-chain within",
- },
- cli.Uint64Flag{
- Name: "sat_per_byte",
- Usage: "a manual fee expressed in sat/byte that " +
- "should be used when sweeping the output",
- },
- },
- Action: actionDecorator(bumpCloseFee),
-}
-
-func bumpCloseFee(ctx *cli.Context) er.R {
- // Display the command's help message if we do not have the expected
- // number of arguments/flags.
- if ctx.NArg() != 1 {
- return er.E(cli.ShowCommandHelp(ctx, "bumpclosefee"))
- }
-
- // Validate the channel point.
- channelPoint := ctx.Args().Get(0)
- _, err := NewProtoOutPoint(channelPoint)
- if err != nil {
- return err
- }
-
- // Fetch all waiting close channels.
- client, cleanUp := getClient(ctx)
- defer cleanUp()
-
- // Fetch waiting close channel commitments.
- commitments, err := getWaitingCloseCommitments(client, channelPoint)
- if err != nil {
- return err
- }
-
- // Retrieve pending sweeps.
- walletClient, cleanUp := getWalletClient(ctx)
- defer cleanUp()
-
- ctxb := context.Background()
- sweeps, err := walletClient.PendingSweeps(
- ctxb, &walletrpc.PendingSweepsRequest{},
- )
- if err != nil {
- return err
- }
-
- // Match pending sweeps with commitments of the channel for which a bump
- // is requested and bump their fees.
- commitSet := map[string]struct{}{
- commitments.LocalTxid: {},
- commitments.RemoteTxid: {},
- }
- if commitments.RemotePendingTxid != "" {
- commitSet[commitments.RemotePendingTxid] = struct{}{}
- }
-
- for _, sweep := range sweeps.PendingSweeps {
- // Only bump anchor sweeps.
- if sweep.WitnessType != walletrpc.WitnessType_COMMITMENT_ANCHOR {
- continue
- }
-
- // Skip unrelated sweeps.
- sweepTxID, err := chainhash.NewHash(sweep.Outpoint.TxidBytes)
- if err != nil {
- return err
- }
- if _, match := commitSet[sweepTxID.String()]; !match {
- continue
- }
-
- // Bump fee of the anchor sweep.
- fmt.Printf("Bumping fee of %v:%v\n",
- sweepTxID, sweep.Outpoint.OutputIndex)
-
- _, err = walletClient.BumpFee(ctxb, &walletrpc.BumpFeeRequest{
- Outpoint: sweep.Outpoint,
- TargetConf: uint32(ctx.Uint64("conf_target")),
- SatPerByte: uint32(ctx.Uint64("sat_per_byte")),
- Force: true,
- })
- if err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func getWaitingCloseCommitments(client lnrpc.LightningClient,
- channelPoint string) (*lnrpc.PendingChannelsResponse_Commitments,
- error) {
-
- ctxb := context.Background()
-
- req := &lnrpc.PendingChannelsRequest{}
- resp, err := client.PendingChannels(ctxb, req)
- if err != nil {
- return nil, err
- }
-
- // Lookup the channel commit tx hashes.
- for _, channel := range resp.WaitingCloseChannels {
- if channel.Channel.ChannelPoint == channelPoint {
- return channel.Commitments, nil
- }
- }
-
- return nil, er.New("channel not found")
-}
-
-var listSweepsCommand = cli.Command{
- Name: "listsweeps",
- Usage: "Lists all sweeps that have been published by our node.",
- Flags: []cli.Flag{
- cli.BoolFlag{
- Name: "verbose",
- Usage: "lookup full transaction",
- },
- },
- Description: `
- Get a list of the hex-encoded transaction ids of every sweep that our
- node has published. Note that these sweeps may not be confirmed on chain
- yet, as we store them on transaction broadcast, not confirmation.
-
- If the verbose flag is set, the full set of transactions will be
- returned, otherwise only the sweep transaction ids will be returned.
- `,
- Action: actionDecorator(listSweeps),
-}
-
-func listSweeps(ctx *cli.Context) er.R {
- client, cleanUp := getWalletClient(ctx)
- defer cleanUp()
-
- resp, err := client.ListSweeps(
- context.Background(), &walletrpc.ListSweepsRequest{
- Verbose: ctx.IsSet("verbose"),
- },
- )
- if err != nil {
- return err
- }
-
- printJSON(resp)
-
- return nil
-}
-
-var labelTxCommand = cli.Command{
- Name: "labeltx",
- Usage: "adds a label to a transaction",
- ArgsUsage: "txid label",
- Description: `
- Add a label to a transaction. If the transaction already has a label,
- this call will fail unless the overwrite option is set. The label is
- limited to 500 characters. Note that multi word labels must be contained
- in quotation marks ("").
- `,
- Flags: []cli.Flag{
- cli.BoolFlag{
- Name: "overwrite",
- Usage: "set to overwrite existing labels",
- },
- },
- Action: actionDecorator(labelTransaction),
-}
-
-func labelTransaction(ctx *cli.Context) er.R {
- // Display the command's help message if we do not have the expected
- // number of arguments/flags.
- if ctx.NArg() != 2 {
- return er.E(cli.ShowCommandHelp(ctx, "labeltx"))
- }
-
- // Get the transaction id and check that it is a valid hash.
- txid := ctx.Args().Get(0)
- hash, err := chainhash.NewHashFromStr(txid)
- if err != nil {
- return err
- }
-
- label := ctx.Args().Get(1)
-
- walletClient, cleanUp := getWalletClient(ctx)
- defer cleanUp()
-
- ctxb := context.Background()
- _, err = walletClient.LabelTransaction(
- ctxb, &walletrpc.LabelTransactionRequest{
- Txid: hash[:],
- Label: label,
- Overwrite: ctx.Bool("overwrite"),
- },
- )
- if err != nil {
- return err
- }
-
- fmt.Printf("Transaction: %v labelled with: %v\n", txid, label)
-
- return nil
-}
-
-// utxoLease contains JSON annotations for a lease on an unspent output.
-type utxoLease struct {
- ID string `json:"id"`
- OutPoint OutPoint `json:"outpoint"`
- Expiration uint64 `json:"expiration"`
-}
-
-// fundPsbtResponse is a struct that contains JSON annotations for nice result
-// serialization.
-type fundPsbtResponse struct {
- Psbt string `json:"psbt"`
- ChangeOutputIndex int32 `json:"change_output_index"`
- Locks []*utxoLease `json:"locks"`
-}
-
-var fundPsbtCommand = cli.Command{
- Name: "fund",
- Usage: "Fund a Partially Signed Bitcoin Transaction (PSBT).",
- ArgsUsage: "[--template_psbt=T | [--outputs=O [--inputs=I]]] " +
- "[--conf_target=C | --sat_per_vbyte=S]",
- Description: `
- The fund command creates a fully populated PSBT that contains enough
- inputs to fund the outputs specified in either the PSBT or the
- --outputs flag.
-
- If there are no inputs specified in the template (or --inputs flag),
- coin selection is performed automatically. If inputs are specified, the
- wallet assumes that full coin selection happened externally and it will
- not add any additional inputs to the PSBT. If the specified inputs
- aren't enough to fund the outputs with the given fee rate, an error is
- returned.
-
- After either selecting or verifying the inputs, all input UTXOs are
- locked with an internal app ID.
-
- The 'outputs' flag decodes addresses and the amount to send respectively
- in the following JSON format:
-
- --outputs='{"ExampleAddr": NumCoinsInSatoshis, "SecondAddr": Sats}'
-
- The optional 'inputs' flag decodes a JSON list of UTXO outpoints as
- returned by the listunspent command for example:
-
- --inputs='[":",":",...]'
- `,
- Flags: []cli.Flag{
- cli.StringFlag{
- Name: "template_psbt",
- Usage: "the outputs to fund and optional inputs to " +
- "spend provided in the base64 PSBT format",
- },
- cli.StringFlag{
- Name: "outputs",
- Usage: "a JSON compatible map of destination " +
- "addresses to amounts to send, must not " +
- "include a change address as that will be " +
- "added automatically by the wallet",
- },
- cli.StringFlag{
- Name: "inputs",
- Usage: "an optional JSON compatible list of UTXO " +
- "outpoints to use as the PSBT's inputs",
- },
- cli.Uint64Flag{
- Name: "conf_target",
- Usage: "the number of blocks that the transaction " +
- "should be confirmed on-chain within",
- Value: 6,
- },
- cli.Uint64Flag{
- Name: "sat_per_vbyte",
- Usage: "a manual fee expressed in sat/vbyte that " +
- "should be used when creating the transaction",
- },
- },
- Action: actionDecorator(fundPsbt),
-}
-
-func fundPsbt(ctx *cli.Context) er.R {
- // Display the command's help message if there aren't any flags
- // specified.
- if ctx.NumFlags() == 0 {
- return er.E(cli.ShowCommandHelp(ctx, "fund"))
- }
-
- req := &walletrpc.FundPsbtRequest{}
-
- // Parse template flags.
- switch {
- // The PSBT flag is mutally exclusive with the outputs/inputs flags.
- case ctx.IsSet("template_psbt") &&
- (ctx.IsSet("inputs") || ctx.IsSet("outputs")):
-
- return er.Errorf("cannot set template_psbt and inputs/" +
- "outputs flags at the same time")
-
- // Use a pre-existing PSBT as the transaction template.
- case len(ctx.String("template_psbt")) > 0:
- psbtBase64 := ctx.String("template_psbt")
- psbtBytes, err := base64.StdEncoding.DecodeString(psbtBase64)
- if err != nil {
- return err
- }
-
- req.Template = &walletrpc.FundPsbtRequest_Psbt{
- Psbt: psbtBytes,
- }
-
- // The user manually specified outputs and optional inputs in JSON
- // format.
- case len(ctx.String("outputs")) > 0:
- var (
- tpl = &walletrpc.TxTemplate{}
- amountToAddr map[string]uint64
- )
-
- // Parse the address to amount map as JSON now. At least one
- // entry must be present.
- jsonMap := []byte(ctx.String("outputs"))
- if err := json.Unmarshal(jsonMap, &amountToAddr); err != nil {
- return er.Errorf("error parsing outputs JSON: %v",
- err)
- }
- if len(amountToAddr) == 0 {
- return er.Errorf("at least one output must be " +
- "specified")
- }
- tpl.Outputs = amountToAddr
-
- // Inputs are optional.
- if len(ctx.String("inputs")) > 0 {
- var inputs []string
-
- jsonList := []byte(ctx.String("inputs"))
- if err := json.Unmarshal(jsonList, &inputs); err != nil {
- return er.Errorf("error parsing inputs JSON: "+
- "%v", err)
- }
-
- for idx, input := range inputs {
- op, err := NewProtoOutPoint(input)
- if err != nil {
- return er.Errorf("error parsing "+
- "UTXO outpoint %d: %v", idx,
- err)
- }
- tpl.Inputs = append(tpl.Inputs, op)
- }
- }
-
- req.Template = &walletrpc.FundPsbtRequest_Raw{
- Raw: tpl,
- }
-
- default:
- return er.Errorf("must specify either template_psbt or " +
- "outputs flag")
- }
-
- // Parse fee flags.
- switch {
- case ctx.IsSet("conf_target") && ctx.IsSet("sat_per_vbyte"):
- return er.Errorf("cannot set conf_target and sat_per_vbyte " +
- "at the same time")
-
- case ctx.Uint64("conf_target") > 0:
- req.Fees = &walletrpc.FundPsbtRequest_TargetConf{
- TargetConf: uint32(ctx.Uint64("conf_target")),
- }
-
- case ctx.Uint64("sat_per_vbyte") > 0:
- req.Fees = &walletrpc.FundPsbtRequest_SatPerVbyte{
- SatPerVbyte: uint32(ctx.Uint64("sat_per_vbyte")),
- }
- }
-
- walletClient, cleanUp := getWalletClient(ctx)
- defer cleanUp()
-
- response, err := walletClient.FundPsbt(context.Background(), req)
- if err != nil {
- return err
- }
-
- jsonLocks := make([]*utxoLease, len(response.LockedUtxos))
- for idx, lock := range response.LockedUtxos {
- jsonLocks[idx] = &utxoLease{
- ID: hex.EncodeToString(lock.Id),
- OutPoint: NewOutPointFromProto(lock.Outpoint),
- Expiration: lock.Expiration,
- }
- }
-
- printJSON(&fundPsbtResponse{
- Psbt: base64.StdEncoding.EncodeToString(
- response.FundedPsbt,
- ),
- ChangeOutputIndex: response.ChangeOutputIndex,
- Locks: jsonLocks,
- })
-
- return nil
-}
-
-// finalizePsbtResponse is a struct that contains JSON annotations for nice
-// result serialization.
-type finalizePsbtResponse struct {
- Psbt string `json:"psbt"`
- FinalTx string `json:"final_tx"`
-}
-
-var finalizePsbtCommand = cli.Command{
- Name: "finalize",
- Usage: "Finalize a Partially Signed Bitcoin Transaction (PSBT).",
- ArgsUsage: "funded_psbt",
- Description: `
- The finalize command expects a partial transaction with all inputs
- and outputs fully declared and tries to sign all inputs that belong to
- the wallet. Lnd must be the last signer of the transaction. That means,
- if there are any unsigned non-witness inputs or inputs without UTXO
- information attached or inputs without witness data that do not belong
- to lnd's wallet, this method will fail. If no error is returned, the
- PSBT is ready to be extracted and the final TX within to be broadcast.
-
- This method does NOT publish the transaction after it's been finalized
- successfully.
- `,
- Flags: []cli.Flag{
- cli.StringFlag{
- Name: "funded_psbt",
- Usage: "the base64 encoded PSBT to finalize",
- },
- },
- Action: actionDecorator(finalizePsbt),
-}
-
-func finalizePsbt(ctx *cli.Context) er.R {
- // Display the command's help message if we do not have the expected
- // number of arguments/flags.
- if ctx.NArg() != 1 && ctx.NumFlags() != 1 {
- return er.E(cli.ShowCommandHelp(ctx, "finalize"))
- }
-
- var (
- args = ctx.Args()
- psbtBase64 string
- )
- switch {
- case ctx.IsSet("funded_psbt"):
- psbtBase64 = ctx.String("funded_psbt")
- case args.Present():
- psbtBase64 = args.First()
- default:
- return er.Errorf("funded_psbt argument missing")
- }
-
- psbtBytes, err := base64.StdEncoding.DecodeString(psbtBase64)
- if err != nil {
- return err
- }
- req := &walletrpc.FinalizePsbtRequest{
- FundedPsbt: psbtBytes,
- }
-
- walletClient, cleanUp := getWalletClient(ctx)
- defer cleanUp()
-
- response, err := walletClient.FinalizePsbt(context.Background(), req)
- if err != nil {
- return err
- }
-
- printJSON(&finalizePsbtResponse{
- Psbt: base64.StdEncoding.EncodeToString(response.SignedPsbt),
- FinalTx: hex.EncodeToString(response.RawFinalTx),
- })
-
- return nil
-}
-
-var releaseOutputCommand = cli.Command{
- Name: "releaseoutput",
- Usage: "Release an output previously locked by lnd.",
- ArgsUsage: "outpoint",
- Description: `
- The releaseoutput command unlocks an output, allowing it to be available
- for coin selection if it remains unspent.
-
- The internal lnd app lock ID is used when releasing the output.
- Therefore only UTXOs locked by the fundpsbt command can currently be
- released with this command.
- `,
- Flags: []cli.Flag{
- cli.StringFlag{
- Name: "outpoint",
- Usage: "the output to unlock",
- },
- },
- Action: actionDecorator(releaseOutput),
-}
-
-func releaseOutput(ctx *cli.Context) er.R {
- // Display the command's help message if we do not have the expected
- // number of arguments/flags.
- if ctx.NArg() != 1 && ctx.NumFlags() != 1 {
- return er.E(cli.ShowCommandHelp(ctx, "releaseoutput"))
- }
-
- var (
- args = ctx.Args()
- outpointStr string
- )
- switch {
- case ctx.IsSet("outpoint"):
- outpointStr = ctx.String("outpoint")
- case args.Present():
- outpointStr = args.First()
- default:
- return er.Errorf("outpoint argument missing")
- }
-
- outpoint, err := NewProtoOutPoint(outpointStr)
- if err != nil {
- return er.Errorf("error parsing outpoint: %v", err)
- }
- req := &walletrpc.ReleaseOutputRequest{
- Outpoint: outpoint,
- Id: walletrpc.LndInternalLockID[:],
- }
-
- walletClient, cleanUp := getWalletClient(ctx)
- defer cleanUp()
-
- response, err := walletClient.ReleaseOutput(context.Background(), req)
- if err != nil {
- return err
- }
-
- printRespJSON(response)
-
- return nil
-}
diff --git a/lnd/cmd/lncli/walletrpc_default.go b/lnd/cmd/lncli/walletrpc_default.go
deleted file mode 100644
index f919a993..00000000
--- a/lnd/cmd/lncli/walletrpc_default.go
+++ /dev/null
@@ -1,10 +0,0 @@
-// +build !walletrpc
-
-package main
-
-import "github.com/urfave/cli"
-
-// walletCommands will return nil for non-walletrpc builds.
-func walletCommands() []cli.Command {
- return nil
-}
diff --git a/lnd/cmd/lncli/walletrpc_types.go b/lnd/cmd/lncli/walletrpc_types.go
deleted file mode 100644
index fec3cedf..00000000
--- a/lnd/cmd/lncli/walletrpc_types.go
+++ /dev/null
@@ -1,33 +0,0 @@
-package main
-
-import "github.com/pkt-cash/pktd/lnd/lnrpc/walletrpc"
-
-// PendingSweep is a CLI-friendly type of the walletrpc.PendingSweep proto. We
-// use this to show more useful string versions of byte slices and enums.
-type PendingSweep struct {
- OutPoint OutPoint `json:"outpoint"`
- WitnessType string `json:"witness_type"`
- AmountSat uint32 `json:"amount_sat"`
- SatPerByte uint32 `json:"sat_per_byte"`
- BroadcastAttempts uint32 `json:"broadcast_attempts"`
- NextBroadcastHeight uint32 `json:"next_broadcast_height"`
- RequestedSatPerByte uint32 `json:"requested_sat_per_byte"`
- RequestedConfTarget uint32 `json:"requested_conf_target"`
- Force bool `json:"force"`
-}
-
-// NewPendingSweepFromProto converts the walletrpc.PendingSweep proto type into
-// its corresponding CLI-friendly type.
-func NewPendingSweepFromProto(pendingSweep *walletrpc.PendingSweep) *PendingSweep {
- return &PendingSweep{
- OutPoint: NewOutPointFromProto(pendingSweep.Outpoint),
- WitnessType: pendingSweep.WitnessType.String(),
- AmountSat: pendingSweep.AmountSat,
- SatPerByte: pendingSweep.SatPerByte,
- BroadcastAttempts: pendingSweep.BroadcastAttempts,
- NextBroadcastHeight: pendingSweep.NextBroadcastHeight,
- RequestedSatPerByte: pendingSweep.RequestedSatPerByte,
- RequestedConfTarget: pendingSweep.RequestedConfTarget,
- Force: pendingSweep.Force,
- }
-}
diff --git a/lnd/cmd/lncli/watchtower_active.go b/lnd/cmd/lncli/watchtower_active.go
deleted file mode 100644
index 72217239..00000000
--- a/lnd/cmd/lncli/watchtower_active.go
+++ /dev/null
@@ -1,57 +0,0 @@
-// +build watchtowerrpc
-
-package main
-
-import (
- "context"
-
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/lnd/lnrpc/watchtowerrpc"
- "github.com/urfave/cli"
-)
-
-func watchtowerCommands() []cli.Command {
- return []cli.Command{
- {
- Name: "tower",
- Usage: "Interact with the watchtower.",
- Category: "Watchtower",
- Subcommands: []cli.Command{
- towerInfoCommand,
- },
- },
- }
-}
-
-func getWatchtowerClient(ctx *cli.Context) (watchtowerrpc.WatchtowerClient, func()) {
- conn := getClientConn(ctx, false)
- cleanup := func() {
- conn.Close()
- }
- return watchtowerrpc.NewWatchtowerClient(conn), cleanup
-}
-
-var towerInfoCommand = cli.Command{
- Name: "info",
- Usage: "Returns basic information related to the active watchtower.",
- Action: actionDecorator(towerInfo),
-}
-
-func towerInfo(ctx *cli.Context) er.R {
- if ctx.NArg() != 0 || ctx.NumFlags() > 0 {
- return er.E(cli.ShowCommandHelp(ctx, "info"))
- }
-
- client, cleanup := getWatchtowerClient(ctx)
- defer cleanup()
-
- req := &watchtowerrpc.GetInfoRequest{}
- resp, err := client.GetInfo(context.Background(), req)
- if err != nil {
- return err
- }
-
- printRespJSON(resp)
-
- return nil
-}
diff --git a/lnd/cmd/lncli/watchtower_default.go b/lnd/cmd/lncli/watchtower_default.go
deleted file mode 100644
index 41d887a9..00000000
--- a/lnd/cmd/lncli/watchtower_default.go
+++ /dev/null
@@ -1,10 +0,0 @@
-// +build !watchtowerrpc
-
-package main
-
-import "github.com/urfave/cli"
-
-// watchtowerCommands will return nil for non-watchtowerrpc builds.
-func watchtowerCommands() []cli.Command {
- return nil
-}
diff --git a/lnd/cmd/lncli/wtclient.go b/lnd/cmd/lncli/wtclient.go
deleted file mode 100644
index 6aadcfe8..00000000
--- a/lnd/cmd/lncli/wtclient.go
+++ /dev/null
@@ -1,272 +0,0 @@
-package main
-
-import (
- "context"
- "strings"
-
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/btcutil/util"
- "github.com/pkt-cash/pktd/lnd/lnrpc/wtclientrpc"
- "github.com/urfave/cli"
-)
-
-// wtclientCommands will return nil for non-wtclientrpc builds.
-func wtclientCommands() []cli.Command {
- return []cli.Command{
- {
- Name: "wtclient",
- Usage: "Interact with the watchtower client.",
- Category: "Watchtower",
- Subcommands: []cli.Command{
- addTowerCommand,
- removeTowerCommand,
- listTowersCommand,
- getTowerCommand,
- statsCommand,
- policyCommand,
- },
- },
- }
-}
-
-// getWtclient initializes a connection to the watchtower client RPC in order to
-// interact with it.
-func getWtclient(ctx *cli.Context) (wtclientrpc.WatchtowerClientClient, func()) {
- conn := getClientConn(ctx, false)
- cleanUp := func() {
- conn.Close()
- }
- return wtclientrpc.NewWatchtowerClientClient(conn), cleanUp
-}
-
-var addTowerCommand = cli.Command{
- Name: "add",
- Usage: "Register a watchtower to use for future sessions/backups.",
- Description: "If the watchtower has already been registered, then " +
- "this command serves as a way of updating the watchtower " +
- "with new addresses it is reachable over.",
- ArgsUsage: "pubkey@address",
- Action: actionDecorator(addTower),
-}
-
-func addTower(ctx *cli.Context) er.R {
- // Display the command's help message if the number of arguments/flags
- // is not what we expect.
- if ctx.NArg() != 1 || ctx.NumFlags() > 0 {
- return er.E(cli.ShowCommandHelp(ctx, "add"))
- }
-
- parts := strings.Split(ctx.Args().First(), "@")
- if len(parts) != 2 {
- return er.New("expected tower of format pubkey@address")
- }
- pubKey, err := util.DecodeHex(parts[0])
- if err != nil {
- return er.Errorf("invalid public key: %v", err)
- }
- address := parts[1]
-
- client, cleanUp := getWtclient(ctx)
- defer cleanUp()
-
- req := &wtclientrpc.AddTowerRequest{
- Pubkey: pubKey,
- Address: address,
- }
- resp, errr := client.AddTower(context.Background(), req)
- if errr != nil {
- return er.E(errr)
- }
-
- printRespJSON(resp)
- return nil
-}
-
-var removeTowerCommand = cli.Command{
- Name: "remove",
- Usage: "Remove a watchtower to prevent its use for future " +
- "sessions/backups.",
- Description: "An optional address can be provided to remove, " +
- "indicating that the watchtower is no longer reachable at " +
- "this address. If an address isn't provided, then the " +
- "watchtower will no longer be used for future sessions/backups.",
- ArgsUsage: "pubkey | pubkey@address",
- Action: actionDecorator(removeTower),
-}
-
-func removeTower(ctx *cli.Context) er.R {
- // Display the command's help message if the number of arguments/flags
- // is not what we expect.
- if ctx.NArg() != 1 || ctx.NumFlags() > 0 {
- return er.E(cli.ShowCommandHelp(ctx, "remove"))
- }
-
- // The command can have only one argument, but it can be interpreted in
- // either of the following formats:
- //
- // pubkey or pubkey@address
- //
- // The hex-encoded public key of the watchtower is always required,
- // while the second is an optional address we'll remove from the
- // watchtower's database record.
- parts := strings.Split(ctx.Args().First(), "@")
- if len(parts) > 2 {
- return er.New("expected tower of format pubkey@address")
- }
- pubKey, err := util.DecodeHex(parts[0])
- if err != nil {
- return er.Errorf("invalid public key: %v", err)
- }
- var address string
- if len(parts) == 2 {
- address = parts[1]
- }
-
- client, cleanUp := getWtclient(ctx)
- defer cleanUp()
-
- req := &wtclientrpc.RemoveTowerRequest{
- Pubkey: pubKey,
- Address: address,
- }
- resp, errr := client.RemoveTower(context.Background(), req)
- if errr != nil {
- return er.E(errr)
- }
-
- printRespJSON(resp)
- return nil
-}
-
-var listTowersCommand = cli.Command{
- Name: "towers",
- Usage: "Display information about all registered watchtowers.",
- Flags: []cli.Flag{
- cli.BoolFlag{
- Name: "include_sessions",
- Usage: "include sessions with the watchtower in the " +
- "response",
- },
- },
- Action: actionDecorator(listTowers),
-}
-
-func listTowers(ctx *cli.Context) er.R {
- // Display the command's help message if the number of arguments/flags
- // is not what we expect.
- if ctx.NArg() > 0 || ctx.NumFlags() > 1 {
- return er.E(cli.ShowCommandHelp(ctx, "towers"))
- }
-
- client, cleanUp := getWtclient(ctx)
- defer cleanUp()
-
- req := &wtclientrpc.ListTowersRequest{
- IncludeSessions: ctx.Bool("include_sessions"),
- }
- resp, errr := client.ListTowers(context.Background(), req)
- if errr != nil {
- return er.E(errr)
- }
-
- printRespJSON(resp)
-
- return nil
-}
-
-var getTowerCommand = cli.Command{
- Name: "tower",
- Usage: "Display information about a specific registered watchtower.",
- ArgsUsage: "pubkey",
- Flags: []cli.Flag{
- cli.BoolFlag{
- Name: "include_sessions",
- Usage: "include sessions with the watchtower in the " +
- "response",
- },
- },
- Action: actionDecorator(getTower),
-}
-
-func getTower(ctx *cli.Context) er.R {
- // Display the command's help message if the number of arguments/flags
- // is not what we expect.
- if ctx.NArg() != 1 || ctx.NumFlags() > 1 {
- return er.E(cli.ShowCommandHelp(ctx, "tower"))
- }
-
- // The command only has one argument, which we expect to be the
- // hex-encoded public key of the watchtower we'll display information
- // about.
- pubKey, err := util.DecodeHex(ctx.Args().Get(0))
- if err != nil {
- return er.Errorf("invalid public key: %v", err)
- }
-
- client, cleanUp := getWtclient(ctx)
- defer cleanUp()
-
- req := &wtclientrpc.GetTowerInfoRequest{
- Pubkey: pubKey,
- IncludeSessions: ctx.Bool("include_sessions"),
- }
- resp, errr := client.GetTowerInfo(context.Background(), req)
- if errr != nil {
- return er.E(errr)
- }
-
- printRespJSON(resp)
- return nil
-}
-
-var statsCommand = cli.Command{
- Name: "stats",
- Usage: "Display the session stats of the watchtower client.",
- Action: actionDecorator(stats),
-}
-
-func stats(ctx *cli.Context) er.R {
- // Display the command's help message if the number of arguments/flags
- // is not what we expect.
- if ctx.NArg() > 0 || ctx.NumFlags() > 0 {
- return er.E(cli.ShowCommandHelp(ctx, "stats"))
- }
-
- client, cleanUp := getWtclient(ctx)
- defer cleanUp()
-
- req := &wtclientrpc.StatsRequest{}
- resp, errr := client.Stats(context.Background(), req)
- if errr != nil {
- return er.E(errr)
- }
-
- printRespJSON(resp)
- return nil
-}
-
-var policyCommand = cli.Command{
- Name: "policy",
- Usage: "Display the active watchtower client policy configuration.",
- Action: actionDecorator(policy),
-}
-
-func policy(ctx *cli.Context) er.R {
- // Display the command's help message if the number of arguments/flags
- // is not what we expect.
- if ctx.NArg() > 0 || ctx.NumFlags() > 0 {
- return er.E(cli.ShowCommandHelp(ctx, "policy"))
- }
-
- client, cleanUp := getWtclient(ctx)
- defer cleanUp()
-
- req := &wtclientrpc.PolicyRequest{}
- resp, errr := client.Policy(context.Background(), req)
- if errr != nil {
- return er.E(errr)
- }
-
- printRespJSON(resp)
- return nil
-}
diff --git a/lnd/cmd/lnd/main.go b/lnd/cmd/lnd/main.go
deleted file mode 100644
index 5e9bea96..00000000
--- a/lnd/cmd/lnd/main.go
+++ /dev/null
@@ -1,43 +0,0 @@
-package main
-
-import (
- "fmt"
- "os"
-
- "github.com/jessevdk/go-flags"
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/lnd"
- "github.com/pkt-cash/pktd/lnd/signal"
-)
-
-func main() {
- // Load the configuration, and parse any command line options. This
- // function will also set up logging properly.
- loadedConfig, err := lnd.LoadConfig()
- if err != nil {
- errr := er.Wrapped(err)
- if e, ok := errr.(*flags.Error); !ok || e.Type != flags.ErrHelp {
- // Print error if not due to help request.
- _, _ = fmt.Fprintln(os.Stderr, err)
- os.Exit(1)
- }
-
- // Help was requested, exit normally.
- os.Exit(0)
- }
-
- // Hook interceptor for os signals.
- if err := signal.Intercept(); err != nil {
- _, _ = fmt.Fprintln(os.Stderr, err)
- os.Exit(1)
- }
-
- // Call the "real" main in a nested manner so the defers will properly
- // be executed in the case of a graceful shutdown.
- if err := lnd.Main(
- loadedConfig, lnd.ListenerCfg{}, signal.ShutdownChannel(),
- ); err != nil {
- _, _ = fmt.Fprintln(os.Stderr, err)
- os.Exit(1)
- }
-}
diff --git a/lnd/config.go b/lnd/config.go
deleted file mode 100644
index 9d4a7cad..00000000
--- a/lnd/config.go
+++ /dev/null
@@ -1,1638 +0,0 @@
-// Copyright (c) 2013-2017 The btcsuite developers
-// Copyright (c) 2015-2016 The Decred developers
-// Copyright (C) 2015-2020 The Lightning Network Developers
-
-package lnd
-
-import (
- "fmt"
- "io/ioutil"
- "net"
- "os"
- "os/user"
- "path"
- "path/filepath"
- "regexp"
- "strconv"
- "strings"
- "time"
-
- flags "github.com/jessevdk/go-flags"
- "github.com/pkt-cash/pktd/btcutil"
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/chaincfg/globalcfg"
- "github.com/pkt-cash/pktd/lnd/autopilot"
- "github.com/pkt-cash/pktd/lnd/chainreg"
- "github.com/pkt-cash/pktd/lnd/chanbackup"
- "github.com/pkt-cash/pktd/lnd/channeldb"
- "github.com/pkt-cash/pktd/lnd/discovery"
- "github.com/pkt-cash/pktd/lnd/htlcswitch"
- "github.com/pkt-cash/pktd/lnd/htlcswitch/hodl"
- "github.com/pkt-cash/pktd/lnd/input"
- "github.com/pkt-cash/pktd/lnd/lncfg"
- "github.com/pkt-cash/pktd/lnd/lnrpc/routerrpc"
- "github.com/pkt-cash/pktd/lnd/lnrpc/signrpc"
- "github.com/pkt-cash/pktd/lnd/routing"
- "github.com/pkt-cash/pktd/lnd/tor"
- "github.com/pkt-cash/pktd/neutrino"
- "github.com/pkt-cash/pktd/pktconfig/version"
- "github.com/pkt-cash/pktd/pktlog/log"
-)
-
-const (
- defaultDataDirname = "data"
- defaultChainSubDirname = "chain"
- defaultGraphSubDirname = "graph"
- defaultTowerSubDirname = "watchtower"
- defaultTLSCertFilename = "tls.cert"
- defaultTLSKeyFilename = "tls.key"
- defaultAdminMacFilename = "admin.macaroon"
- defaultReadMacFilename = "readonly.macaroon"
- defaultInvoiceMacFilename = "invoice.macaroon"
- defaultLogLevel = "info"
- defaultLogDirname = "logs"
- defaultLogFilename = "lnd.log"
- defaultRPCPort = 10009
- defaultRESTPort = 8080
- defaultPeerPort = 9735
- defaultRPCHost = "localhost"
-
- defaultNoSeedBackup = false
- defaultPaymentsExpirationGracePeriod = time.Duration(0)
- defaultTrickleDelay = 90 * 1000
- defaultChanStatusSampleInterval = time.Minute
- defaultChanEnableTimeout = 19 * time.Minute
- defaultChanDisableTimeout = 20 * time.Minute
- defaultHeightHintCacheQueryDisable = false
- defaultMaxLogFiles = 3
- defaultMaxLogFileSize = 10
- defaultMinBackoff = time.Second
- defaultMaxBackoff = time.Hour
- defaultLetsEncryptDirname = "letsencrypt"
- defaultLetsEncryptListen = ":80"
-
- defaultTorSOCKSPort = 9050
- defaultTorDNSHost = "soa.nodes.lightning.directory"
- defaultTorDNSPort = 53
- defaultTorControlPort = 9051
- defaultTorV2PrivateKeyFilename = "v2_onion_private_key"
- defaultTorV3PrivateKeyFilename = "v3_onion_private_key"
-
- // minTimeLockDelta is the minimum timelock we require for incoming
- // HTLCs on our channels.
- minTimeLockDelta = routing.MinCLTVDelta
-
- // defaultAcceptorTimeout is the time after which an RPCAcceptor will time
- // out and return false if it hasn't yet received a response.
- defaultAcceptorTimeout = 15 * time.Second
-
- defaultAlias = ""
- defaultColor = "#3399FF"
-
- // defaultHostSampleInterval is the default amount of time that the
- // HostAnnouncer will wait between DNS resolutions to check if the
- // backing IP of a host has changed.
- defaultHostSampleInterval = time.Minute * 5
-
- defaultChainInterval = time.Minute
- defaultChainTimeout = time.Second * 10
- defaultChainBackoff = time.Second * 30
- defaultChainAttempts = 3
-
- // Set defaults for a health check which ensures that we have space
- // available on disk. Although this check is off by default so that we
- // avoid breaking any existing setups (particularly on mobile), we still
- // set the other default values so that the health check can be easily
- // enabled with sane defaults.
- defaultRequiredDisk = 0.1
- defaultDiskInterval = time.Hour * 12
- defaultDiskTimeout = time.Second * 5
- defaultDiskBackoff = time.Minute
- defaultDiskAttempts = 0
-
- // defaultRemoteMaxHtlcs specifies the default limit for maximum
- // concurrent HTLCs the remote party may add to commitment transactions.
- // This value can be overridden with --default-remote-max-htlcs.
- defaultRemoteMaxHtlcs = 483
-
- // defaultMaxLocalCSVDelay is the maximum delay we accept on our
- // commitment output.
- // TODO(halseth): find a more scientific choice of value.
- defaultMaxLocalCSVDelay = 10000
-)
-
-var (
- // DefaultLndDir is the default directory where lnd tries to find its
- // configuration file and store its data. This is a directory in the
- // user's application data, for example:
- // C:\Users\\AppData\Local\Lnd on Windows
- // ~/.lnd on Linux
- // ~/Library/Application Support/Lnd on MacOS
- DefaultLndDir = btcutil.AppDataDir("lnd", false)
-
- // DefaultConfigFile is the default full path of lnd's configuration
- // file.
- DefaultConfigFile = filepath.Join(DefaultLndDir, lncfg.DefaultConfigFilename)
-
- defaultDataDir = filepath.Join(DefaultLndDir, defaultDataDirname)
- defaultLogDir = filepath.Join(DefaultLndDir, defaultLogDirname)
-
- defaultTowerDir = filepath.Join(defaultDataDir, defaultTowerSubDirname)
-
- defaultTLSCertPath = filepath.Join(DefaultLndDir, defaultTLSCertFilename)
- defaultTLSKeyPath = filepath.Join(DefaultLndDir, defaultTLSKeyFilename)
- defaultLetsEncryptDir = filepath.Join(DefaultLndDir, defaultLetsEncryptDirname)
-
- defaultBtcdDir = btcutil.AppDataDir("btcd", false)
- defaultBtcdRPCCertFile = filepath.Join(defaultBtcdDir, "rpc.cert")
-
- defaultLtcdDir = btcutil.AppDataDir("ltcd", false)
- defaultLtcdRPCCertFile = filepath.Join(defaultLtcdDir, "rpc.cert")
-
- defaultBitcoindDir = btcutil.AppDataDir("bitcoin", false)
- defaultLitecoindDir = btcutil.AppDataDir("litecoin", false)
-
- defaultTorSOCKS = net.JoinHostPort("localhost", strconv.Itoa(defaultTorSOCKSPort))
- defaultTorDNS = net.JoinHostPort(defaultTorDNSHost, strconv.Itoa(defaultTorDNSPort))
- defaultTorControl = net.JoinHostPort("localhost", strconv.Itoa(defaultTorControlPort))
-
- // bitcoindEsimateModes defines all the legal values for bitcoind's
- // estimatesmartfee RPC call.
- defaultBitcoindEstimateMode = "CONSERVATIVE"
- bitcoindEstimateModes = [2]string{"ECONOMICAL", defaultBitcoindEstimateMode}
-
- defaultSphinxDbName = "sphinxreplay.db"
-)
-
-// Config defines the configuration options for lnd.
-//
-// See LoadConfig for further details regarding the configuration
-// loading+parsing process.
-type Config struct {
- ShowVersion bool `short:"V" long:"version" description:"Display version information and exit"`
-
- LndDir string `long:"lnddir" description:"The base directory that contains lnd's data, logs, configuration file, etc."`
- ConfigFile string `short:"C" long:"configfile" description:"Path to configuration file"`
- DataDir string `short:"b" long:"datadir" description:"The directory to store lnd's data within"`
- SyncFreelist bool `long:"sync-freelist" description:"Whether the databases used within lnd should sync their freelist to disk. This is disabled by default resulting in improved memory performance during operation, but with an increase in startup time."`
-
- NoTLS bool `long:"notls" description:"Disable TLS on RPC and REST"`
- TLSCertPath string `long:"tlscertpath" description:"Path to write the TLS certificate for lnd's RPC and REST services"`
- TLSKeyPath string `long:"tlskeypath" description:"Path to write the TLS private key for lnd's RPC and REST services"`
- TLSExtraIPs []string `long:"tlsextraip" description:"Adds an extra ip to the generated certificate"`
- TLSExtraDomains []string `long:"tlsextradomain" description:"Adds an extra domain to the generated certificate"`
- TLSAutoRefresh bool `long:"tlsautorefresh" description:"Re-generate TLS certificate and key if the IPs or domains are changed"`
- TLSDisableAutofill bool `long:"tlsdisableautofill" description:"Do not include the interface IPs or the system hostname in TLS certificate, use first --tlsextradomain as Common Name instead, if set"`
-
- NoMacaroons bool `long:"no-macaroons" description:"Disable macaroon authentication, can only be used if server is not listening on a public interface."`
- AdminMacPath string `long:"adminmacaroonpath" description:"Path to write the admin macaroon for lnd's RPC and REST services if it doesn't exist"`
- ReadMacPath string `long:"readonlymacaroonpath" description:"Path to write the read-only macaroon for lnd's RPC and REST services if it doesn't exist"`
- InvoiceMacPath string `long:"invoicemacaroonpath" description:"Path to the invoice-only macaroon for lnd's RPC and REST services if it doesn't exist"`
- LogDir string `long:"logdir" description:"Directory to log output."`
- MaxLogFiles int `long:"maxlogfiles" description:"Maximum logfiles to keep (0 for no rotation)"`
- MaxLogFileSize int `long:"maxlogfilesize" description:"Maximum logfile size in MB"`
- AcceptorTimeout time.Duration `long:"acceptortimeout" description:"Time after which an RPCAcceptor will time out and return false if it hasn't yet received a response"`
-
- LetsEncryptDir string `long:"letsencryptdir" description:"The directory to store Let's Encrypt certificates within"`
- LetsEncryptListen string `long:"letsencryptlisten" description:"The IP:port on which lnd will listen for Let's Encrypt challenges. Let's Encrypt will always try to contact on port 80. Often non-root processes are not allowed to bind to ports lower than 1024. This configuration option allows a different port to be used, but must be used in combination with port forwarding from port 80. This configuration can also be used to specify another IP address to listen on, for example an IPv6 address."`
- LetsEncryptDomain string `long:"letsencryptdomain" description:"Request a Let's Encrypt certificate for this domain. Note that the certicate is only requested and stored when the first rpc connection comes in."`
-
- // We'll parse these 'raw' string arguments into real net.Addrs in the
- // loadConfig function. We need to expose the 'raw' strings so the
- // command line library can access them.
- // Only the parsed net.Addrs should be used!
- RawRPCListeners []string `long:"rpclisten" description:"Add an interface/port/socket to listen for RPC connections"`
- RawRESTListeners []string `long:"restlisten" description:"Add an interface/port/socket to listen for REST connections"`
- RawListeners []string `long:"listen" description:"Add an interface/port to listen for peer connections"`
- RawExternalIPs []string `long:"externalip" description:"Add an ip:port to the list of local addresses we claim to listen on to peers. If a port is not specified, the default (9735) will be used regardless of other parameters"`
- ExternalHosts []string `long:"externalhosts" description:"A set of hosts that should be periodically resolved to announce IPs for"`
- RPCListeners []net.Addr
- RESTListeners []net.Addr
- RestCORS []string `long:"restcors" description:"Add an ip:port/hostname to allow cross origin access from. To allow all origins, set as \"*\"."`
- Listeners []net.Addr
- ExternalIPs []net.Addr
- DisableListen bool `long:"nolisten" description:"Disable listening for incoming peer connections"`
- DisableRest bool `long:"norest" description:"Disable REST API"`
- DisableRestTLS bool `long:"no-rest-tls" description:"Disable TLS for REST connections"`
- NAT bool `long:"nat" description:"Toggle NAT traversal support (using either UPnP or NAT-PMP) to automatically advertise your external IP address to the network -- NOTE this does not support devices behind multiple NATs"`
- MinBackoff time.Duration `long:"minbackoff" description:"Shortest backoff when reconnecting to persistent peers. Valid time units are {s, m, h}."`
- MaxBackoff time.Duration `long:"maxbackoff" description:"Longest backoff when reconnecting to persistent peers. Valid time units are {s, m, h}."`
- ConnectionTimeout time.Duration `long:"connectiontimeout" description:"The timeout value for network connections. Valid time units are {ms, s, m, h}."`
-
- DebugLevel string `short:"d" long:"debuglevel" description:"Logging level for all subsystems {trace, debug, info, warn, error, critical} -- You may also specify ,=,=,... to set the log level for individual subsystems -- Use show to list available subsystems"`
-
- CPUProfile string `long:"cpuprofile" description:"Write CPU profile to the specified file"`
-
- Profile string `long:"profile" description:"Enable HTTP profiling on given port -- NOTE port must be between 1024 and 65535"`
-
- UnsafeDisconnect bool `long:"unsafe-disconnect" description:"DEPRECATED: Allows the rpcserver to intentionally disconnect from peers with open channels. THIS FLAG WILL BE REMOVED IN 0.10.0"`
- UnsafeReplay bool `long:"unsafe-replay" description:"Causes a link to replay the adds on its commitment txn after starting up, this enables testing of the sphinx replay logic."`
- MaxPendingChannels int `long:"maxpendingchannels" description:"The maximum number of incoming pending channels permitted per peer."`
- BackupFilePath string `long:"backupfilepath" description:"The target location of the channel backup file"`
-
- FeeURL string `long:"feeurl" description:"Optional URL for external fee estimation. If no URL is specified, the method for fee estimation will depend on the chosen backend and network."`
-
- Bitcoin *lncfg.Chain `group:"Bitcoin" namespace:"bitcoin"`
- BtcdMode *lncfg.Btcd `group:"btcd" namespace:"btcd"`
- BitcoindMode *lncfg.Bitcoind `group:"bitcoind" namespace:"bitcoind"`
- NeutrinoMode *lncfg.Neutrino `group:"neutrino" namespace:"neutrino"`
-
- Litecoin *lncfg.Chain `group:"Litecoin" namespace:"litecoin"`
- LtcdMode *lncfg.Btcd `group:"ltcd" namespace:"ltcd"`
- LitecoindMode *lncfg.Bitcoind `group:"litecoind" namespace:"litecoind"`
-
- Pkt *lncfg.Chain `group:"PKT" namespace:"pkt"`
-
- Autopilot *lncfg.AutoPilot `group:"Autopilot" namespace:"autopilot"`
-
- Tor *lncfg.Tor `group:"Tor" namespace:"tor"`
-
- SubRPCServers *subRPCServerConfigs `group:"subrpc"`
-
- Hodl *hodl.Config `group:"hodl" namespace:"hodl"`
-
- NoNetBootstrap bool `long:"nobootstrap" description:"If true, then automatic network bootstrapping will not be attempted."`
-
- NoSeedBackup bool `long:"noseedbackup" description:"If true, NO SEED WILL BE EXPOSED -- EVER, AND THE WALLET WILL BE ENCRYPTED USING THE DEFAULT PASSPHRASE. THIS FLAG IS ONLY FOR TESTING AND SHOULD NEVER BE USED ON MAINNET."`
-
- ResetWalletTransactions bool `long:"reset-wallet-transactions" description:"Removes all transaction history from the on-chain wallet on startup, forcing a full chain rescan starting at the wallet's birthday. Implements the same functionality as btcwallet's dropwtxmgr command. Should be set to false after successful execution to avoid rescanning on every restart of lnd."`
-
- PaymentsExpirationGracePeriod time.Duration `long:"payments-expiration-grace-period" description:"A period to wait before force closing channels with outgoing htlcs that have timed-out and are a result of this node initiated payments."`
- TrickleDelay int `long:"trickledelay" description:"Time in milliseconds between each release of announcements to the network"`
- ChanEnableTimeout time.Duration `long:"chan-enable-timeout" description:"The duration that a peer connection must be stable before attempting to send a channel update to reenable or cancel a pending disables of the peer's channels on the network."`
- ChanDisableTimeout time.Duration `long:"chan-disable-timeout" description:"The duration that must elapse after first detecting that an already active channel is actually inactive and sending channel update disabling it to the network. The pending disable can be canceled if the peer reconnects and becomes stable for chan-enable-timeout before the disable update is sent."`
- ChanStatusSampleInterval time.Duration `long:"chan-status-sample-interval" description:"The polling interval between attempts to detect if an active channel has become inactive due to its peer going offline."`
- HeightHintCacheQueryDisable bool `long:"height-hint-cache-query-disable" description:"Disable queries from the height-hint cache to try to recover channels stuck in the pending close state. Disabling height hint queries may cause longer chain rescans, resulting in a performance hit. Unset this after channels are unstuck so you can get better performance again."`
- Alias string `long:"alias" description:"The node alias. Used as a moniker by peers and intelligence services"`
- Color string `long:"color" description:"The color of the node in hex format (i.e. '#3399FF'). Used to customize node appearance in intelligence services"`
- MinChanSize int64 `long:"minchansize" description:"The smallest channel size (in satoshis) that we should accept. Incoming channels smaller than this will be rejected"`
- MaxChanSize int64 `long:"maxchansize" description:"The largest channel size (in satoshis) that we should accept. Incoming channels larger than this will be rejected"`
-
- DefaultRemoteMaxHtlcs uint16 `long:"default-remote-max-htlcs" description:"The default max_htlc applied when opening or accepting channels. This value limits the number of concurrent HTLCs that the remote party can add to the commitment. The maximum possible value is 483."`
-
- NumGraphSyncPeers int `long:"numgraphsyncpeers" description:"The number of peers that we should receive new graph updates from. This option can be tuned to save bandwidth for light clients or routing nodes."`
- HistoricalSyncInterval time.Duration `long:"historicalsyncinterval" description:"The polling interval between historical graph sync attempts. Each historical graph sync attempt ensures we reconcile with the remote peer's graph from the genesis block."`
-
- IgnoreHistoricalGossipFilters bool `long:"ignore-historical-gossip-filters" description:"If true, will not reply with historical data that matches the range specified by a remote peer's gossip_timestamp_filter. Doing so will result in lower memory and bandwidth requirements."`
-
- RejectPush bool `long:"rejectpush" description:"If true, lnd will not accept channel opening requests with non-zero push amounts. This should prevent accidental pushes to merchant nodes."`
-
- RejectHTLC bool `long:"rejecthtlc" description:"If true, lnd will not forward any HTLCs that are meant as onward payments. This option will still allow lnd to send HTLCs and receive HTLCs but lnd won't be used as a hop."`
-
- StaggerInitialReconnect bool `long:"stagger-initial-reconnect" description:"If true, will apply a randomized staggering between 0s and 30s when reconnecting to persistent peers on startup. The first 10 reconnections will be attempted instantly, regardless of the flag's value"`
-
- MaxOutgoingCltvExpiry uint32 `long:"max-cltv-expiry" description:"The maximum number of blocks funds could be locked up for when forwarding payments."`
-
- MaxChannelFeeAllocation float64 `long:"max-channel-fee-allocation" description:"The maximum percentage of total funds that can be allocated to a channel's commitment fee. This only applies for the initiator of the channel. Valid values are within [0.1, 1]."`
-
- DryRunMigration bool `long:"dry-run-migration" description:"If true, lnd will abort committing a migration if it would otherwise have been successful. This leaves the database unmodified, and still compatible with the previously active version of lnd."`
-
- net tor.Net
-
- EnableUpfrontShutdown bool `long:"enable-upfront-shutdown" description:"If true, option upfront shutdown script will be enabled. If peers that we open channels with support this feature, we will automatically set the script to which cooperative closes should be paid out to on channel open. This offers the partial protection of a channel peer disconnecting from us if cooperative close is attempted with a different script."`
-
- AcceptKeySend bool `long:"accept-keysend" description:"If true, spontaneous payments through keysend will be accepted. [experimental]"`
-
- KeysendHoldTime time.Duration `long:"keysend-hold-time" description:"If non-zero, keysend payments are accepted but not immediately settled. If the payment isn't settled manually after the specified time, it is canceled automatically. [experimental]"`
-
- GcCanceledInvoicesOnStartup bool `long:"gc-canceled-invoices-on-startup" description:"If true, we'll attempt to garbage collect canceled invoices upon start."`
-
- GcCanceledInvoicesOnTheFly bool `long:"gc-canceled-invoices-on-the-fly" description:"If true, we'll delete newly canceled invoices on the fly."`
-
- Routing *lncfg.Routing `group:"routing" namespace:"routing"`
-
- Workers *lncfg.Workers `group:"workers" namespace:"workers"`
-
- Caches *lncfg.Caches `group:"caches" namespace:"caches"`
-
- Prometheus lncfg.Prometheus `group:"prometheus" namespace:"prometheus"`
-
- WtClient *lncfg.WtClient `group:"wtclient" namespace:"wtclient"`
-
- Watchtower *lncfg.Watchtower `group:"watchtower" namespace:"watchtower"`
-
- ProtocolOptions *lncfg.ProtocolOptions `group:"protocol" namespace:"protocol"`
-
- AllowCircularRoute bool `long:"allow-circular-route" description:"If true, our node will allow htlc forwards that arrive and depart on the same channel."`
-
- HealthChecks *lncfg.HealthCheckConfig `group:"healthcheck" namespace:"healthcheck"`
-
- DB *lncfg.DB `group:"db" namespace:"db"`
-
- // registeredChains keeps track of all chains that have been registered
- // with the daemon.
- registeredChains *chainreg.ChainRegistry
-
- // networkDir is the path to the directory of the currently active
- // network. This path will hold the files related to each different
- // network.
- networkDir string
-
- // ActiveNetParams contains parameters of the target chain.
- ActiveNetParams chainreg.BitcoinNetParams
-}
-
-// DefaultConfig returns all default values for the Config struct.
-func DefaultConfig() Config {
- return Config{
- LndDir: DefaultLndDir,
- ConfigFile: DefaultConfigFile,
- DataDir: defaultDataDir,
- DebugLevel: defaultLogLevel,
- NoTLS: false,
- TLSCertPath: defaultTLSCertPath,
- TLSKeyPath: defaultTLSKeyPath,
- LetsEncryptDir: defaultLetsEncryptDir,
- LetsEncryptListen: defaultLetsEncryptListen,
- LogDir: defaultLogDir,
- MaxLogFiles: defaultMaxLogFiles,
- MaxLogFileSize: defaultMaxLogFileSize,
- AcceptorTimeout: defaultAcceptorTimeout,
- Bitcoin: &lncfg.Chain{
- MinHTLCIn: chainreg.DefaultBitcoinMinHTLCInMSat,
- MinHTLCOut: chainreg.DefaultBitcoinMinHTLCOutMSat,
- BaseFee: chainreg.DefaultBitcoinBaseFeeMSat,
- FeeRate: chainreg.DefaultBitcoinFeeRate,
- TimeLockDelta: chainreg.DefaultBitcoinTimeLockDelta,
- MaxLocalDelay: defaultMaxLocalCSVDelay,
- Node: "btcd",
- },
- BtcdMode: &lncfg.Btcd{
- Dir: defaultBtcdDir,
- RPCHost: defaultRPCHost,
- RPCCert: defaultBtcdRPCCertFile,
- },
- BitcoindMode: &lncfg.Bitcoind{
- Dir: defaultBitcoindDir,
- RPCHost: defaultRPCHost,
- EstimateMode: defaultBitcoindEstimateMode,
- },
- Litecoin: &lncfg.Chain{
- MinHTLCIn: chainreg.DefaultLitecoinMinHTLCInMSat,
- MinHTLCOut: chainreg.DefaultLitecoinMinHTLCOutMSat,
- BaseFee: chainreg.DefaultLitecoinBaseFeeMSat,
- FeeRate: chainreg.DefaultLitecoinFeeRate,
- TimeLockDelta: chainreg.DefaultLitecoinTimeLockDelta,
- MaxLocalDelay: defaultMaxLocalCSVDelay,
- Node: "ltcd",
- },
- LtcdMode: &lncfg.Btcd{
- Dir: defaultLtcdDir,
- RPCHost: defaultRPCHost,
- RPCCert: defaultLtcdRPCCertFile,
- },
- LitecoindMode: &lncfg.Bitcoind{
- Dir: defaultLitecoindDir,
- RPCHost: defaultRPCHost,
- EstimateMode: defaultBitcoindEstimateMode,
- },
- Pkt: &lncfg.Chain{
- MinHTLCIn: chainreg.DefaultPktMinHTLCInMSat,
- MinHTLCOut: chainreg.DefaultPktMinHTLCOutMSat,
- BaseFee: chainreg.DefaultPktBaseFeeMSat,
- FeeRate: chainreg.DefaultPktFeeRate,
- TimeLockDelta: chainreg.DefaultPktTimeLockDelta,
- MaxLocalDelay: defaultMaxLocalCSVDelay,
- Node: "neutrino",
- },
- NeutrinoMode: &lncfg.Neutrino{
- UserAgentName: neutrino.UserAgentName,
- UserAgentVersion: neutrino.UserAgentVersion,
- },
- UnsafeDisconnect: true,
- MaxPendingChannels: lncfg.DefaultMaxPendingChannels,
- NoSeedBackup: defaultNoSeedBackup,
- MinBackoff: defaultMinBackoff,
- MaxBackoff: defaultMaxBackoff,
- ConnectionTimeout: tor.DefaultConnTimeout,
- SubRPCServers: &subRPCServerConfigs{
- SignRPC: &signrpc.Config{},
- RouterRPC: routerrpc.DefaultConfig(),
- },
- Autopilot: &lncfg.AutoPilot{
- MaxChannels: 5,
- Allocation: 0.6,
- MinChannelSize: int64(minChanFundingSize),
- MaxChannelSize: int64(MaxFundingAmount),
- MinConfs: 1,
- ConfTarget: autopilot.DefaultConfTarget,
- Heuristic: map[string]float64{
- "top_centrality": 1.0,
- },
- },
- PaymentsExpirationGracePeriod: defaultPaymentsExpirationGracePeriod,
- TrickleDelay: defaultTrickleDelay,
- ChanStatusSampleInterval: defaultChanStatusSampleInterval,
- ChanEnableTimeout: defaultChanEnableTimeout,
- ChanDisableTimeout: defaultChanDisableTimeout,
- HeightHintCacheQueryDisable: defaultHeightHintCacheQueryDisable,
- Alias: defaultAlias,
- Color: defaultColor,
- MinChanSize: int64(minChanFundingSize),
- MaxChanSize: int64(0),
- DefaultRemoteMaxHtlcs: defaultRemoteMaxHtlcs,
- NumGraphSyncPeers: defaultMinPeers,
- HistoricalSyncInterval: discovery.DefaultHistoricalSyncInterval,
- Tor: &lncfg.Tor{
- SOCKS: defaultTorSOCKS,
- DNS: defaultTorDNS,
- Control: defaultTorControl,
- },
- net: &tor.ClearNet{},
- Workers: &lncfg.Workers{
- Read: lncfg.DefaultReadWorkers,
- Write: lncfg.DefaultWriteWorkers,
- Sig: lncfg.DefaultSigWorkers,
- },
- Caches: &lncfg.Caches{
- RejectCacheSize: channeldb.DefaultRejectCacheSize,
- ChannelCacheSize: channeldb.DefaultChannelCacheSize,
- },
- Prometheus: lncfg.DefaultPrometheus(),
- Watchtower: &lncfg.Watchtower{
- TowerDir: defaultTowerDir,
- },
- HealthChecks: &lncfg.HealthCheckConfig{
- ChainCheck: &lncfg.CheckConfig{
- Interval: defaultChainInterval,
- Timeout: defaultChainTimeout,
- Attempts: defaultChainAttempts,
- Backoff: defaultChainBackoff,
- },
- DiskCheck: &lncfg.DiskCheckConfig{
- RequiredRemaining: defaultRequiredDisk,
- CheckConfig: &lncfg.CheckConfig{
- Interval: defaultDiskInterval,
- Attempts: defaultDiskAttempts,
- Timeout: defaultDiskTimeout,
- Backoff: defaultDiskBackoff,
- },
- },
- },
- MaxOutgoingCltvExpiry: htlcswitch.DefaultMaxOutgoingCltvExpiry,
- MaxChannelFeeAllocation: htlcswitch.DefaultMaxLinkFeeAllocation,
- DB: lncfg.DefaultDB(),
- registeredChains: chainreg.NewChainRegistry(),
- ActiveNetParams: chainreg.BitcoinTestNetParams,
- }
-}
-
-// LoadConfig initializes and parses the config using a config file and command
-// line options.
-//
-// The configuration proceeds as follows:
-// 1) Start with a default config with sane settings
-// 2) Pre-parse the command line to check for an alternative config file
-// 3) Load configuration file overwriting defaults with any specified options
-// 4) Parse CLI options and overwrite/add any specified options
-func LoadConfig() (*Config, er.R) {
- // Pre-parse the command line options to pick up an alternative config
- // file.
- preCfg := DefaultConfig()
- if _, err := flags.Parse(&preCfg); err != nil {
- return nil, er.E(err)
- }
-
- // Show the version and exit if the version flag was specified.
- appName := filepath.Base(os.Args[0])
- appName = strings.TrimSuffix(appName, filepath.Ext(appName))
- usageMessage := fmt.Sprintf("Use %s -h to show usage", appName)
- if preCfg.ShowVersion {
- fmt.Println(appName, "version", version.Version())
- os.Exit(0)
- }
-
- // If the config file path has not been modified by the user, then we'll
- // use the default config file path. However, if the user has modified
- // their lnddir, then we should assume they intend to use the config
- // file within it.
- configFileDir := CleanAndExpandPath(preCfg.LndDir)
- configFilePath := CleanAndExpandPath(preCfg.ConfigFile)
- if configFileDir != DefaultLndDir {
- if configFilePath == DefaultConfigFile {
- configFilePath = filepath.Join(
- configFileDir, lncfg.DefaultConfigFilename,
- )
- }
- }
-
- // Next, load any additional configuration options from the file.
- var configFileError error
- cfg := preCfg
- if err := flags.IniParse(configFilePath, &cfg); err != nil {
- // If it's a parsing related error, then we'll return
- // immediately, otherwise we can proceed as possibly the config
- // file doesn't exist which is OK.
- if _, ok := err.(*flags.IniError); ok {
- return nil, er.E(err)
- }
-
- configFileError = err
- }
-
- // Finally, parse the remaining command line options again to ensure
- // they take precedence.
- if _, err := flags.Parse(&cfg); err != nil {
- return nil, er.E(err)
- }
-
- // Make sure everything we just loaded makes sense.
- cleanCfg, err := ValidateConfig(cfg, usageMessage)
- if err != nil {
- return nil, err
- }
-
- // Warn about missing config file only after all other configuration is
- // done. This prevents the warning on help messages and invalid
- // options. Note this should go directly before the return.
- if configFileError != nil {
- log.Warnf("%v", configFileError)
- }
-
- return cleanCfg, nil
-}
-
-// ValidateConfig check the given configuration to be sane. This makes sure no
-// illegal values or combination of values are set. All file system paths are
-// normalized. The cleaned up config is returned on success.
-func ValidateConfig(cfg Config, usageMessage string) (*Config, er.R) {
- // If the provided lnd directory is not the default, we'll modify the
- // path to all of the files and directories that will live within it.
- lndDir := CleanAndExpandPath(cfg.LndDir)
- if lndDir != DefaultLndDir {
- cfg.DataDir = filepath.Join(lndDir, defaultDataDirname)
- cfg.LetsEncryptDir = filepath.Join(
- lndDir, defaultLetsEncryptDirname,
- )
- cfg.TLSCertPath = filepath.Join(lndDir, defaultTLSCertFilename)
- cfg.TLSKeyPath = filepath.Join(lndDir, defaultTLSKeyFilename)
- cfg.LogDir = filepath.Join(lndDir, defaultLogDirname)
-
- // If the watchtower's directory is set to the default, i.e. the
- // user has not requested a different location, we'll move the
- // location to be relative to the specified lnd directory.
- if cfg.Watchtower.TowerDir == defaultTowerDir {
- cfg.Watchtower.TowerDir =
- filepath.Join(cfg.DataDir, defaultTowerSubDirname)
- }
- }
-
- funcName := "loadConfig"
- makeDirectory := func(dir string) er.R {
- errr := os.MkdirAll(dir, 0700)
- if errr != nil {
- // Show a nicer error message if it's because a symlink
- // is linked to a directory that does not exist
- // (probably because it's not mounted).
- var err er.R
- if e, ok := errr.(*os.PathError); ok && os.IsExist(errr) {
- link, lerr := os.Readlink(e.Path)
- if lerr == nil {
- str := "is symlink %s -> %s mounted?"
- err = er.Errorf(str, e.Path, link)
- }
- } else {
- err = er.E(errr)
- }
-
- str := "%s: Failed to create lnd directory: %v"
- err = er.Errorf(str, funcName, err)
- _, _ = fmt.Fprintln(os.Stderr, err)
- return err
- }
-
- return nil
- }
-
- // As soon as we're done parsing configuration options, ensure all paths
- // to directories and files are cleaned and expanded before attempting
- // to use them later on.
- cfg.DataDir = CleanAndExpandPath(cfg.DataDir)
- cfg.TLSCertPath = CleanAndExpandPath(cfg.TLSCertPath)
- cfg.TLSKeyPath = CleanAndExpandPath(cfg.TLSKeyPath)
- cfg.LetsEncryptDir = CleanAndExpandPath(cfg.LetsEncryptDir)
- cfg.AdminMacPath = CleanAndExpandPath(cfg.AdminMacPath)
- cfg.ReadMacPath = CleanAndExpandPath(cfg.ReadMacPath)
- cfg.InvoiceMacPath = CleanAndExpandPath(cfg.InvoiceMacPath)
- cfg.LogDir = CleanAndExpandPath(cfg.LogDir)
- cfg.BtcdMode.Dir = CleanAndExpandPath(cfg.BtcdMode.Dir)
- cfg.LtcdMode.Dir = CleanAndExpandPath(cfg.LtcdMode.Dir)
- cfg.BitcoindMode.Dir = CleanAndExpandPath(cfg.BitcoindMode.Dir)
- cfg.LitecoindMode.Dir = CleanAndExpandPath(cfg.LitecoindMode.Dir)
- cfg.Tor.PrivateKeyPath = CleanAndExpandPath(cfg.Tor.PrivateKeyPath)
- cfg.Tor.WatchtowerKeyPath = CleanAndExpandPath(cfg.Tor.WatchtowerKeyPath)
- cfg.Watchtower.TowerDir = CleanAndExpandPath(cfg.Watchtower.TowerDir)
-
- // Create the lnd directory and all other sub directories if they don't
- // already exist. This makes sure that directory trees are also created
- // for files that point to outside of the lnddir.
- dirs := []string{
- lndDir, cfg.DataDir,
- cfg.LetsEncryptDir, cfg.Watchtower.TowerDir,
- filepath.Dir(cfg.TLSCertPath), filepath.Dir(cfg.TLSKeyPath),
- filepath.Dir(cfg.AdminMacPath), filepath.Dir(cfg.ReadMacPath),
- filepath.Dir(cfg.InvoiceMacPath),
- filepath.Dir(cfg.Tor.PrivateKeyPath),
- filepath.Dir(cfg.Tor.WatchtowerKeyPath),
- }
- for _, dir := range dirs {
- if err := makeDirectory(dir); err != nil {
- return nil, err
- }
- }
-
- // Ensure that the user didn't attempt to specify negative values for
- // any of the autopilot params.
- if cfg.Autopilot.MaxChannels < 0 {
- str := "%s: autopilot.maxchannels must be non-negative"
- err := er.Errorf(str, funcName)
- _, _ = fmt.Fprintln(os.Stderr, err)
- return nil, err
- }
- if cfg.Autopilot.Allocation < 0 {
- str := "%s: autopilot.allocation must be non-negative"
- err := er.Errorf(str, funcName)
- _, _ = fmt.Fprintln(os.Stderr, err)
- return nil, err
- }
- if cfg.Autopilot.MinChannelSize < 0 {
- str := "%s: autopilot.minchansize must be non-negative"
- err := er.Errorf(str, funcName)
- _, _ = fmt.Fprintln(os.Stderr, err)
- return nil, err
- }
- if cfg.Autopilot.MaxChannelSize < 0 {
- str := "%s: autopilot.maxchansize must be non-negative"
- err := er.Errorf(str, funcName)
- _, _ = fmt.Fprintln(os.Stderr, err)
- return nil, err
- }
- if cfg.Autopilot.MinConfs < 0 {
- str := "%s: autopilot.minconfs must be non-negative"
- err := er.Errorf(str, funcName)
- _, _ = fmt.Fprintln(os.Stderr, err)
- return nil, err
- }
- if cfg.Autopilot.ConfTarget < 1 {
- str := "%s: autopilot.conftarget must be positive"
- err := er.Errorf(str, funcName)
- _, _ = fmt.Fprintln(os.Stderr, err)
- return nil, err
- }
-
- // Ensure that the specified values for the min and max channel size
- // are within the bounds of the normal chan size constraints.
- if cfg.Autopilot.MinChannelSize < int64(minChanFundingSize) {
- cfg.Autopilot.MinChannelSize = int64(minChanFundingSize)
- }
- if cfg.Autopilot.MaxChannelSize > int64(MaxFundingAmount) {
- cfg.Autopilot.MaxChannelSize = int64(MaxFundingAmount)
- }
-
- if _, err := validateAtplCfg(cfg.Autopilot); err != nil {
- return nil, err
- }
-
- // Ensure that --maxchansize is properly handled when set by user.
- // For non-Wumbo channels this limit remains 16777215 satoshis by default
- // as specified in BOLT-02. For wumbo channels this limit is 1,000,000,000.
- // satoshis (10 BTC). Always enforce --maxchansize explicitly set by user.
- // If unset (marked by 0 value), then enforce proper default.
- if cfg.MaxChanSize == 0 {
- if cfg.ProtocolOptions.Wumbo() {
- cfg.MaxChanSize = int64(MaxBtcFundingAmountWumbo)
- } else {
- cfg.MaxChanSize = int64(MaxBtcFundingAmount)
- }
- }
-
- // Ensure that the user specified values for the min and max channel
- // size make sense.
- if cfg.MaxChanSize < cfg.MinChanSize {
- return nil, er.Errorf("invalid channel size parameters: "+
- "max channel size %v, must be no less than min chan size %v",
- cfg.MaxChanSize, cfg.MinChanSize,
- )
- }
-
- // Don't allow superflous --maxchansize greater than
- // BOLT 02 soft-limit for non-wumbo channel
- if !cfg.ProtocolOptions.Wumbo() && cfg.MaxChanSize > int64(MaxFundingAmount) {
- return nil, er.Errorf("invalid channel size parameters: "+
- "maximum channel size %v is greater than maximum non-wumbo"+
- " channel size %v",
- cfg.MaxChanSize, MaxFundingAmount,
- )
- }
-
- // Ensure a valid max channel fee allocation was set.
- if cfg.MaxChannelFeeAllocation <= 0 || cfg.MaxChannelFeeAllocation > 1 {
- return nil, er.Errorf("invalid max channel fee allocation: "+
- "%v, must be within (0, 1]",
- cfg.MaxChannelFeeAllocation)
- }
-
- // Validate the Tor config parameters.
- socks, err := lncfg.ParseAddressString(
- cfg.Tor.SOCKS, strconv.Itoa(defaultTorSOCKSPort),
- cfg.net.ResolveTCPAddr,
- )
- if err != nil {
- return nil, err
- }
- cfg.Tor.SOCKS = socks.String()
-
- // We'll only attempt to normalize and resolve the DNS host if it hasn't
- // changed, as it doesn't need to be done for the default.
- if cfg.Tor.DNS != defaultTorDNS {
- dns, err := lncfg.ParseAddressString(
- cfg.Tor.DNS, strconv.Itoa(defaultTorDNSPort),
- cfg.net.ResolveTCPAddr,
- )
- if err != nil {
- return nil, err
- }
- cfg.Tor.DNS = dns.String()
- }
-
- control, err := lncfg.ParseAddressString(
- cfg.Tor.Control, strconv.Itoa(defaultTorControlPort),
- cfg.net.ResolveTCPAddr,
- )
- if err != nil {
- return nil, err
- }
- cfg.Tor.Control = control.String()
-
- // Ensure that tor socks host:port is not equal to tor control
- // host:port. This would lead to lnd not starting up properly.
- if cfg.Tor.SOCKS == cfg.Tor.Control {
- str := "%s: tor.socks and tor.control can not use " +
- "the same host:port"
- return nil, er.Errorf(str, funcName)
- }
-
- switch {
- case cfg.Tor.V2 && cfg.Tor.V3:
- return nil, er.New("either tor.v2 or tor.v3 can be set, " +
- "but not both")
- case cfg.DisableListen && (cfg.Tor.V2 || cfg.Tor.V3):
- return nil, er.New("listening must be enabled when " +
- "enabling inbound connections over Tor")
- }
-
- if cfg.Tor.PrivateKeyPath == "" {
- switch {
- case cfg.Tor.V2:
- cfg.Tor.PrivateKeyPath = filepath.Join(
- lndDir, defaultTorV2PrivateKeyFilename,
- )
- case cfg.Tor.V3:
- cfg.Tor.PrivateKeyPath = filepath.Join(
- lndDir, defaultTorV3PrivateKeyFilename,
- )
- }
- }
-
- if cfg.Tor.WatchtowerKeyPath == "" {
- switch {
- case cfg.Tor.V2:
- cfg.Tor.WatchtowerKeyPath = filepath.Join(
- cfg.Watchtower.TowerDir, defaultTorV2PrivateKeyFilename,
- )
- case cfg.Tor.V3:
- cfg.Tor.WatchtowerKeyPath = filepath.Join(
- cfg.Watchtower.TowerDir, defaultTorV3PrivateKeyFilename,
- )
- }
- }
-
- // Set up the network-related functions that will be used throughout
- // the daemon. We use the standard Go "net" package functions by
- // default. If we should be proxying all traffic through Tor, then
- // we'll use the Tor proxy specific functions in order to avoid leaking
- // our real information.
- if cfg.Tor.Active {
- cfg.net = &tor.ProxyNet{
- SOCKS: cfg.Tor.SOCKS,
- DNS: cfg.Tor.DNS,
- StreamIsolation: cfg.Tor.StreamIsolation,
- }
- }
-
- if cfg.DisableListen && cfg.NAT {
- return nil, er.New("NAT traversal cannot be used when " +
- "listening is disabled")
- }
- if cfg.NAT && len(cfg.ExternalHosts) != 0 {
- return nil, er.New("NAT support and externalhosts are " +
- "mutually exclusive, only one should be selected")
- }
-
- if !cfg.Bitcoin.Active && !cfg.Litecoin.Active && !cfg.Pkt.Active {
- // Default to PKT
- cfg.Pkt.Active = true
- }
-
- // Determine the active chain configuration and its parameters.
- switch {
- // At this moment, multiple active chains are not supported.
- case cfg.Litecoin.Active && cfg.Bitcoin.Active:
- str := "%s: Currently both Bitcoin and Litecoin cannot be " +
- "active together"
- return nil, er.Errorf(str, funcName)
-
- // Either Bitcoin must be active, or Litecoin must be active.
- // Otherwise, we don't know which chain we're on.
- case !cfg.Bitcoin.Active && !cfg.Litecoin.Active && !cfg.Pkt.Active:
- return nil, er.Errorf("%s: either bitcoin.active or "+
- "litecoin.active must be set to 1 (true)", funcName)
-
- case cfg.Pkt.Active:
- cfg.ActiveNetParams = chainreg.PktMainNetParams
- // Calling it /pkt/mainnet makes life easier
- cfg.ActiveNetParams.Name = "mainnet"
- cfg.Pkt.ChainDir = filepath.Join(cfg.DataDir,
- defaultChainSubDirname,
- chainreg.PktChain.String())
-
- // Finally we'll register the litecoin chain as our current
- // primary chain.
- cfg.registeredChains.RegisterPrimaryChain(chainreg.PktChain)
- MaxFundingAmount = maxPktFundingAmount
-
- case cfg.Litecoin.Active:
- err := cfg.Litecoin.Validate(minTimeLockDelta, minLtcRemoteDelay)
- if err != nil {
- return nil, err
- }
-
- // Multiple networks can't be selected simultaneously. Count
- // number of network flags passed; assign active network params
- // while we're at it.
- numNets := 0
- var ltcParams chainreg.LitecoinNetParams
- if cfg.Litecoin.MainNet {
- numNets++
- ltcParams = chainreg.LitecoinMainNetParams
- }
- if cfg.Litecoin.TestNet3 {
- numNets++
- ltcParams = chainreg.LitecoinTestNetParams
- }
- if cfg.Litecoin.RegTest {
- numNets++
- ltcParams = chainreg.LitecoinRegTestNetParams
- }
- if cfg.Litecoin.SimNet {
- numNets++
- ltcParams = chainreg.LitecoinSimNetParams
- }
-
- if numNets > 1 {
- str := "%s: The mainnet, testnet, and simnet params " +
- "can't be used together -- choose one of the " +
- "three"
- err := er.Errorf(str, funcName)
- return nil, err
- }
-
- // The target network must be provided, otherwise, we won't
- // know how to initialize the daemon.
- if numNets == 0 {
- str := "%s: either --litecoin.mainnet, or " +
- "litecoin.testnet must be specified"
- err := er.Errorf(str, funcName)
- return nil, err
- }
-
- // The litecoin chain is the current active chain. However
- // throughout the codebase we required chaincfg.Params. So as a
- // temporary hack, we'll mutate the default net params for
- // bitcoin with the litecoin specific information.
- chainreg.ApplyLitecoinParams(&cfg.ActiveNetParams, <cParams)
-
- switch cfg.Litecoin.Node {
- case "ltcd":
- err := parseRPCParams(cfg.Litecoin, cfg.LtcdMode,
- chainreg.LitecoinChain, funcName, cfg.ActiveNetParams)
- if err != nil {
- err := er.Errorf("unable to load RPC "+
- "credentials for ltcd: %v", err)
- return nil, err
- }
- case "litecoind":
- if cfg.Litecoin.SimNet {
- return nil, er.Errorf("%s: litecoind does not "+
- "support simnet", funcName)
- }
- err := parseRPCParams(cfg.Litecoin, cfg.LitecoindMode,
- chainreg.LitecoinChain, funcName, cfg.ActiveNetParams)
- if err != nil {
- err := er.Errorf("unable to load RPC "+
- "credentials for litecoind: %v", err)
- return nil, err
- }
- default:
- str := "%s: only ltcd and litecoind mode supported for " +
- "litecoin at this time"
- return nil, er.Errorf(str, funcName)
- }
-
- cfg.Litecoin.ChainDir = filepath.Join(cfg.DataDir,
- defaultChainSubDirname,
- chainreg.LitecoinChain.String())
-
- // Finally we'll register the litecoin chain as our current
- // primary chain.
- cfg.registeredChains.RegisterPrimaryChain(chainreg.LitecoinChain)
- MaxFundingAmount = maxLtcFundingAmount
-
- case cfg.Bitcoin.Active:
- // Multiple networks can't be selected simultaneously. Count
- // number of network flags passed; assign active network params
- // while we're at it.
- numNets := 0
- if cfg.Bitcoin.MainNet {
- numNets++
- cfg.ActiveNetParams = chainreg.BitcoinMainNetParams
- }
- if cfg.Bitcoin.TestNet3 {
- numNets++
- cfg.ActiveNetParams = chainreg.BitcoinTestNetParams
- }
- if cfg.Bitcoin.RegTest {
- numNets++
- cfg.ActiveNetParams = chainreg.BitcoinRegTestNetParams
- }
- if cfg.Bitcoin.SimNet {
- numNets++
- cfg.ActiveNetParams = chainreg.BitcoinSimNetParams
- }
- if numNets > 1 {
- str := "%s: The mainnet, testnet, regtest, and " +
- "simnet params can't be used together -- " +
- "choose one of the four"
- err := er.Errorf(str, funcName)
- return nil, err
- }
-
- // The target network must be provided, otherwise, we won't
- // know how to initialize the daemon.
- if numNets == 0 {
- str := "%s: either --bitcoin.mainnet, or " +
- "bitcoin.testnet, bitcoin.simnet, or bitcoin.regtest " +
- "must be specified"
- err := er.Errorf(str, funcName)
- return nil, err
- }
-
- err := cfg.Bitcoin.Validate(minTimeLockDelta, minBtcRemoteDelay)
- if err != nil {
- return nil, err
- }
-
- switch cfg.Bitcoin.Node {
- case "btcd":
- err := parseRPCParams(
- cfg.Bitcoin, cfg.BtcdMode, chainreg.BitcoinChain, funcName,
- cfg.ActiveNetParams,
- )
- if err != nil {
- err := er.Errorf("unable to load RPC "+
- "credentials for btcd: %v", err)
- return nil, err
- }
- case "bitcoind":
- if cfg.Bitcoin.SimNet {
- return nil, er.Errorf("%s: bitcoind does not "+
- "support simnet", funcName)
- }
-
- err := parseRPCParams(
- cfg.Bitcoin, cfg.BitcoindMode, chainreg.BitcoinChain, funcName,
- cfg.ActiveNetParams,
- )
- if err != nil {
- err := er.Errorf("unable to load RPC "+
- "credentials for bitcoind: %v", err)
- return nil, err
- }
- case "neutrino":
- // No need to get RPC parameters.
-
- default:
- str := "%s: only btcd, bitcoind, and neutrino mode " +
- "supported for bitcoin at this time"
- return nil, er.Errorf(str, funcName)
- }
-
- cfg.Bitcoin.ChainDir = filepath.Join(cfg.DataDir,
- defaultChainSubDirname,
- chainreg.BitcoinChain.String())
-
- // Finally we'll register the bitcoin chain as our current
- // primary chain.
- cfg.registeredChains.RegisterPrimaryChain(chainreg.BitcoinChain)
- }
- globalcfg.SelectConfig(cfg.ActiveNetParams.GlobalConf)
-
- // Ensure that the user didn't attempt to specify negative values for
- // any of the autopilot params.
- if cfg.Autopilot.MaxChannels < 0 {
- str := "%s: autopilot.maxchannels must be non-negative"
- err := er.Errorf(str, funcName)
- _, _ = fmt.Fprintln(os.Stderr, err)
- return nil, err
- }
- if cfg.Autopilot.Allocation < 0 {
- str := "%s: autopilot.allocation must be non-negative"
- err := er.Errorf(str, funcName)
- _, _ = fmt.Fprintln(os.Stderr, err)
- return nil, err
- }
- if cfg.Autopilot.MinChannelSize < 0 {
- str := "%s: autopilot.minchansize must be non-negative"
- err := er.Errorf(str, funcName)
- _, _ = fmt.Fprintln(os.Stderr, err)
- return nil, err
- }
- if cfg.Autopilot.MaxChannelSize < 0 {
- str := "%s: autopilot.maxchansize must be non-negative"
- err := er.Errorf(str, funcName)
- _, _ = fmt.Fprintln(os.Stderr, err)
- return nil, err
- }
-
- // Ensure that the specified values for the min and max channel size
- // don't are within the bounds of the normal chan size constraints.
- if cfg.Autopilot.MinChannelSize < int64(minChanFundingSize) {
- cfg.Autopilot.MinChannelSize = int64(minChanFundingSize)
- }
- if cfg.Autopilot.MaxChannelSize > int64(MaxFundingAmount) {
- cfg.Autopilot.MaxChannelSize = int64(MaxFundingAmount)
- }
-
- // Validate profile port number.
- if cfg.Profile != "" {
- profilePort, err := strconv.Atoi(cfg.Profile)
- if err != nil || profilePort < 1024 || profilePort > 65535 {
- str := "%s: The profile port must be between 1024 and 65535"
- err := er.Errorf(str, funcName)
- _, _ = fmt.Fprintln(os.Stderr, err)
- _, _ = fmt.Fprintln(os.Stderr, usageMessage)
- return nil, err
- }
- }
-
- // We'll now construct the network directory which will be where we
- // store all the data specific to this chain/network.
- cfg.networkDir = filepath.Join(
- cfg.DataDir, defaultChainSubDirname,
- cfg.registeredChains.PrimaryChain().String(),
- lncfg.NormalizeNetwork(cfg.ActiveNetParams.Name),
- )
-
- // If a custom macaroon directory wasn't specified and the data
- // directory has changed from the default path, then we'll also update
- // the path for the macaroons to be generated.
- if cfg.AdminMacPath == "" {
- cfg.AdminMacPath = filepath.Join(
- cfg.networkDir, defaultAdminMacFilename,
- )
- }
- if cfg.ReadMacPath == "" {
- cfg.ReadMacPath = filepath.Join(
- cfg.networkDir, defaultReadMacFilename,
- )
- }
- if cfg.InvoiceMacPath == "" {
- cfg.InvoiceMacPath = filepath.Join(
- cfg.networkDir, defaultInvoiceMacFilename,
- )
- }
-
- // Similarly, if a custom back up file path wasn't specified, then
- // we'll update the file location to match our set network directory.
- if cfg.BackupFilePath == "" {
- cfg.BackupFilePath = filepath.Join(
- cfg.networkDir, chanbackup.DefaultBackupFileName,
- )
- }
-
- // Append the network type to the log directory so it is "namespaced"
- // per network in the same fashion as the data directory.
- cfg.LogDir = filepath.Join(cfg.LogDir,
- cfg.registeredChains.PrimaryChain().String(),
- lncfg.NormalizeNetwork(cfg.ActiveNetParams.Name))
-
- // Parse, validate, and set debug log level(s).
- err = log.SetLogLevels(cfg.DebugLevel)
- if err != nil {
- err = er.Errorf("%s: %v", funcName, err.String())
- _, _ = fmt.Fprintln(os.Stderr, err)
- _, _ = fmt.Fprintln(os.Stderr, usageMessage)
- return nil, err
- }
-
- // At least one RPCListener is required. So listen on localhost per
- // default.
- if len(cfg.RawRPCListeners) == 0 {
- addr := fmt.Sprintf("localhost:%d", defaultRPCPort)
- cfg.RawRPCListeners = append(cfg.RawRPCListeners, addr)
- }
-
- // Listen on localhost if no REST listeners were specified.
- if len(cfg.RawRESTListeners) == 0 {
- addr := fmt.Sprintf("localhost:%d", defaultRESTPort)
- cfg.RawRESTListeners = append(cfg.RawRESTListeners, addr)
- }
-
- // Listen on the default interface/port if no listeners were specified.
- // An empty address string means default interface/address, which on
- // most unix systems is the same as 0.0.0.0. If Tor is active, we
- // default to only listening on localhost for hidden service
- // connections.
- if len(cfg.RawListeners) == 0 {
- addr := fmt.Sprintf(":%d", defaultPeerPort)
- if cfg.Tor.Active {
- addr = fmt.Sprintf("localhost:%d", defaultPeerPort)
- }
- cfg.RawListeners = append(cfg.RawListeners, addr)
- }
-
- // Add default port to all RPC listener addresses if needed and remove
- // duplicate addresses.
- cfg.RPCListeners, err = lncfg.NormalizeAddresses(
- cfg.RawRPCListeners, strconv.Itoa(defaultRPCPort),
- cfg.net.ResolveTCPAddr,
- )
- if err != nil {
- return nil, err
- }
-
- // Add default port to all REST listener addresses if needed and remove
- // duplicate addresses.
- cfg.RESTListeners, err = lncfg.NormalizeAddresses(
- cfg.RawRESTListeners, strconv.Itoa(defaultRESTPort),
- cfg.net.ResolveTCPAddr,
- )
- if err != nil {
- return nil, err
- }
-
- // For each of the RPC listeners (REST+gRPC), we'll ensure that users
- // have specified a safe combo for authentication. If not, we'll bail
- // out with an error. Since we don't allow disabling TLS for gRPC
- // connections we pass in tlsActive=true.
- err = lncfg.EnforceSafeAuthentication(
- cfg.RPCListeners, !cfg.NoMacaroons, true,
- )
- if err != nil {
- return nil, err
- }
-
- if cfg.DisableRest {
- log.Infof("REST API is disabled!")
- cfg.RESTListeners = nil
- } else {
- err = lncfg.EnforceSafeAuthentication(
- cfg.RESTListeners, !cfg.NoMacaroons, !cfg.DisableRestTLS,
- )
- if err != nil {
- return nil, err
- }
- }
-
- // Remove the listening addresses specified if listening is disabled.
- if cfg.DisableListen {
- log.Infof("Listening on the p2p interface is disabled!")
- cfg.Listeners = nil
- cfg.ExternalIPs = nil
- } else {
-
- // Add default port to all listener addresses if needed and remove
- // duplicate addresses.
- cfg.Listeners, err = lncfg.NormalizeAddresses(
- cfg.RawListeners, strconv.Itoa(defaultPeerPort),
- cfg.net.ResolveTCPAddr,
- )
- if err != nil {
- return nil, err
- }
-
- // Add default port to all external IP addresses if needed and remove
- // duplicate addresses.
- cfg.ExternalIPs, err = lncfg.NormalizeAddresses(
- cfg.RawExternalIPs, strconv.Itoa(defaultPeerPort),
- cfg.net.ResolveTCPAddr,
- )
- if err != nil {
- return nil, err
- }
-
- // For the p2p port it makes no sense to listen to an Unix socket.
- // Also, we would need to refactor the brontide listener to support
- // that.
- for _, p2pListener := range cfg.Listeners {
- if lncfg.IsUnix(p2pListener) {
- err := er.Errorf("unix socket addresses cannot be "+
- "used for the p2p connection listener: %s",
- p2pListener)
- return nil, err
- }
- }
- }
-
- // Ensure that the specified minimum backoff is below or equal to the
- // maximum backoff.
- if cfg.MinBackoff > cfg.MaxBackoff {
- return nil, er.Errorf("maxbackoff must be greater than " +
- "minbackoff")
- }
-
- // Newer versions of lnd added a new sub-config for bolt-specific
- // parameters. However we want to also allow existing users to use the
- // value on the top-level config. If the outer config value is set,
- // then we'll use that directly.
- if cfg.SyncFreelist {
- cfg.DB.Bolt.SyncFreelist = cfg.SyncFreelist
- }
-
- // Ensure that the user hasn't chosen a remote-max-htlc value greater
- // than the protocol maximum.
- maxRemoteHtlcs := uint16(input.MaxHTLCNumber / 2)
- if cfg.DefaultRemoteMaxHtlcs > maxRemoteHtlcs {
- return nil, er.Errorf("default-remote-max-htlcs (%v) must be "+
- "less than %v", cfg.DefaultRemoteMaxHtlcs,
- maxRemoteHtlcs)
- }
-
- // Validate the subconfigs for workers, caches, and the tower client.
- err = lncfg.Validate(
- cfg.Workers,
- cfg.Caches,
- cfg.WtClient,
- cfg.DB,
- cfg.HealthChecks,
- )
- if err != nil {
- return nil, err
- }
-
- // Finally, ensure that the user's color is correctly formatted,
- // otherwise the server will not be able to start after the unlocking
- // the wallet.
- _, err = parseHexColor(cfg.Color)
- if err != nil {
- return nil, er.Errorf("unable to parse node color: %v", err)
- }
-
- // All good, return the sanitized result.
- return &cfg, err
-}
-
-// localDatabaseDir returns the default directory where the
-// local bolt db files are stored.
-func (c *Config) localDatabaseDir() string {
- return filepath.Join(c.DataDir,
- defaultGraphSubDirname,
- lncfg.NormalizeNetwork(c.ActiveNetParams.Name))
-}
-
-func (c *Config) networkName() string {
- return lncfg.NormalizeNetwork(c.ActiveNetParams.Name)
-}
-
-// CleanAndExpandPath expands environment variables and leading ~ in the
-// passed path, cleans the result, and returns it.
-// This function is taken from https://github.com/btcsuite/btcd
-func CleanAndExpandPath(path string) string {
- if path == "" {
- return ""
- }
-
- // Expand initial ~ to OS specific home directory.
- if strings.HasPrefix(path, "~") {
- var homeDir string
- u, err := user.Current()
- if err == nil {
- homeDir = u.HomeDir
- } else {
- homeDir = os.Getenv("HOME")
- }
-
- path = strings.Replace(path, "~", homeDir, 1)
- }
-
- // NOTE: The os.ExpandEnv doesn't work with Windows-style %VARIABLE%,
- // but the variables can still be expanded via POSIX-style $VARIABLE.
- return filepath.Clean(os.ExpandEnv(path))
-}
-
-func parseRPCParams(cConfig *lncfg.Chain, nodeConfig interface{},
- net chainreg.ChainCode, funcName string,
- netParams chainreg.BitcoinNetParams) er.R { // nolint:unparam
-
- // First, we'll check our node config to make sure the RPC parameters
- // were set correctly. We'll also determine the path to the conf file
- // depending on the backend node.
- var daemonName, confDir, confFile string
- switch conf := nodeConfig.(type) {
- case *lncfg.Btcd:
- // If both RPCUser and RPCPass are set, we assume those
- // credentials are good to use.
- if conf.RPCUser != "" && conf.RPCPass != "" {
- return nil
- }
-
- // Get the daemon name for displaying proper errors.
- switch net {
- case chainreg.BitcoinChain:
- daemonName = "btcd"
- confDir = conf.Dir
- confFile = "btcd"
- case chainreg.LitecoinChain:
- daemonName = "ltcd"
- confDir = conf.Dir
- confFile = "ltcd"
- }
-
- // If only ONE of RPCUser or RPCPass is set, we assume the
- // user did that unintentionally.
- if conf.RPCUser != "" || conf.RPCPass != "" {
- return er.Errorf("please set both or neither of "+
- "%[1]v.rpcuser, %[1]v.rpcpass", daemonName)
- }
-
- case *lncfg.Bitcoind:
- // Ensure that if the ZMQ options are set, that they are not
- // equal.
- if conf.ZMQPubRawBlock != "" && conf.ZMQPubRawTx != "" {
- err := checkZMQOptions(
- conf.ZMQPubRawBlock, conf.ZMQPubRawTx,
- )
- if err != nil {
- return err
- }
- }
-
- // Ensure that if the estimate mode is set, that it is a legal
- // value.
- if conf.EstimateMode != "" {
- err := checkEstimateMode(conf.EstimateMode)
- if err != nil {
- return err
- }
- }
-
- // If all of RPCUser, RPCPass, ZMQBlockHost, and ZMQTxHost are
- // set, we assume those parameters are good to use.
- if conf.RPCUser != "" && conf.RPCPass != "" &&
- conf.ZMQPubRawBlock != "" && conf.ZMQPubRawTx != "" {
- return nil
- }
-
- // Get the daemon name for displaying proper errors.
- switch net {
- case chainreg.BitcoinChain:
- daemonName = "bitcoind"
- confDir = conf.Dir
- confFile = "bitcoin"
- case chainreg.LitecoinChain:
- daemonName = "litecoind"
- confDir = conf.Dir
- confFile = "litecoin"
- }
-
- // If not all of the parameters are set, we'll assume the user
- // did this unintentionally.
- if conf.RPCUser != "" || conf.RPCPass != "" ||
- conf.ZMQPubRawBlock != "" || conf.ZMQPubRawTx != "" {
-
- return er.Errorf("please set all or none of "+
- "%[1]v.rpcuser, %[1]v.rpcpass, "+
- "%[1]v.zmqpubrawblock, %[1]v.zmqpubrawtx",
- daemonName)
- }
- }
-
- // If we're in simnet mode, then the running btcd instance won't read
- // the RPC credentials from the configuration. So if lnd wasn't
- // specified the parameters, then we won't be able to start.
- if cConfig.SimNet {
- str := "%v: rpcuser and rpcpass must be set to your btcd " +
- "node's RPC parameters for simnet mode"
- return er.Errorf(str, funcName)
- }
-
- fmt.Println("Attempting automatic RPC configuration to " + daemonName)
-
- confFile = filepath.Join(confDir, fmt.Sprintf("%v.conf", confFile))
- switch cConfig.Node {
- case "btcd", "ltcd":
- nConf := nodeConfig.(*lncfg.Btcd)
- rpcUser, rpcPass, err := extractBtcdRPCParams(confFile)
- if err != nil {
- return er.Errorf("unable to extract RPC credentials:"+
- " %v, cannot start w/o RPC connection",
- err)
- }
- nConf.RPCUser, nConf.RPCPass = rpcUser, rpcPass
- case "bitcoind", "litecoind":
- nConf := nodeConfig.(*lncfg.Bitcoind)
- rpcUser, rpcPass, zmqBlockHost, zmqTxHost, err :=
- extractBitcoindRPCParams(netParams.Params.Name, confFile)
- if err != nil {
- return er.Errorf("unable to extract RPC credentials:"+
- " %v, cannot start w/o RPC connection",
- err)
- }
- nConf.RPCUser, nConf.RPCPass = rpcUser, rpcPass
- nConf.ZMQPubRawBlock, nConf.ZMQPubRawTx = zmqBlockHost, zmqTxHost
- }
-
- fmt.Printf("Automatically obtained %v's RPC credentials\n", daemonName)
- return nil
-}
-
-// extractBtcdRPCParams attempts to extract the RPC credentials for an existing
-// btcd instance. The passed path is expected to be the location of btcd's
-// application data directory on the target system.
-func extractBtcdRPCParams(btcdConfigPath string) (string, string, er.R) {
- // First, we'll open up the btcd configuration file found at the target
- // destination.
- btcdConfigFile, errr := os.Open(btcdConfigPath)
- if errr != nil {
- return "", "", er.E(errr)
- }
- defer func() { _ = btcdConfigFile.Close() }()
-
- // With the file open extract the contents of the configuration file so
- // we can attempt to locate the RPC credentials.
- configContents, errr := ioutil.ReadAll(btcdConfigFile)
- if errr != nil {
- return "", "", er.E(errr)
- }
-
- // Attempt to locate the RPC user using a regular expression. If we
- // don't have a match for our regular expression then we'll exit with
- // an error.
- rpcUserRegexp, errr := regexp.Compile(`(?m)^\s*rpcuser\s*=\s*([^\s]+)`)
- if errr != nil {
- return "", "", er.E(errr)
- }
- userSubmatches := rpcUserRegexp.FindSubmatch(configContents)
- if userSubmatches == nil {
- return "", "", er.Errorf("unable to find rpcuser in config")
- }
-
- // Similarly, we'll use another regular expression to find the set
- // rpcpass (if any). If we can't find the pass, then we'll exit with an
- // error.
- rpcPassRegexp, errr := regexp.Compile(`(?m)^\s*rpcpass\s*=\s*([^\s]+)`)
- if errr != nil {
- return "", "", er.E(errr)
- }
- passSubmatches := rpcPassRegexp.FindSubmatch(configContents)
- if passSubmatches == nil {
- return "", "", er.Errorf("unable to find rpcuser in config")
- }
-
- return string(userSubmatches[1]), string(passSubmatches[1]), nil
-}
-
-// extractBitcoindRPCParams attempts to extract the RPC credentials for an
-// existing bitcoind node instance. The passed path is expected to be the
-// location of bitcoind's bitcoin.conf on the target system. The routine looks
-// for a cookie first, optionally following the datadir configuration option in
-// the bitcoin.conf. If it doesn't find one, it looks for rpcuser/rpcpassword.
-func extractBitcoindRPCParams(networkName string,
- bitcoindConfigPath string) (string, string, string, string, er.R) {
-
- // First, we'll open up the bitcoind configuration file found at the
- // target destination.
- bitcoindConfigFile, errr := os.Open(bitcoindConfigPath)
- if errr != nil {
- return "", "", "", "", er.E(errr)
- }
- defer func() { _ = bitcoindConfigFile.Close() }()
-
- // With the file open extract the contents of the configuration file so
- // we can attempt to locate the RPC credentials.
- configContents, errr := ioutil.ReadAll(bitcoindConfigFile)
- if errr != nil {
- return "", "", "", "", er.E(errr)
- }
-
- // First, we'll look for the ZMQ hosts providing raw block and raw
- // transaction notifications.
- zmqBlockHostRE, errr := regexp.Compile(
- `(?m)^\s*zmqpubrawblock\s*=\s*([^\s]+)`,
- )
- if errr != nil {
- return "", "", "", "", er.E(errr)
- }
- zmqBlockHostSubmatches := zmqBlockHostRE.FindSubmatch(configContents)
- if len(zmqBlockHostSubmatches) < 2 {
- return "", "", "", "", er.Errorf("unable to find " +
- "zmqpubrawblock in config")
- }
- zmqTxHostRE, errr := regexp.Compile(`(?m)^\s*zmqpubrawtx\s*=\s*([^\s]+)`)
- if errr != nil {
- return "", "", "", "", er.E(errr)
- }
- zmqTxHostSubmatches := zmqTxHostRE.FindSubmatch(configContents)
- if len(zmqTxHostSubmatches) < 2 {
- return "", "", "", "", er.New("unable to find zmqpubrawtx " +
- "in config")
- }
- zmqBlockHost := string(zmqBlockHostSubmatches[1])
- zmqTxHost := string(zmqTxHostSubmatches[1])
- if err := checkZMQOptions(zmqBlockHost, zmqTxHost); err != nil {
- return "", "", "", "", err
- }
-
- // Next, we'll try to find an auth cookie. We need to detect the chain
- // by seeing if one is specified in the configuration file.
- dataDir := path.Dir(bitcoindConfigPath)
- dataDirRE, errr := regexp.Compile(`(?m)^\s*datadir\s*=\s*([^\s]+)`)
- if errr != nil {
- return "", "", "", "", er.E(errr)
- }
- dataDirSubmatches := dataDirRE.FindSubmatch(configContents)
- if dataDirSubmatches != nil {
- dataDir = string(dataDirSubmatches[1])
- }
-
- chainDir := "/"
- switch networkName {
- case "testnet3":
- chainDir = "/testnet3/"
- case "testnet4":
- chainDir = "/testnet4/"
- case "regtest":
- chainDir = "/regtest/"
- }
-
- cookie, err := ioutil.ReadFile(dataDir + chainDir + ".cookie")
- if err == nil {
- splitCookie := strings.Split(string(cookie), ":")
- if len(splitCookie) == 2 {
- return splitCookie[0], splitCookie[1], zmqBlockHost,
- zmqTxHost, nil
- }
- }
-
- // We didn't find a cookie, so we attempt to locate the RPC user using
- // a regular expression. If we don't have a match for our regular
- // expression then we'll exit with an error.
- rpcUserRegexp, errr := regexp.Compile(`(?m)^\s*rpcuser\s*=\s*([^\s]+)`)
- if errr != nil {
- return "", "", "", "", er.E(errr)
- }
- userSubmatches := rpcUserRegexp.FindSubmatch(configContents)
- if userSubmatches == nil {
- return "", "", "", "", er.Errorf("unable to find rpcuser in " +
- "config")
- }
-
- // Similarly, we'll use another regular expression to find the set
- // rpcpass (if any). If we can't find the pass, then we'll exit with an
- // error.
- rpcPassRegexp, errr := regexp.Compile(`(?m)^\s*rpcpassword\s*=\s*([^\s]+)`)
- if errr != nil {
- return "", "", "", "", er.E(errr)
- }
- passSubmatches := rpcPassRegexp.FindSubmatch(configContents)
- if passSubmatches == nil {
- return "", "", "", "", er.Errorf("unable to find rpcpassword " +
- "in config")
- }
-
- return string(userSubmatches[1]), string(passSubmatches[1]),
- zmqBlockHost, zmqTxHost, nil
-}
-
-// checkZMQOptions ensures that the provided addresses to use as the hosts for
-// ZMQ rawblock and rawtx notifications are different.
-func checkZMQOptions(zmqBlockHost, zmqTxHost string) er.R {
- if zmqBlockHost == zmqTxHost {
- return er.New("zmqpubrawblock and zmqpubrawtx must be set " +
- "to different addresses")
- }
-
- return nil
-}
-
-// checkEstimateMode ensures that the provided estimate mode is legal.
-func checkEstimateMode(estimateMode string) er.R {
- for _, mode := range bitcoindEstimateModes {
- if estimateMode == mode {
- return nil
- }
- }
-
- return er.Errorf("estimatemode must be one of the following: %v",
- bitcoindEstimateModes[:])
-}
diff --git a/lnd/contractcourt/anchor_resolver.go b/lnd/contractcourt/anchor_resolver.go
deleted file mode 100644
index f1c78f67..00000000
--- a/lnd/contractcourt/anchor_resolver.go
+++ /dev/null
@@ -1,209 +0,0 @@
-package contractcourt
-
-import (
- "io"
- "sync"
-
- "github.com/pkt-cash/pktd/btcutil"
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/chaincfg/chainhash"
- "github.com/pkt-cash/pktd/lnd/channeldb"
- "github.com/pkt-cash/pktd/lnd/input"
- "github.com/pkt-cash/pktd/lnd/sweep"
- "github.com/pkt-cash/pktd/pktlog/log"
- "github.com/pkt-cash/pktd/wire"
-)
-
-// anchorResolver is a resolver that will attempt to sweep our anchor output.
-type anchorResolver struct {
- // anchorSignDescriptor contains the information that is required to
- // sweep the anchor.
- anchorSignDescriptor input.SignDescriptor
-
- // anchor is the outpoint on the commitment transaction.
- anchor wire.OutPoint
-
- // resolved reflects if the contract has been fully resolved or not.
- resolved bool
-
- // broadcastHeight is the height that the original contract was
- // broadcast to the main-chain at. We'll use this value to bound any
- // historical queries to the chain for spends/confirmations.
- broadcastHeight uint32
-
- // chanPoint is the channel point of the original contract.
- chanPoint wire.OutPoint
-
- // currentReport stores the current state of the resolver for reporting
- // over the rpc interface.
- currentReport ContractReport
-
- // reportLock prevents concurrent access to the resolver report.
- reportLock sync.Mutex
-
- contractResolverKit
-}
-
-// newAnchorResolver instantiates a new anchor resolver.
-func newAnchorResolver(anchorSignDescriptor input.SignDescriptor,
- anchor wire.OutPoint, broadcastHeight uint32,
- chanPoint wire.OutPoint, resCfg ResolverConfig) *anchorResolver {
-
- amt := btcutil.Amount(anchorSignDescriptor.Output.Value)
-
- report := ContractReport{
- Outpoint: anchor,
- Type: ReportOutputAnchor,
- Amount: amt,
- LimboBalance: amt,
- RecoveredBalance: 0,
- }
-
- r := &anchorResolver{
- contractResolverKit: *newContractResolverKit(resCfg),
- anchorSignDescriptor: anchorSignDescriptor,
- anchor: anchor,
- broadcastHeight: broadcastHeight,
- chanPoint: chanPoint,
- currentReport: report,
- }
-
- return r
-}
-
-// ResolverKey returns an identifier which should be globally unique for this
-// particular resolver within the chain the original contract resides within.
-func (c *anchorResolver) ResolverKey() []byte {
- // The anchor resolver is stateless and doesn't need a database key.
- return nil
-}
-
-// Resolve offers the anchor output to the sweeper and waits for it to be swept.
-func (c *anchorResolver) Resolve() (ContractResolver, er.R) {
- // Attempt to update the sweep parameters to the post-confirmation
- // situation. We don't want to force sweep anymore, because the anchor
- // lost its special purpose to get the commitment confirmed. It is just
- // an output that we want to sweep only if it is economical to do so.
- //
- // An exclusive group is not necessary anymore, because we know that
- // this is the only anchor that can be swept.
- //
- // We also clear the parent tx information for cpfp, because the
- // commitment tx is confirmed.
- //
- // After a restart or when the remote force closes, the sweeper is not
- // yet aware of the anchor. In that case, it will be added as new input
- // to the sweeper.
- relayFeeRate := c.Sweeper.RelayFeePerKW()
-
- anchorInput := input.MakeBaseInput(
- &c.anchor,
- input.CommitmentAnchor,
- &c.anchorSignDescriptor,
- c.broadcastHeight,
- nil,
- )
-
- resultChan, err := c.Sweeper.SweepInput(
- &anchorInput,
- sweep.Params{
- Fee: sweep.FeePreference{
- FeeRate: relayFeeRate,
- },
- },
- )
- if err != nil {
- return nil, err
- }
-
- var (
- outcome channeldb.ResolverOutcome
- spendTx *chainhash.Hash
- )
-
- select {
- case sweepRes := <-resultChan:
- switch {
-
- // Anchor was swept successfully.
- case sweepRes.Err == nil:
- sweepTxID := sweepRes.Tx.TxHash()
-
- spendTx = &sweepTxID
- outcome = channeldb.ResolverOutcomeClaimed
-
- // Anchor was swept by someone else. This is possible after the
- // 16 block csv lock.
- case sweep.ErrRemoteSpend.Is(sweepRes.Err):
- log.Warnf("our anchor spent by someone else")
- outcome = channeldb.ResolverOutcomeUnclaimed
-
- // The sweeper gave up on sweeping the anchor. This happens
- // after the maximum number of sweep attempts has been reached.
- // See sweep.DefaultMaxSweepAttempts. Sweep attempts are
- // interspaced with random delays picked from a range that
- // increases exponentially.
- //
- // We consider the anchor as being lost.
- case sweep.ErrTooManyAttempts.Is(sweepRes.Err):
- log.Warnf("anchor sweep abandoned")
- outcome = channeldb.ResolverOutcomeUnclaimed
-
- // An unexpected error occurred.
- default:
- log.Errorf("unable to sweep anchor: %v", sweepRes.Err)
-
- return nil, sweepRes.Err
- }
-
- case <-c.quit:
- return nil, errResolverShuttingDown.Default()
- }
-
- // Update report to reflect that funds are no longer in limbo.
- c.reportLock.Lock()
- if outcome == channeldb.ResolverOutcomeClaimed {
- c.currentReport.RecoveredBalance = c.currentReport.LimboBalance
- }
- c.currentReport.LimboBalance = 0
- report := c.currentReport.resolverReport(
- spendTx, channeldb.ResolverTypeAnchor, outcome,
- )
- c.reportLock.Unlock()
-
- c.resolved = true
- return nil, c.PutResolverReport(nil, report)
-}
-
-// Stop signals the resolver to cancel any current resolution processes, and
-// suspend.
-//
-// NOTE: Part of the ContractResolver interface.
-func (c *anchorResolver) Stop() {
- close(c.quit)
-}
-
-// IsResolved returns true if the stored state in the resolve is fully
-// resolved. In this case the target output can be forgotten.
-//
-// NOTE: Part of the ContractResolver interface.
-func (c *anchorResolver) IsResolved() bool {
- return c.resolved
-}
-
-// report returns a report on the resolution state of the contract.
-func (c *anchorResolver) report() *ContractReport {
- c.reportLock.Lock()
- defer c.reportLock.Unlock()
-
- reportCopy := c.currentReport
- return &reportCopy
-}
-
-func (c *anchorResolver) Encode(w io.Writer) er.R {
- return er.New("serialization not supported")
-}
-
-// A compile time assertion to ensure anchorResolver meets the
-// ContractResolver interface.
-var _ ContractResolver = (*anchorResolver)(nil)
diff --git a/lnd/contractcourt/briefcase.go b/lnd/contractcourt/briefcase.go
deleted file mode 100644
index 7ead441f..00000000
--- a/lnd/contractcourt/briefcase.go
+++ /dev/null
@@ -1,1254 +0,0 @@
-package contractcourt
-
-import (
- "bytes"
- "io"
-
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/btcutil/util"
- "github.com/pkt-cash/pktd/chaincfg/chainhash"
- "github.com/pkt-cash/pktd/lnd/channeldb"
- "github.com/pkt-cash/pktd/lnd/channeldb/kvdb"
- "github.com/pkt-cash/pktd/lnd/input"
- "github.com/pkt-cash/pktd/lnd/lnwallet"
- "github.com/pkt-cash/pktd/wire"
-)
-
-// ContractResolutions is a wrapper struct around the two forms of resolutions
-// we may need to carry out once a contract is closing: resolving the
-// commitment output, and resolving any incoming+outgoing HTLC's still present
-// in the commitment.
-type ContractResolutions struct {
- // CommitHash is the txid of the commitment transaction.
- CommitHash chainhash.Hash
-
- // CommitResolution contains all data required to fully resolve a
- // commitment output.
- CommitResolution *lnwallet.CommitOutputResolution
-
- // HtlcResolutions contains all data required to fully resolve any
- // incoming+outgoing HTLC's present within the commitment transaction.
- HtlcResolutions lnwallet.HtlcResolutions
-
- // AnchorResolution contains the data required to sweep the anchor
- // output. If the channel type doesn't include anchors, the value of
- // this field will be nil.
- AnchorResolution *lnwallet.AnchorResolution
-}
-
-// IsEmpty returns true if the set of resolutions is "empty". A resolution is
-// empty if: our commitment output has been trimmed, and we don't have any
-// incoming or outgoing HTLC's active.
-func (c *ContractResolutions) IsEmpty() bool {
- return c.CommitResolution == nil &&
- len(c.HtlcResolutions.IncomingHTLCs) == 0 &&
- len(c.HtlcResolutions.OutgoingHTLCs) == 0 &&
- c.AnchorResolution == nil
-}
-
-// ArbitratorLog is the primary source of persistent storage for the
-// ChannelArbitrator. The log stores the current state of the
-// ChannelArbitrator's internal state machine, any items that are required to
-// properly make a state transition, and any unresolved contracts.
-type ArbitratorLog interface {
- // TODO(roasbeef): document on interface the errors expected to be
- // returned
-
- // CurrentState returns the current state of the ChannelArbitrator. It
- // takes an optional database transaction, which will be used if it is
- // non-nil, otherwise the lookup will be done in its own transaction.
- CurrentState(tx kvdb.RTx) (ArbitratorState, er.R)
-
- // CommitState persists, the current state of the chain attendant.
- CommitState(ArbitratorState) er.R
-
- // InsertUnresolvedContracts inserts a set of unresolved contracts into
- // the log. The log will then persistently store each contract until
- // they've been swapped out, or resolved. It takes a set of report which
- // should be written to disk if as well if it is non-nil.
- InsertUnresolvedContracts(reports []*channeldb.ResolverReport,
- resolvers ...ContractResolver) er.R
-
- // FetchUnresolvedContracts returns all unresolved contracts that have
- // been previously written to the log.
- FetchUnresolvedContracts() ([]ContractResolver, er.R)
-
- // SwapContract performs an atomic swap of the old contract for the new
- // contract. This method is used when after a contract has been fully
- // resolved, it produces another contract that needs to be resolved.
- SwapContract(old ContractResolver, new ContractResolver) er.R
-
- // ResolveContract marks a contract as fully resolved. Once a contract
- // has been fully resolved, it is deleted from persistent storage.
- ResolveContract(ContractResolver) er.R
-
- // LogContractResolutions stores a complete contract resolution for the
- // contract under watch. This method will be called once the
- // ChannelArbitrator either force closes a channel, or detects that the
- // remote party has broadcast their commitment on chain.
- LogContractResolutions(*ContractResolutions) er.R
-
- // FetchContractResolutions fetches the set of previously stored
- // contract resolutions from persistent storage.
- FetchContractResolutions() (*ContractResolutions, er.R)
-
- // InsertConfirmedCommitSet stores the known set of active HTLCs at the
- // time channel closure. We'll use this to reconstruct our set of chain
- // actions anew based on the confirmed and pending commitment state.
- InsertConfirmedCommitSet(c *CommitSet) er.R
-
- // FetchConfirmedCommitSet fetches the known confirmed active HTLC set
- // from the database. It takes an optional database transaction, which
- // will be used if it is non-nil, otherwise the lookup will be done in
- // its own transaction.
- FetchConfirmedCommitSet(tx kvdb.RTx) (*CommitSet, er.R)
-
- // FetchChainActions attempts to fetch the set of previously stored
- // chain actions. We'll use this upon restart to properly advance our
- // state machine forward.
- //
- // NOTE: This method only exists in order to be able to serve nodes had
- // channels in the process of closing before the CommitSet struct was
- // introduced.
- FetchChainActions() (ChainActionMap, er.R)
-
- // WipeHistory is to be called ONLY once *all* contracts have been
- // fully resolved, and the channel closure if finalized. This method
- // will delete all on-disk state within the persistent log.
- WipeHistory() er.R
-}
-
-// ArbitratorState is an enum that details the current state of the
-// ChannelArbitrator's state machine.
-type ArbitratorState uint8
-
-const (
- // StateDefault is the default state. In this state, no major actions
- // need to be executed.
- StateDefault ArbitratorState = 0
-
- // StateBroadcastCommit is a state that indicates that the attendant
- // has decided to broadcast the commitment transaction, but hasn't done
- // so yet.
- StateBroadcastCommit ArbitratorState = 1
-
- // StateCommitmentBroadcasted is a state that indicates that the
- // attendant has broadcasted the commitment transaction, and is now
- // waiting for it to confirm.
- StateCommitmentBroadcasted ArbitratorState = 6
-
- // StateContractClosed is a state that indicates the contract has
- // already been "closed", meaning the commitment is confirmed on chain.
- // At this point, we can now examine our active contracts, in order to
- // create the proper resolver for each one.
- StateContractClosed ArbitratorState = 2
-
- // StateWaitingFullResolution is a state that indicates that the
- // commitment transaction has been confirmed, and the attendant is now
- // waiting for all unresolved contracts to be fully resolved.
- StateWaitingFullResolution ArbitratorState = 3
-
- // StateFullyResolved is the final state of the attendant. In this
- // state, all related contracts have been resolved, and the attendant
- // can now be garbage collected.
- StateFullyResolved ArbitratorState = 4
-
- // StateError is the only error state of the resolver. If we enter this
- // state, then we cannot proceed with manual intervention as a state
- // transition failed.
- StateError ArbitratorState = 5
-)
-
-// String returns a human readable string describing the ArbitratorState.
-func (a ArbitratorState) String() string {
- switch a {
- case StateDefault:
- return "StateDefault"
-
- case StateBroadcastCommit:
- return "StateBroadcastCommit"
-
- case StateCommitmentBroadcasted:
- return "StateCommitmentBroadcasted"
-
- case StateContractClosed:
- return "StateContractClosed"
-
- case StateWaitingFullResolution:
- return "StateWaitingFullResolution"
-
- case StateFullyResolved:
- return "StateFullyResolved"
-
- case StateError:
- return "StateError"
-
- default:
- return "unknown state"
- }
-}
-
-// resolverType is an enum that enumerates the various types of resolvers. When
-// writing resolvers to disk, we prepend this to the raw bytes stored. This
-// allows us to properly decode the resolver into the proper type.
-type resolverType uint8
-
-const (
- // resolverTimeout is the type of a resolver that's tasked with
- // resolving an outgoing HTLC that is very close to timing out.
- resolverTimeout resolverType = 0
-
- // resolverSuccess is the type of a resolver that's tasked with
- // resolving an incoming HTLC that we already know the preimage of.
- resolverSuccess resolverType = 1
-
- // resolverOutgoingContest is the type of a resolver that's tasked with
- // resolving an outgoing HTLC that hasn't yet timed out.
- resolverOutgoingContest resolverType = 2
-
- // resolverIncomingContest is the type of a resolver that's tasked with
- // resolving an incoming HTLC that we don't yet know the preimage to.
- resolverIncomingContest resolverType = 3
-
- // resolverUnilateralSweep is the type of resolver that's tasked with
- // sweeping out direct commitment output form the remote party's
- // commitment transaction.
- resolverUnilateralSweep resolverType = 4
-)
-
-// resolverIDLen is the size of the resolver ID key. This is 36 bytes as we get
-// 32 bytes from the hash of the prev tx, and 4 bytes for the output index.
-const resolverIDLen = 36
-
-// resolverID is a key that uniquely identifies a resolver within a particular
-// chain. For this value we use the full outpoint of the resolver.
-type resolverID [resolverIDLen]byte
-
-// newResolverID returns a resolverID given the outpoint of a contract.
-func newResolverID(op wire.OutPoint) resolverID {
- var r resolverID
-
- copy(r[:], op.Hash[:])
-
- endian.PutUint32(r[32:], op.Index)
-
- return r
-}
-
-// logScope is a key that we use to scope the storage of a ChannelArbitrator
-// within the global log. We use this key to create a unique bucket within the
-// database and ensure that we don't have any key collisions. The log's scope
-// is define as: chainHash || chanPoint, where chanPoint is the chan point of
-// the original channel.
-type logScope [32 + 36]byte
-
-// newLogScope creates a new logScope key from the passed chainhash and
-// chanPoint.
-func newLogScope(chain chainhash.Hash, op wire.OutPoint) (*logScope, er.R) {
- var l logScope
- b := bytes.NewBuffer(l[0:0])
-
- if _, err := b.Write(chain[:]); err != nil {
- return nil, er.E(err)
- }
- if _, err := b.Write(op.Hash[:]); err != nil {
- return nil, er.E(err)
- }
-
- if err := util.WriteBin(b, endian, op.Index); err != nil {
- return nil, err
- }
-
- return &l, nil
-}
-
-var (
- // stateKey is the key that we use to store the current state of the
- // arbitrator.
- stateKey = []byte("state")
-
- // contractsBucketKey is the bucket within the logScope that will store
- // all the active unresolved contracts.
- contractsBucketKey = []byte("contractkey")
-
- // resolutionsKey is the key under the logScope that we'll use to store
- // the full set of resolutions for a channel.
- resolutionsKey = []byte("resolutions")
-
- // anchorResolutionKey is the key under the logScope that we'll use to
- // store the anchor resolution, if any.
- anchorResolutionKey = []byte("anchor-resolution")
-
- // actionsBucketKey is the key under the logScope that we'll use to
- // store all chain actions once they're determined.
- actionsBucketKey = []byte("chain-actions")
-
- // commitSetKey is the primary key under the logScope that we'll use to
- // store the confirmed active HTLC sets once we learn that a channel
- // has closed out on chain.
- commitSetKey = []byte("commit-set")
-)
-
-var (
- // errScopeBucketNoExist is returned when we can't find the proper
- // bucket for an arbitrator's scope.
- errScopeBucketNoExist = Err.CodeWithDetail("errScopeBucketNoExist", "scope bucket not found")
-
- // errNoContracts is returned when no contracts are found within the
- // log.
- errNoContracts = Err.CodeWithDetail("errNoContracts", "no stored contracts")
-
- // errNoResolutions is returned when the log doesn't contain any active
- // chain resolutions.
- errNoResolutions = Err.CodeWithDetail("errNoResolutions", "no contract resolutions exist")
-
- // errNoActions is retuned when the log doesn't contain any stored
- // chain actions.
- errNoActions = Err.CodeWithDetail("errNoActions", "no chain actions exist")
-
- // errNoCommitSet is return when the log doesn't contained a CommitSet.
- // This can happen if the channel hasn't closed yet, or a client is
- // running an older version that didn't yet write this state.
- errNoCommitSet = Err.CodeWithDetail("errNoCommitSet", "no commit set exists")
-)
-
-// boltArbitratorLog is an implementation of the ArbitratorLog interface backed
-// by a bolt DB instance.
-type boltArbitratorLog struct {
- db kvdb.Backend
-
- cfg ChannelArbitratorConfig
-
- scopeKey logScope
-}
-
-// newBoltArbitratorLog returns a new instance of the boltArbitratorLog given
-// an arbitrator config, and the items needed to create its log scope.
-func newBoltArbitratorLog(db kvdb.Backend, cfg ChannelArbitratorConfig,
- chainHash chainhash.Hash, chanPoint wire.OutPoint) (*boltArbitratorLog, er.R) {
-
- scope, err := newLogScope(chainHash, chanPoint)
- if err != nil {
- return nil, err
- }
-
- return &boltArbitratorLog{
- db: db,
- cfg: cfg,
- scopeKey: *scope,
- }, nil
-}
-
-// A compile time check to ensure boltArbitratorLog meets the ArbitratorLog
-// interface.
-var _ ArbitratorLog = (*boltArbitratorLog)(nil)
-
-func fetchContractReadBucket(tx kvdb.RTx, scopeKey []byte) (kvdb.RBucket, er.R) {
- scopeBucket := tx.ReadBucket(scopeKey)
- if scopeBucket == nil {
- return nil, errScopeBucketNoExist.Default()
- }
-
- contractBucket := scopeBucket.NestedReadBucket(contractsBucketKey)
- if contractBucket == nil {
- return nil, errNoContracts.Default()
- }
-
- return contractBucket, nil
-}
-
-func fetchContractWriteBucket(tx kvdb.RwTx, scopeKey []byte) (kvdb.RwBucket, er.R) {
- scopeBucket, err := tx.CreateTopLevelBucket(scopeKey)
- if err != nil {
- return nil, err
- }
-
- contractBucket, err := scopeBucket.CreateBucketIfNotExists(
- contractsBucketKey,
- )
- if err != nil {
- return nil, err
- }
-
- return contractBucket, nil
-}
-
-// writeResolver is a helper method that writes a contract resolver and stores
-// it it within the passed contractBucket using its unique resolutionsKey key.
-func (b *boltArbitratorLog) writeResolver(contractBucket kvdb.RwBucket,
- res ContractResolver) er.R {
-
- // Only persist resolvers that are stateful. Stateless resolvers don't
- // expose a resolver key.
- resKey := res.ResolverKey()
- if resKey == nil {
- return nil
- }
-
- // First, we'll write to the buffer the type of this resolver. Using
- // this byte, we can later properly deserialize the resolver properly.
- var (
- buf bytes.Buffer
- rType resolverType
- )
- switch res.(type) {
- case *htlcTimeoutResolver:
- rType = resolverTimeout
- case *htlcSuccessResolver:
- rType = resolverSuccess
- case *htlcOutgoingContestResolver:
- rType = resolverOutgoingContest
- case *htlcIncomingContestResolver:
- rType = resolverIncomingContest
- case *commitSweepResolver:
- rType = resolverUnilateralSweep
- }
- if _, err := buf.Write([]byte{byte(rType)}); err != nil {
- return er.E(err)
- }
-
- // With the type of the resolver written, we can then write out the raw
- // bytes of the resolver itself.
- if err := res.Encode(&buf); err != nil {
- return err
- }
-
- return contractBucket.Put(resKey, buf.Bytes())
-}
-
-// CurrentState returns the current state of the ChannelArbitrator. It takes an
-// optional database transaction, which will be used if it is non-nil, otherwise
-// the lookup will be done in its own transaction.
-//
-// NOTE: Part of the ContractResolver interface.
-func (b *boltArbitratorLog) CurrentState(tx kvdb.RTx) (ArbitratorState, er.R) {
- var (
- s ArbitratorState
- err er.R
- )
-
- if tx != nil {
- s, err = b.currentState(tx)
- } else {
- err = kvdb.View(b.db, func(tx kvdb.RTx) er.R {
- s, err = b.currentState(tx)
- return err
- }, func() {
- s = 0
- })
- }
-
- if err != nil && !errScopeBucketNoExist.Is(err) {
- return s, err
- }
-
- return s, nil
-}
-
-func (b *boltArbitratorLog) currentState(tx kvdb.RTx) (ArbitratorState, er.R) {
- scopeBucket := tx.ReadBucket(b.scopeKey[:])
- if scopeBucket == nil {
- return 0, errScopeBucketNoExist.Default()
- }
-
- stateBytes := scopeBucket.Get(stateKey)
- if stateBytes == nil {
- return 0, nil
- }
-
- return ArbitratorState(stateBytes[0]), nil
-}
-
-// CommitState persists, the current state of the chain attendant.
-//
-// NOTE: Part of the ContractResolver interface.
-func (b *boltArbitratorLog) CommitState(s ArbitratorState) er.R {
- return kvdb.Batch(b.db, func(tx kvdb.RwTx) er.R {
- scopeBucket, err := tx.CreateTopLevelBucket(b.scopeKey[:])
- if err != nil {
- return err
- }
-
- return scopeBucket.Put(stateKey[:], []byte{uint8(s)})
- })
-}
-
-// FetchUnresolvedContracts returns all unresolved contracts that have been
-// previously written to the log.
-//
-// NOTE: Part of the ContractResolver interface.
-func (b *boltArbitratorLog) FetchUnresolvedContracts() ([]ContractResolver, er.R) {
- resolverCfg := ResolverConfig{
- ChannelArbitratorConfig: b.cfg,
- Checkpoint: b.checkpointContract,
- }
- var contracts []ContractResolver
- err := kvdb.View(b.db, func(tx kvdb.RTx) er.R {
- contractBucket, err := fetchContractReadBucket(tx, b.scopeKey[:])
- if err != nil {
- return err
- }
-
- return contractBucket.ForEach(func(resKey, resBytes []byte) er.R {
- if len(resKey) != resolverIDLen {
- return nil
- }
-
- var res ContractResolver
-
- // We'll snip off the first byte of the raw resolver
- // bytes in order to extract what type of resolver
- // we're about to encode.
- resType := resolverType(resBytes[0])
-
- // Then we'll create a reader using the remaining
- // bytes.
- resReader := bytes.NewReader(resBytes[1:])
-
- switch resType {
- case resolverTimeout:
- res, err = newTimeoutResolverFromReader(
- resReader, resolverCfg,
- )
-
- case resolverSuccess:
- res, err = newSuccessResolverFromReader(
- resReader, resolverCfg,
- )
-
- case resolverOutgoingContest:
- res, err = newOutgoingContestResolverFromReader(
- resReader, resolverCfg,
- )
-
- case resolverIncomingContest:
- res, err = newIncomingContestResolverFromReader(
- resReader, resolverCfg,
- )
-
- case resolverUnilateralSweep:
- res, err = newCommitSweepResolverFromReader(
- resReader, resolverCfg,
- )
-
- default:
- return er.Errorf("unknown resolver type: %v", resType)
- }
-
- if err != nil {
- return err
- }
-
- contracts = append(contracts, res)
- return nil
- })
- }, func() {
- contracts = nil
- })
- if err != nil && !errScopeBucketNoExist.Is(err) && !errNoContracts.Is(err) {
- return nil, err
- }
-
- return contracts, nil
-}
-
-// InsertUnresolvedContracts inserts a set of unresolved contracts into the
-// log. The log will then persistently store each contract until they've been
-// swapped out, or resolved.
-//
-// NOTE: Part of the ContractResolver interface.
-func (b *boltArbitratorLog) InsertUnresolvedContracts(reports []*channeldb.ResolverReport,
- resolvers ...ContractResolver) er.R {
-
- return kvdb.Batch(b.db, func(tx kvdb.RwTx) er.R {
- contractBucket, err := fetchContractWriteBucket(tx, b.scopeKey[:])
- if err != nil {
- return err
- }
-
- for _, resolver := range resolvers {
- err = b.writeResolver(contractBucket, resolver)
- if err != nil {
- return err
- }
- }
-
- // Persist any reports that are present.
- for _, report := range reports {
- err := b.cfg.PutResolverReport(tx, report)
- if err != nil {
- return err
- }
- }
-
- return nil
- })
-}
-
-// SwapContract performs an atomic swap of the old contract for the new
-// contract. This method is used when after a contract has been fully resolved,
-// it produces another contract that needs to be resolved.
-//
-// NOTE: Part of the ContractResolver interface.
-func (b *boltArbitratorLog) SwapContract(oldContract, newContract ContractResolver) er.R {
- return kvdb.Batch(b.db, func(tx kvdb.RwTx) er.R {
- contractBucket, err := fetchContractWriteBucket(tx, b.scopeKey[:])
- if err != nil {
- return err
- }
-
- oldContractkey := oldContract.ResolverKey()
- if err := contractBucket.Delete(oldContractkey); err != nil {
- return err
- }
-
- return b.writeResolver(contractBucket, newContract)
- })
-}
-
-// ResolveContract marks a contract as fully resolved. Once a contract has been
-// fully resolved, it is deleted from persistent storage.
-//
-// NOTE: Part of the ContractResolver interface.
-func (b *boltArbitratorLog) ResolveContract(res ContractResolver) er.R {
- return kvdb.Batch(b.db, func(tx kvdb.RwTx) er.R {
- contractBucket, err := fetchContractWriteBucket(tx, b.scopeKey[:])
- if err != nil {
- return err
- }
-
- resKey := res.ResolverKey()
- return contractBucket.Delete(resKey)
- })
-}
-
-// LogContractResolutions stores a set of chain actions which are derived from
-// our set of active contracts, and the on-chain state. We'll write this et of
-// cations when: we decide to go on-chain to resolve a contract, or we detect
-// that the remote party has gone on-chain.
-//
-// NOTE: Part of the ContractResolver interface.
-func (b *boltArbitratorLog) LogContractResolutions(c *ContractResolutions) er.R {
- return kvdb.Batch(b.db, func(tx kvdb.RwTx) er.R {
- scopeBucket, err := tx.CreateTopLevelBucket(b.scopeKey[:])
- if err != nil {
- return err
- }
-
- var b bytes.Buffer
-
- if _, err := b.Write(c.CommitHash[:]); err != nil {
- return er.E(err)
- }
-
- // First, we'll write out the commit output's resolution.
- if c.CommitResolution == nil {
- if err := util.WriteBin(&b, endian, false); err != nil {
- return err
- }
- } else {
- if err := util.WriteBin(&b, endian, true); err != nil {
- return err
- }
- errr := encodeCommitResolution(&b, c.CommitResolution)
- if errr != nil {
- return errr
- }
- }
-
- // With the output for the commitment transaction written, we
- // can now write out the resolutions for the incoming and
- // outgoing HTLC's.
- numIncoming := uint32(len(c.HtlcResolutions.IncomingHTLCs))
- if err := util.WriteBin(&b, endian, numIncoming); err != nil {
- return err
- }
- for _, htlc := range c.HtlcResolutions.IncomingHTLCs {
- err := encodeIncomingResolution(&b, &htlc)
- if err != nil {
- return err
- }
- }
- numOutgoing := uint32(len(c.HtlcResolutions.OutgoingHTLCs))
- if err := util.WriteBin(&b, endian, numOutgoing); err != nil {
- return err
- }
- for _, htlc := range c.HtlcResolutions.OutgoingHTLCs {
- err := encodeOutgoingResolution(&b, &htlc)
- if err != nil {
- return err
- }
- }
-
- err = scopeBucket.Put(resolutionsKey, b.Bytes())
- if err != nil {
- return err
- }
-
- // Write out the anchor resolution if present.
- if c.AnchorResolution != nil {
- var b bytes.Buffer
- err := encodeAnchorResolution(&b, c.AnchorResolution)
- if err != nil {
- return err
- }
-
- err = scopeBucket.Put(anchorResolutionKey, b.Bytes())
- if err != nil {
- return err
- }
- }
-
- return nil
- })
-}
-
-// FetchContractResolutions fetches the set of previously stored contract
-// resolutions from persistent storage.
-//
-// NOTE: Part of the ContractResolver interface.
-func (b *boltArbitratorLog) FetchContractResolutions() (*ContractResolutions, er.R) {
- var c *ContractResolutions
- err := kvdb.View(b.db, func(tx kvdb.RTx) er.R {
- scopeBucket := tx.ReadBucket(b.scopeKey[:])
- if scopeBucket == nil {
- return errScopeBucketNoExist.Default()
- }
-
- resolutionBytes := scopeBucket.Get(resolutionsKey)
- if resolutionBytes == nil {
- return errNoResolutions.Default()
- }
-
- resReader := bytes.NewReader(resolutionBytes)
-
- _, err := util.ReadFull(resReader, c.CommitHash[:])
- if err != nil {
- return err
- }
-
- // First, we'll attempt to read out the commit resolution (if
- // it exists).
- var haveCommitRes bool
- err = util.ReadBin(resReader, endian, &haveCommitRes)
- if err != nil {
- return err
- }
- if haveCommitRes {
- c.CommitResolution = &lnwallet.CommitOutputResolution{}
- err = decodeCommitResolution(
- resReader, c.CommitResolution,
- )
- if err != nil {
- return err
- }
- }
-
- var (
- numIncoming uint32
- numOutgoing uint32
- )
-
- // Next, we'll read out the incoming and outgoing HTLC
- // resolutions.
- err = util.ReadBin(resReader, endian, &numIncoming)
- if err != nil {
- return err
- }
- c.HtlcResolutions.IncomingHTLCs = make([]lnwallet.IncomingHtlcResolution, numIncoming)
- for i := uint32(0); i < numIncoming; i++ {
- err := decodeIncomingResolution(
- resReader, &c.HtlcResolutions.IncomingHTLCs[i],
- )
- if err != nil {
- return err
- }
- }
-
- err = util.ReadBin(resReader, endian, &numOutgoing)
- if err != nil {
- return err
- }
- c.HtlcResolutions.OutgoingHTLCs = make([]lnwallet.OutgoingHtlcResolution, numOutgoing)
- for i := uint32(0); i < numOutgoing; i++ {
- err := decodeOutgoingResolution(
- resReader, &c.HtlcResolutions.OutgoingHTLCs[i],
- )
- if err != nil {
- return err
- }
- }
-
- anchorResBytes := scopeBucket.Get(anchorResolutionKey)
- if anchorResBytes != nil {
- c.AnchorResolution = &lnwallet.AnchorResolution{}
- resReader := bytes.NewReader(anchorResBytes)
- err := decodeAnchorResolution(
- resReader, c.AnchorResolution,
- )
- if err != nil {
- return err
- }
- }
-
- return nil
- }, func() {
- c = &ContractResolutions{}
- })
- if err != nil {
- return nil, err
- }
-
- return c, err
-}
-
-// FetchChainActions attempts to fetch the set of previously stored chain
-// actions. We'll use this upon restart to properly advance our state machine
-// forward.
-//
-// NOTE: Part of the ContractResolver interface.
-func (b *boltArbitratorLog) FetchChainActions() (ChainActionMap, er.R) {
- var actionsMap ChainActionMap
-
- err := kvdb.View(b.db, func(tx kvdb.RTx) er.R {
- scopeBucket := tx.ReadBucket(b.scopeKey[:])
- if scopeBucket == nil {
- return errScopeBucketNoExist.Default()
- }
-
- actionsBucket := scopeBucket.NestedReadBucket(actionsBucketKey)
- if actionsBucket == nil {
- return errNoActions.Default()
- }
-
- return actionsBucket.ForEach(func(action, htlcBytes []byte) er.R {
- if htlcBytes == nil {
- return nil
- }
-
- chainAction := ChainAction(action[0])
-
- htlcReader := bytes.NewReader(htlcBytes)
- htlcs, err := channeldb.DeserializeHtlcs(htlcReader)
- if err != nil {
- return err
- }
-
- actionsMap[chainAction] = htlcs
-
- return nil
- })
- }, func() {
- actionsMap = make(ChainActionMap)
- })
- if err != nil {
- return nil, err
- }
-
- return actionsMap, nil
-}
-
-// InsertConfirmedCommitSet stores the known set of active HTLCs at the time
-// channel closure. We'll use this to reconstruct our set of chain actions anew
-// based on the confirmed and pending commitment state.
-//
-// NOTE: Part of the ContractResolver interface.
-func (b *boltArbitratorLog) InsertConfirmedCommitSet(c *CommitSet) er.R {
- return kvdb.Batch(b.db, func(tx kvdb.RwTx) er.R {
- scopeBucket, err := tx.CreateTopLevelBucket(b.scopeKey[:])
- if err != nil {
- return err
- }
-
- var b bytes.Buffer
- if err := encodeCommitSet(&b, c); err != nil {
- return err
- }
-
- return scopeBucket.Put(commitSetKey, b.Bytes())
- })
-}
-
-// FetchConfirmedCommitSet fetches the known confirmed active HTLC set from the
-// database. It takes an optional database transaction, which will be used if it
-// is non-nil, otherwise the lookup will be done in its own transaction.
-//
-// NOTE: Part of the ContractResolver interface.
-func (b *boltArbitratorLog) FetchConfirmedCommitSet(tx kvdb.RTx) (*CommitSet, er.R) {
- if tx != nil {
- return b.fetchConfirmedCommitSet(tx)
- }
-
- var c *CommitSet
- err := kvdb.View(b.db, func(tx kvdb.RTx) er.R {
- var err er.R
- c, err = b.fetchConfirmedCommitSet(tx)
- return err
- }, func() {
- c = nil
- })
- if err != nil {
- return nil, err
- }
-
- return c, nil
-}
-
-func (b *boltArbitratorLog) fetchConfirmedCommitSet(tx kvdb.RTx) (*CommitSet, er.R) {
-
- scopeBucket := tx.ReadBucket(b.scopeKey[:])
- if scopeBucket == nil {
- return nil, errScopeBucketNoExist.Default()
- }
-
- commitSetBytes := scopeBucket.Get(commitSetKey)
- if commitSetBytes == nil {
- return nil, errNoCommitSet.Default()
- }
-
- return decodeCommitSet(bytes.NewReader(commitSetBytes))
-}
-
-// WipeHistory is to be called ONLY once *all* contracts have been fully
-// resolved, and the channel closure if finalized. This method will delete all
-// on-disk state within the persistent log.
-//
-// NOTE: Part of the ContractResolver interface.
-func (b *boltArbitratorLog) WipeHistory() er.R {
- return kvdb.Update(b.db, func(tx kvdb.RwTx) er.R {
- scopeBucket, err := tx.CreateTopLevelBucket(b.scopeKey[:])
- if err != nil {
- return err
- }
-
- // Once we have the main top-level bucket, we'll delete the key
- // that stores the state of the arbitrator.
- if err := scopeBucket.Delete(stateKey[:]); err != nil {
- return err
- }
-
- // Next, we'll delete any lingering contract state within the
- // contracts bucket by removing the bucket itself.
- err = scopeBucket.DeleteNestedBucket(contractsBucketKey)
- if err != nil && !kvdb.ErrBucketNotFound.Is(err) {
- return err
- }
-
- // Next, we'll delete storage of any lingering contract
- // resolutions.
- if err := scopeBucket.Delete(resolutionsKey); err != nil {
- return err
- }
-
- // We'll delete any chain actions that are still stored by
- // removing the enclosing bucket.
- err = scopeBucket.DeleteNestedBucket(actionsBucketKey)
- if err != nil && !kvdb.ErrBucketNotFound.Is(err) {
- return err
- }
-
- // Finally, we'll delete the enclosing bucket itself.
- return tx.DeleteTopLevelBucket(b.scopeKey[:])
- }, func() {})
-}
-
-// checkpointContract is a private method that will be fed into
-// ContractResolver instances to checkpoint their state once they reach
-// milestones during contract resolution. If the report provided is non-nil,
-// it should also be recorded.
-func (b *boltArbitratorLog) checkpointContract(c ContractResolver,
- reports ...*channeldb.ResolverReport) er.R {
-
- return kvdb.Update(b.db, func(tx kvdb.RwTx) er.R {
- contractBucket, err := fetchContractWriteBucket(tx, b.scopeKey[:])
- if err != nil {
- return err
- }
-
- if err := b.writeResolver(contractBucket, c); err != nil {
- return err
- }
-
- for _, report := range reports {
- if err := b.cfg.PutResolverReport(tx, report); err != nil {
- return err
- }
- }
-
- return nil
- }, func() {})
-}
-
-func encodeIncomingResolution(w io.Writer, i *lnwallet.IncomingHtlcResolution) er.R {
- if _, err := util.Write(w, i.Preimage[:]); err != nil {
- return err
- }
-
- if i.SignedSuccessTx == nil {
- if err := util.WriteBin(w, endian, false); err != nil {
- return err
- }
- } else {
- if err := util.WriteBin(w, endian, true); err != nil {
- return err
- }
-
- if err := i.SignedSuccessTx.Serialize(w); err != nil {
- return err
- }
- }
-
- if err := util.WriteBin(w, endian, i.CsvDelay); err != nil {
- return err
- }
- if _, err := util.Write(w, i.ClaimOutpoint.Hash[:]); err != nil {
- return err
- }
- if err := util.WriteBin(w, endian, i.ClaimOutpoint.Index); err != nil {
- return err
- }
- err := input.WriteSignDescriptor(w, &i.SweepSignDesc)
- if err != nil {
- return err
- }
-
- return nil
-}
-
-func decodeIncomingResolution(r io.Reader, h *lnwallet.IncomingHtlcResolution) er.R {
- if _, err := util.ReadFull(r, h.Preimage[:]); err != nil {
- return err
- }
-
- var txPresent bool
- if err := util.ReadBin(r, endian, &txPresent); err != nil {
- return err
- }
- if txPresent {
- h.SignedSuccessTx = &wire.MsgTx{}
- if err := h.SignedSuccessTx.Deserialize(r); err != nil {
- return err
- }
- }
-
- err := util.ReadBin(r, endian, &h.CsvDelay)
- if err != nil {
- return err
- }
- _, err = util.ReadFull(r, h.ClaimOutpoint.Hash[:])
- if err != nil {
- return err
- }
- err = util.ReadBin(r, endian, &h.ClaimOutpoint.Index)
- if err != nil {
- return err
- }
-
- return input.ReadSignDescriptor(r, &h.SweepSignDesc)
-}
-
-func encodeOutgoingResolution(w io.Writer, o *lnwallet.OutgoingHtlcResolution) er.R {
- if err := util.WriteBin(w, endian, o.Expiry); err != nil {
- return err
- }
-
- if o.SignedTimeoutTx == nil {
- if err := util.WriteBin(w, endian, false); err != nil {
- return err
- }
- } else {
- if err := util.WriteBin(w, endian, true); err != nil {
- return err
- }
-
- if err := o.SignedTimeoutTx.Serialize(w); err != nil {
- return err
- }
- }
-
- if err := util.WriteBin(w, endian, o.CsvDelay); err != nil {
- return err
- }
- if _, err := util.Write(w, o.ClaimOutpoint.Hash[:]); err != nil {
- return err
- }
- if err := util.WriteBin(w, endian, o.ClaimOutpoint.Index); err != nil {
- return err
- }
-
- return input.WriteSignDescriptor(w, &o.SweepSignDesc)
-}
-
-func decodeOutgoingResolution(r io.Reader, o *lnwallet.OutgoingHtlcResolution) er.R {
- err := util.ReadBin(r, endian, &o.Expiry)
- if err != nil {
- return err
- }
-
- var txPresent bool
- if err := util.ReadBin(r, endian, &txPresent); err != nil {
- return err
- }
- if txPresent {
- o.SignedTimeoutTx = &wire.MsgTx{}
- if err := o.SignedTimeoutTx.Deserialize(r); err != nil {
- return err
- }
- }
-
- err = util.ReadBin(r, endian, &o.CsvDelay)
- if err != nil {
- return err
- }
- _, err = util.ReadFull(r, o.ClaimOutpoint.Hash[:])
- if err != nil {
- return err
- }
- err = util.ReadBin(r, endian, &o.ClaimOutpoint.Index)
- if err != nil {
- return err
- }
-
- return input.ReadSignDescriptor(r, &o.SweepSignDesc)
-}
-
-func encodeCommitResolution(w io.Writer,
- c *lnwallet.CommitOutputResolution) er.R {
-
- if _, err := util.Write(w, c.SelfOutPoint.Hash[:]); err != nil {
- return err
- }
- err := util.WriteBin(w, endian, c.SelfOutPoint.Index)
- if err != nil {
- return err
- }
-
- err = input.WriteSignDescriptor(w, &c.SelfOutputSignDesc)
- if err != nil {
- return err
- }
-
- return util.WriteBin(w, endian, c.MaturityDelay)
-}
-
-func decodeCommitResolution(r io.Reader,
- c *lnwallet.CommitOutputResolution) er.R {
-
- _, err := util.ReadFull(r, c.SelfOutPoint.Hash[:])
- if err != nil {
- return err
- }
- err = util.ReadBin(r, endian, &c.SelfOutPoint.Index)
- if err != nil {
- return err
- }
-
- err = input.ReadSignDescriptor(r, &c.SelfOutputSignDesc)
- if err != nil {
- return err
- }
-
- return util.ReadBin(r, endian, &c.MaturityDelay)
-}
-
-func encodeAnchorResolution(w io.Writer,
- a *lnwallet.AnchorResolution) er.R {
-
- if _, err := util.Write(w, a.CommitAnchor.Hash[:]); err != nil {
- return err
- }
- err := util.WriteBin(w, endian, a.CommitAnchor.Index)
- if err != nil {
- return err
- }
-
- return input.WriteSignDescriptor(w, &a.AnchorSignDescriptor)
-}
-
-func decodeAnchorResolution(r io.Reader,
- a *lnwallet.AnchorResolution) er.R {
-
- _, err := util.ReadFull(r, a.CommitAnchor.Hash[:])
- if err != nil {
- return err
- }
- err = util.ReadBin(r, endian, &a.CommitAnchor.Index)
- if err != nil {
- return err
- }
-
- return input.ReadSignDescriptor(r, &a.AnchorSignDescriptor)
-}
-
-func encodeHtlcSetKey(w io.Writer, h *HtlcSetKey) er.R {
- err := util.WriteBin(w, endian, h.IsRemote)
- if err != nil {
- return err
- }
- return util.WriteBin(w, endian, h.IsPending)
-}
-
-func encodeCommitSet(w io.Writer, c *CommitSet) er.R {
- if err := encodeHtlcSetKey(w, c.ConfCommitKey); err != nil {
- return err
- }
-
- numSets := uint8(len(c.HtlcSets))
- if err := util.WriteBin(w, endian, numSets); err != nil {
- return err
- }
-
- for htlcSetKey, htlcs := range c.HtlcSets {
- htlcSetKey := htlcSetKey
- if err := encodeHtlcSetKey(w, &htlcSetKey); err != nil {
- return err
- }
-
- if err := channeldb.SerializeHtlcs(w, htlcs...); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func decodeHtlcSetKey(r io.Reader, h *HtlcSetKey) er.R {
- err := util.ReadBin(r, endian, &h.IsRemote)
- if err != nil {
- return err
- }
-
- return util.ReadBin(r, endian, &h.IsPending)
-}
-
-func decodeCommitSet(r io.Reader) (*CommitSet, er.R) {
- c := &CommitSet{
- ConfCommitKey: &HtlcSetKey{},
- HtlcSets: make(map[HtlcSetKey][]channeldb.HTLC),
- }
-
- if err := decodeHtlcSetKey(r, c.ConfCommitKey); err != nil {
- return nil, err
- }
-
- var numSets uint8
- if err := util.ReadBin(r, endian, &numSets); err != nil {
- return nil, err
- }
-
- for i := uint8(0); i < numSets; i++ {
- var htlcSetKey HtlcSetKey
- if err := decodeHtlcSetKey(r, &htlcSetKey); err != nil {
- return nil, err
- }
-
- htlcs, err := channeldb.DeserializeHtlcs(r)
- if err != nil {
- return nil, err
- }
-
- c.HtlcSets[htlcSetKey] = htlcs
- }
-
- return c, nil
-}
diff --git a/lnd/contractcourt/briefcase_test.go b/lnd/contractcourt/briefcase_test.go
deleted file mode 100644
index 8c4e125e..00000000
--- a/lnd/contractcourt/briefcase_test.go
+++ /dev/null
@@ -1,774 +0,0 @@
-package contractcourt
-
-import (
- "crypto/rand"
- "io/ioutil"
- "os"
- "reflect"
- "testing"
- "time"
-
- prand "math/rand"
-
- "github.com/davecgh/go-spew/spew"
- "github.com/pkt-cash/pktd/btcec"
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/chaincfg/chainhash"
- "github.com/pkt-cash/pktd/lnd/channeldb"
- "github.com/pkt-cash/pktd/lnd/channeldb/kvdb"
- "github.com/pkt-cash/pktd/lnd/input"
- "github.com/pkt-cash/pktd/lnd/lnwallet"
- "github.com/pkt-cash/pktd/txscript/params"
- "github.com/pkt-cash/pktd/wire"
-)
-
-var (
- testChainHash = [chainhash.HashSize]byte{
- 0x51, 0xb6, 0x37, 0xd8, 0xfc, 0xd2, 0xc6, 0xda,
- 0x48, 0x59, 0xe6, 0x96, 0x31, 0x13, 0xa1, 0x17,
- 0x2d, 0xe7, 0x93, 0xe4,
- }
-
- testChanPoint1 = wire.OutPoint{
- Hash: chainhash.Hash{
- 0x51, 0xb6, 0x37, 0xd8, 0xfc, 0xd2, 0xc6, 0xda,
- 0x48, 0x59, 0xe6, 0x96, 0x31, 0x13, 0xa1, 0x17,
- 0x2d, 0xe7, 0x93, 0xe4,
- },
- Index: 1,
- }
-
- testChanPoint2 = wire.OutPoint{
- Hash: chainhash.Hash{
- 0x48, 0x59, 0xe6, 0x96, 0x31, 0x13, 0xa1, 0x17,
- 0x51, 0xb6, 0x37, 0xd8, 0xfc, 0xd2, 0xc6, 0xda,
- 0x2d, 0xe7, 0x93, 0xe4,
- },
- Index: 2,
- }
-
- testChanPoint3 = wire.OutPoint{
- Hash: chainhash.Hash{
- 0x48, 0x59, 0xe6, 0x96, 0x31, 0x13, 0xa1, 0x17,
- 0x51, 0xb6, 0x37, 0xd8, 0xfc, 0xd2, 0xc6, 0xda,
- 0x2d, 0xe7, 0x93, 0xe4,
- },
- Index: 3,
- }
-
- testPreimage = [32]byte{
- 0x52, 0xb6, 0x37, 0xd8, 0xfc, 0xd2, 0xc6, 0xda,
- 0x48, 0x59, 0xe6, 0x96, 0x31, 0x13, 0xa1, 0x17,
- 0x2d, 0xe7, 0x93, 0xe4,
- }
-
- key1 = []byte{
- 0x04, 0x11, 0xdb, 0x93, 0xe1, 0xdc, 0xdb, 0x8a,
- 0x01, 0x6b, 0x49, 0x84, 0x0f, 0x8c, 0x53, 0xbc, 0x1e,
- 0xb6, 0x8a, 0x38, 0x2e, 0x97, 0xb1, 0x48, 0x2e, 0xca,
- 0xd7, 0xb1, 0x48, 0xa6, 0x90, 0x9a, 0x5c, 0xb2, 0xe0,
- 0xea, 0xdd, 0xfb, 0x84, 0xcc, 0xf9, 0x74, 0x44, 0x64,
- 0xf8, 0x2e, 0x16, 0x0b, 0xfa, 0x9b, 0x8b, 0x64, 0xf9,
- 0xd4, 0xc0, 0x3f, 0x99, 0x9b, 0x86, 0x43, 0xf6, 0x56,
- 0xb4, 0x12, 0xa3,
- }
-
- testSignDesc = input.SignDescriptor{
- SingleTweak: []byte{
- 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
- 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
- 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
- 0x02, 0x02, 0x02, 0x02, 0x02,
- },
- WitnessScript: []byte{
- 0x00, 0x14, 0xee, 0x91, 0x41, 0x7e, 0x85, 0x6c, 0xde,
- 0x10, 0xa2, 0x91, 0x1e, 0xdc, 0xbd, 0xbd, 0x69, 0xe2,
- 0xef, 0xb5, 0x71, 0x48,
- },
- Output: &wire.TxOut{
- Value: 5000000000,
- PkScript: []byte{
- 0x41, // OP_DATA_65
- 0x04, 0xd6, 0x4b, 0xdf, 0xd0, 0x9e, 0xb1, 0xc5,
- 0xfe, 0x29, 0x5a, 0xbd, 0xeb, 0x1d, 0xca, 0x42,
- 0x81, 0xbe, 0x98, 0x8e, 0x2d, 0xa0, 0xb6, 0xc1,
- 0xc6, 0xa5, 0x9d, 0xc2, 0x26, 0xc2, 0x86, 0x24,
- 0xe1, 0x81, 0x75, 0xe8, 0x51, 0xc9, 0x6b, 0x97,
- 0x3d, 0x81, 0xb0, 0x1c, 0xc3, 0x1f, 0x04, 0x78,
- 0x34, 0xbc, 0x06, 0xd6, 0xd6, 0xed, 0xf6, 0x20,
- 0xd1, 0x84, 0x24, 0x1a, 0x6a, 0xed, 0x8b, 0x63,
- 0xa6, // 65-byte signature
- 0xac, // OP_CHECKSIG
- },
- },
- HashType: params.SigHashAll,
- }
-)
-
-func makeTestDB() (kvdb.Backend, func(), er.R) {
- // First, create a temporary directory to be used for the duration of
- // this test.
- tempDirName, errr := ioutil.TempDir("", "arblog")
- if errr != nil {
- return nil, nil, er.E(errr)
- }
-
- db, err := kvdb.Create(kvdb.BoltBackendName, tempDirName+"/test.db", true)
- if err != nil {
- return nil, nil, err
- }
-
- cleanUp := func() {
- db.Close()
- os.RemoveAll(tempDirName)
- }
-
- return db, cleanUp, nil
-}
-
-func newTestBoltArbLog(chainhash chainhash.Hash,
- op wire.OutPoint) (ArbitratorLog, func(), er.R) {
-
- testDB, cleanUp, err := makeTestDB()
- if err != nil {
- return nil, nil, err
- }
-
- testArbCfg := ChannelArbitratorConfig{
- PutResolverReport: func(_ kvdb.RwTx,
- _ *channeldb.ResolverReport) er.R {
- return nil
- },
- }
- testLog, err := newBoltArbitratorLog(testDB, testArbCfg, chainhash, op)
- if err != nil {
- return nil, nil, err
- }
-
- return testLog, cleanUp, err
-}
-
-func randOutPoint() wire.OutPoint {
- var op wire.OutPoint
- rand.Read(op.Hash[:])
- op.Index = prand.Uint32()
-
- return op
-}
-
-func assertResolversEqual(t *testing.T, originalResolver ContractResolver,
- diskResolver ContractResolver) {
-
- assertTimeoutResEqual := func(ogRes, diskRes *htlcTimeoutResolver) {
- if !reflect.DeepEqual(ogRes.htlcResolution, diskRes.htlcResolution) {
- t.Fatalf("resolution mismatch: expected %#v, got %v#",
- ogRes.htlcResolution, diskRes.htlcResolution)
- }
- if ogRes.outputIncubating != diskRes.outputIncubating {
- t.Fatalf("expected %v, got %v",
- ogRes.outputIncubating, diskRes.outputIncubating)
- }
- if ogRes.resolved != diskRes.resolved {
- t.Fatalf("expected %v, got %v", ogRes.resolved,
- diskRes.resolved)
- }
- if ogRes.broadcastHeight != diskRes.broadcastHeight {
- t.Fatalf("expected %v, got %v",
- ogRes.broadcastHeight, diskRes.broadcastHeight)
- }
- if ogRes.htlc.HtlcIndex != diskRes.htlc.HtlcIndex {
- t.Fatalf("expected %v, got %v", ogRes.htlc.HtlcIndex,
- diskRes.htlc.HtlcIndex)
- }
- }
-
- assertSuccessResEqual := func(ogRes, diskRes *htlcSuccessResolver) {
- if !reflect.DeepEqual(ogRes.htlcResolution, diskRes.htlcResolution) {
- t.Fatalf("resolution mismatch: expected %#v, got %v#",
- ogRes.htlcResolution, diskRes.htlcResolution)
- }
- if ogRes.outputIncubating != diskRes.outputIncubating {
- t.Fatalf("expected %v, got %v",
- ogRes.outputIncubating, diskRes.outputIncubating)
- }
- if ogRes.resolved != diskRes.resolved {
- t.Fatalf("expected %v, got %v", ogRes.resolved,
- diskRes.resolved)
- }
- if ogRes.broadcastHeight != diskRes.broadcastHeight {
- t.Fatalf("expected %v, got %v",
- ogRes.broadcastHeight, diskRes.broadcastHeight)
- }
- if ogRes.htlc.RHash != diskRes.htlc.RHash {
- t.Fatalf("expected %v, got %v", ogRes.htlc.RHash,
- diskRes.htlc.RHash)
- }
- }
-
- switch ogRes := originalResolver.(type) {
- case *htlcTimeoutResolver:
- diskRes := diskResolver.(*htlcTimeoutResolver)
- assertTimeoutResEqual(ogRes, diskRes)
-
- case *htlcSuccessResolver:
- diskRes := diskResolver.(*htlcSuccessResolver)
- assertSuccessResEqual(ogRes, diskRes)
-
- case *htlcOutgoingContestResolver:
- diskRes := diskResolver.(*htlcOutgoingContestResolver)
- assertTimeoutResEqual(
- &ogRes.htlcTimeoutResolver, &diskRes.htlcTimeoutResolver,
- )
-
- case *htlcIncomingContestResolver:
- diskRes := diskResolver.(*htlcIncomingContestResolver)
- assertSuccessResEqual(
- &ogRes.htlcSuccessResolver, &diskRes.htlcSuccessResolver,
- )
-
- if ogRes.htlcExpiry != diskRes.htlcExpiry {
- t.Fatalf("expected %v, got %v", ogRes.htlcExpiry,
- diskRes.htlcExpiry)
- }
-
- case *commitSweepResolver:
- diskRes := diskResolver.(*commitSweepResolver)
- if !reflect.DeepEqual(ogRes.commitResolution, diskRes.commitResolution) {
- t.Fatalf("resolution mismatch: expected %v, got %v",
- ogRes.commitResolution, diskRes.commitResolution)
- }
- if ogRes.resolved != diskRes.resolved {
- t.Fatalf("expected %v, got %v", ogRes.resolved,
- diskRes.resolved)
- }
- if ogRes.broadcastHeight != diskRes.broadcastHeight {
- t.Fatalf("expected %v, got %v",
- ogRes.broadcastHeight, diskRes.broadcastHeight)
- }
- if ogRes.chanPoint != diskRes.chanPoint {
- t.Fatalf("expected %v, got %v", ogRes.chanPoint,
- diskRes.chanPoint)
- }
- }
-}
-
-// TestContractInsertionRetrieval tests that were able to insert a set of
-// unresolved contracts into the log, and retrieve the same set properly.
-func TestContractInsertionRetrieval(t *testing.T) {
- t.Parallel()
-
- // First, we'll create a test instance of the ArbitratorLog
- // implementation backed by boltdb.
- testLog, cleanUp, err := newTestBoltArbLog(
- testChainHash, testChanPoint1,
- )
- if err != nil {
- t.Fatalf("unable to create test log: %v", err)
- }
- defer cleanUp()
-
- // The log created, we'll create a series of resolvers, each properly
- // implementing the ContractResolver interface.
- timeoutResolver := htlcTimeoutResolver{
- htlcResolution: lnwallet.OutgoingHtlcResolution{
- Expiry: 99,
- SignedTimeoutTx: nil,
- CsvDelay: 99,
- ClaimOutpoint: randOutPoint(),
- SweepSignDesc: testSignDesc,
- },
- outputIncubating: true,
- resolved: true,
- broadcastHeight: 102,
- htlc: channeldb.HTLC{
- HtlcIndex: 12,
- },
- }
- successResolver := htlcSuccessResolver{
- htlcResolution: lnwallet.IncomingHtlcResolution{
- Preimage: testPreimage,
- SignedSuccessTx: nil,
- CsvDelay: 900,
- ClaimOutpoint: randOutPoint(),
- SweepSignDesc: testSignDesc,
- },
- outputIncubating: true,
- resolved: true,
- broadcastHeight: 109,
- htlc: channeldb.HTLC{
- RHash: testPreimage,
- },
- sweepTx: nil,
- }
- resolvers := []ContractResolver{
- &timeoutResolver,
- &successResolver,
- &commitSweepResolver{
- commitResolution: lnwallet.CommitOutputResolution{
- SelfOutPoint: testChanPoint2,
- SelfOutputSignDesc: testSignDesc,
- MaturityDelay: 99,
- },
- resolved: false,
- broadcastHeight: 109,
- chanPoint: testChanPoint1,
- },
- }
-
- // All resolvers require a unique ResolverKey() output. To achieve this
- // for the composite resolvers, we'll mutate the underlying resolver
- // with a new outpoint.
- contestTimeout := timeoutResolver
- contestTimeout.htlcResolution.ClaimOutpoint = randOutPoint()
- resolvers = append(resolvers, &htlcOutgoingContestResolver{
- htlcTimeoutResolver: contestTimeout,
- })
- contestSuccess := successResolver
- contestSuccess.htlcResolution.ClaimOutpoint = randOutPoint()
- resolvers = append(resolvers, &htlcIncomingContestResolver{
- htlcExpiry: 100,
- htlcSuccessResolver: contestSuccess,
- })
-
- // For quick lookup during the test, we'll create this map which allow
- // us to lookup a resolver according to its unique resolver key.
- resolverMap := make(map[string]ContractResolver)
- resolverMap[string(timeoutResolver.ResolverKey())] = resolvers[0]
- resolverMap[string(successResolver.ResolverKey())] = resolvers[1]
- resolverMap[string(resolvers[2].ResolverKey())] = resolvers[2]
- resolverMap[string(resolvers[3].ResolverKey())] = resolvers[3]
- resolverMap[string(resolvers[4].ResolverKey())] = resolvers[4]
-
- // Now, we'll insert the resolver into the log, we do not need to apply
- // any closures, so we will pass in nil.
- err = testLog.InsertUnresolvedContracts(nil, resolvers...)
- if err != nil {
- t.Fatalf("unable to insert resolvers: %v", err)
- }
-
- // With the resolvers inserted, we'll now attempt to retrieve them from
- // the database, so we can compare them to the versions we created
- // above.
- diskResolvers, err := testLog.FetchUnresolvedContracts()
- if err != nil {
- t.Fatalf("unable to retrieve resolvers: %v", err)
- }
-
- if len(diskResolvers) != len(resolvers) {
- t.Fatalf("expected %v got resolvers, instead got %v: %#v",
- len(resolvers), len(diskResolvers),
- diskResolvers)
- }
-
- // Now we'll run through each of the resolvers, and ensure that it maps
- // to a resolver perfectly that we inserted previously.
- for _, diskResolver := range diskResolvers {
- resKey := string(diskResolver.ResolverKey())
- originalResolver, ok := resolverMap[resKey]
- if !ok {
- t.Fatalf("unable to find resolver match for %T: %v",
- diskResolver, resKey)
- }
-
- assertResolversEqual(t, originalResolver, diskResolver)
- }
-
- // We'll now delete the state, then attempt to retrieve the set of
- // resolvers, no resolvers should be found.
- if err := testLog.WipeHistory(); err != nil {
- t.Fatalf("unable to wipe log: %v", err)
- }
- diskResolvers, err = testLog.FetchUnresolvedContracts()
- if err != nil {
- t.Fatalf("unable to fetch unresolved contracts: %v", err)
- }
- if len(diskResolvers) != 0 {
- t.Fatalf("no resolvers should be found, instead %v were",
- len(diskResolvers))
- }
-}
-
-// TestContractResolution tests that once we mark a contract as resolved, it's
-// properly removed from the database.
-func TestContractResolution(t *testing.T) {
- t.Parallel()
-
- // First, we'll create a test instance of the ArbitratorLog
- // implementation backed by boltdb.
- testLog, cleanUp, err := newTestBoltArbLog(
- testChainHash, testChanPoint1,
- )
- if err != nil {
- t.Fatalf("unable to create test log: %v", err)
- }
- defer cleanUp()
-
- // We'll now create a timeout resolver that we'll be using for the
- // duration of this test.
- timeoutResolver := &htlcTimeoutResolver{
- htlcResolution: lnwallet.OutgoingHtlcResolution{
- Expiry: 991,
- SignedTimeoutTx: nil,
- CsvDelay: 992,
- ClaimOutpoint: randOutPoint(),
- SweepSignDesc: testSignDesc,
- },
- outputIncubating: true,
- resolved: true,
- broadcastHeight: 192,
- htlc: channeldb.HTLC{
- HtlcIndex: 9912,
- },
- }
-
- // First, we'll insert the resolver into the database and ensure that
- // we get the same resolver out the other side. We do not need to apply
- // any closures.
- err = testLog.InsertUnresolvedContracts(nil, timeoutResolver)
- if err != nil {
- t.Fatalf("unable to insert contract into db: %v", err)
- }
- dbContracts, err := testLog.FetchUnresolvedContracts()
- if err != nil {
- t.Fatalf("unable to fetch contracts from db: %v", err)
- }
- assertResolversEqual(t, timeoutResolver, dbContracts[0])
-
- // Now, we'll mark the contract as resolved within the database.
- if err := testLog.ResolveContract(timeoutResolver); err != nil {
- t.Fatalf("unable to resolve contract: %v", err)
- }
-
- // At this point, no contracts should exist within the log.
- dbContracts, err = testLog.FetchUnresolvedContracts()
- if err != nil {
- t.Fatalf("unable to fetch contracts from db: %v", err)
- }
- if len(dbContracts) != 0 {
- t.Fatalf("no contract should be from in the db, instead %v "+
- "were", len(dbContracts))
- }
-}
-
-// TestContractSwapping ensures that callers are able to atomically swap to
-// distinct contracts for one another.
-func TestContractSwapping(t *testing.T) {
- t.Parallel()
-
- // First, we'll create a test instance of the ArbitratorLog
- // implementation backed by boltdb.
- testLog, cleanUp, err := newTestBoltArbLog(
- testChainHash, testChanPoint1,
- )
- if err != nil {
- t.Fatalf("unable to create test log: %v", err)
- }
- defer cleanUp()
-
- // We'll create two resolvers, a regular timeout resolver, and the
- // contest resolver that eventually turns into the timeout resolver.
- timeoutResolver := htlcTimeoutResolver{
- htlcResolution: lnwallet.OutgoingHtlcResolution{
- Expiry: 99,
- SignedTimeoutTx: nil,
- CsvDelay: 99,
- ClaimOutpoint: randOutPoint(),
- SweepSignDesc: testSignDesc,
- },
- outputIncubating: true,
- resolved: true,
- broadcastHeight: 102,
- htlc: channeldb.HTLC{
- HtlcIndex: 12,
- },
- }
- contestResolver := &htlcOutgoingContestResolver{
- htlcTimeoutResolver: timeoutResolver,
- }
-
- // We'll first insert the contest resolver into the log with no
- // additional updates.
- err = testLog.InsertUnresolvedContracts(nil, contestResolver)
- if err != nil {
- t.Fatalf("unable to insert contract into db: %v", err)
- }
-
- // With the resolver inserted, we'll now attempt to atomically swap it
- // for its underlying timeout resolver.
- err = testLog.SwapContract(contestResolver, &timeoutResolver)
- if err != nil {
- t.Fatalf("unable to swap contracts: %v", err)
- }
-
- // At this point, there should now only be a single contract in the
- // database.
- dbContracts, err := testLog.FetchUnresolvedContracts()
- if err != nil {
- t.Fatalf("unable to fetch contracts from db: %v", err)
- }
- if len(dbContracts) != 1 {
- t.Fatalf("one contract should be from in the db, instead %v "+
- "were", len(dbContracts))
- }
-
- // That single contract should be the underlying timeout resolver.
- assertResolversEqual(t, &timeoutResolver, dbContracts[0])
-}
-
-// TestContractResolutionsStorage tests that we're able to properly store and
-// retrieve contract resolutions written to disk.
-func TestContractResolutionsStorage(t *testing.T) {
- t.Parallel()
-
- // First, we'll create a test instance of the ArbitratorLog
- // implementation backed by boltdb.
- testLog, cleanUp, err := newTestBoltArbLog(
- testChainHash, testChanPoint1,
- )
- if err != nil {
- t.Fatalf("unable to create test log: %v", err)
- }
- defer cleanUp()
-
- // With the test log created, we'll now craft a contact resolution that
- // will be using for the duration of this test.
- res := ContractResolutions{
- CommitHash: testChainHash,
- CommitResolution: &lnwallet.CommitOutputResolution{
- SelfOutPoint: testChanPoint2,
- SelfOutputSignDesc: testSignDesc,
- MaturityDelay: 101,
- },
- HtlcResolutions: lnwallet.HtlcResolutions{
- IncomingHTLCs: []lnwallet.IncomingHtlcResolution{
- {
- Preimage: testPreimage,
- SignedSuccessTx: nil,
- CsvDelay: 900,
- ClaimOutpoint: randOutPoint(),
- SweepSignDesc: testSignDesc,
- },
- },
- OutgoingHTLCs: []lnwallet.OutgoingHtlcResolution{
- {
- Expiry: 103,
- SignedTimeoutTx: nil,
- CsvDelay: 923923,
- ClaimOutpoint: randOutPoint(),
- SweepSignDesc: testSignDesc,
- },
- },
- },
- AnchorResolution: &lnwallet.AnchorResolution{
- CommitAnchor: testChanPoint3,
- AnchorSignDescriptor: testSignDesc,
- },
- }
-
- // First make sure that fetching unlogged contract resolutions will
- // fail.
- _, err = testLog.FetchContractResolutions()
- if err == nil {
- t.Fatalf("expected reading unlogged resolution from db to fail")
- }
-
- // Insert the resolution into the database, then immediately retrieve
- // them so we can compare equality against the original version.
- if err := testLog.LogContractResolutions(&res); err != nil {
- t.Fatalf("unable to insert resolutions into db: %v", err)
- }
- diskRes, err := testLog.FetchContractResolutions()
- if err != nil {
- t.Fatalf("unable to read resolution from db: %v", err)
- }
-
- if !reflect.DeepEqual(&res, diskRes) {
- t.Fatalf("resolution mismatch: expected %#v\n, got %#v",
- &res, diskRes)
- }
-
- // We'll now delete the state, then attempt to retrieve the set of
- // resolvers, no resolutions should be found.
- if err := testLog.WipeHistory(); err != nil {
- t.Fatalf("unable to wipe log: %v", err)
- }
- _, err = testLog.FetchContractResolutions()
- if !errScopeBucketNoExist.Is(err) {
- t.Fatalf("unexpected error: %v", err)
- }
-}
-
-// TestStateMutation tests that we're able to properly mutate the state of the
-// log, then retrieve that same mutated state from disk.
-func TestStateMutation(t *testing.T) {
- t.Parallel()
-
- testLog, cleanUp, err := newTestBoltArbLog(
- testChainHash, testChanPoint1,
- )
- if err != nil {
- t.Fatalf("unable to create test log: %v", err)
- }
- defer cleanUp()
-
- // The default state of an arbitrator should be StateDefault.
- arbState, err := testLog.CurrentState(nil)
- if err != nil {
- t.Fatalf("unable to read arb state: %v", err)
- }
- if arbState != StateDefault {
- t.Fatalf("state mismatch: expected %v, got %v", StateDefault,
- arbState)
- }
-
- // We should now be able to mutate the state to an arbitrary one of our
- // choosing, then read that same state back from disk.
- if err := testLog.CommitState(StateFullyResolved); err != nil {
- t.Fatalf("unable to write state: %v", err)
- }
- arbState, err = testLog.CurrentState(nil)
- if err != nil {
- t.Fatalf("unable to read arb state: %v", err)
- }
- if arbState != StateFullyResolved {
- t.Fatalf("state mismatch: expected %v, got %v", StateFullyResolved,
- arbState)
- }
-
- // Next, we'll wipe our state and ensure that if we try to query for
- // the current state, we get the proper error.
- err = testLog.WipeHistory()
- if err != nil {
- t.Fatalf("unable to wipe history: %v", err)
- }
-
- // If we try to query for the state again, we should get the default
- // state again.
- arbState, err = testLog.CurrentState(nil)
- if err != nil {
- t.Fatalf("unable to query current state: %v", err)
- }
- if arbState != StateDefault {
- t.Fatalf("state mismatch: expected %v, got %v", StateDefault,
- arbState)
- }
-}
-
-// TestScopeIsolation tests the two distinct ArbitratorLog instances with two
-// distinct scopes, don't over write the state of one another.
-func TestScopeIsolation(t *testing.T) {
- t.Parallel()
-
- // We'll create two distinct test logs. Each log will have a unique
- // scope key, and therefore should be isolated from the other on disk.
- testLog1, cleanUp1, err := newTestBoltArbLog(
- testChainHash, testChanPoint1,
- )
- if err != nil {
- t.Fatalf("unable to create test log: %v", err)
- }
- defer cleanUp1()
-
- testLog2, cleanUp2, err := newTestBoltArbLog(
- testChainHash, testChanPoint2,
- )
- if err != nil {
- t.Fatalf("unable to create test log: %v", err)
- }
- defer cleanUp2()
-
- // We'll now update the current state of both the logs to a unique
- // state.
- if err := testLog1.CommitState(StateWaitingFullResolution); err != nil {
- t.Fatalf("unable to write state: %v", err)
- }
- if err := testLog2.CommitState(StateContractClosed); err != nil {
- t.Fatalf("unable to write state: %v", err)
- }
-
- // Querying each log, the states should be the prior one we set, and be
- // disjoint.
- log1State, err := testLog1.CurrentState(nil)
- if err != nil {
- t.Fatalf("unable to read arb state: %v", err)
- }
- log2State, err := testLog2.CurrentState(nil)
- if err != nil {
- t.Fatalf("unable to read arb state: %v", err)
- }
-
- if log1State == log2State {
- t.Fatalf("log states are the same: %v", log1State)
- }
-
- if log1State != StateWaitingFullResolution {
- t.Fatalf("state mismatch: expected %v, got %v",
- StateWaitingFullResolution, log1State)
- }
- if log2State != StateContractClosed {
- t.Fatalf("state mismatch: expected %v, got %v",
- StateContractClosed, log2State)
- }
-}
-
-// TestCommitSetStorage tests that we're able to properly read/write active
-// commitment sets.
-func TestCommitSetStorage(t *testing.T) {
- t.Parallel()
-
- testLog, cleanUp, err := newTestBoltArbLog(
- testChainHash, testChanPoint1,
- )
- if err != nil {
- t.Fatalf("unable to create test log: %v", err)
- }
- defer cleanUp()
-
- activeHTLCs := []channeldb.HTLC{
- {
- Amt: 1000,
- OnionBlob: make([]byte, 0),
- Signature: make([]byte, 0),
- },
- }
-
- confTypes := []HtlcSetKey{
- LocalHtlcSet, RemoteHtlcSet, RemotePendingHtlcSet,
- }
- for _, pendingRemote := range []bool{true, false} {
- for _, confType := range confTypes {
- commitSet := &CommitSet{
- ConfCommitKey: &confType,
- HtlcSets: make(map[HtlcSetKey][]channeldb.HTLC),
- }
- commitSet.HtlcSets[LocalHtlcSet] = activeHTLCs
- commitSet.HtlcSets[RemoteHtlcSet] = activeHTLCs
-
- if pendingRemote {
- commitSet.HtlcSets[RemotePendingHtlcSet] = activeHTLCs
- }
-
- err := testLog.InsertConfirmedCommitSet(commitSet)
- if err != nil {
- t.Fatalf("unable to write commit set: %v", err)
- }
-
- diskCommitSet, err := testLog.FetchConfirmedCommitSet(nil)
- if err != nil {
- t.Fatalf("unable to read commit set: %v", err)
- }
-
- if !reflect.DeepEqual(commitSet, diskCommitSet) {
- t.Fatalf("commit set mismatch: expected %v, got %v",
- spew.Sdump(commitSet), spew.Sdump(diskCommitSet))
- }
- }
- }
-
-}
-
-func init() {
- testSignDesc.KeyDesc.PubKey, _ = btcec.ParsePubKey(key1, btcec.S256())
-
- prand.Seed(time.Now().Unix())
-}
diff --git a/lnd/contractcourt/chain_arbitrator.go b/lnd/contractcourt/chain_arbitrator.go
deleted file mode 100644
index be4948b7..00000000
--- a/lnd/contractcourt/chain_arbitrator.go
+++ /dev/null
@@ -1,1136 +0,0 @@
-package contractcourt
-
-import (
- "sync"
- "sync/atomic"
- "time"
-
- "github.com/pkt-cash/pktd/btcutil"
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/chaincfg/chainhash"
- "github.com/pkt-cash/pktd/lnd/chainntnfs"
- "github.com/pkt-cash/pktd/lnd/channeldb"
- "github.com/pkt-cash/pktd/lnd/channeldb/kvdb"
- "github.com/pkt-cash/pktd/lnd/clock"
- "github.com/pkt-cash/pktd/lnd/input"
- "github.com/pkt-cash/pktd/lnd/labels"
- "github.com/pkt-cash/pktd/lnd/lnwallet"
- "github.com/pkt-cash/pktd/lnd/lnwallet/chainfee"
- "github.com/pkt-cash/pktd/lnd/lnwire"
- "github.com/pkt-cash/pktd/pktlog/log"
- "github.com/pkt-cash/pktd/pktwallet/walletdb"
- "github.com/pkt-cash/pktd/wire"
-)
-
-var Err = er.NewErrorType("lnd.contractcourt")
-
-// ErrChainArbExiting signals that the chain arbitrator is shutting down.
-var ErrChainArbExiting = Err.CodeWithDetail("ErrChainArbExiting", "ChainArbitrator exiting")
-
-// ResolutionMsg is a message sent by resolvers to outside sub-systems once an
-// outgoing contract has been fully resolved. For multi-hop contracts, if we
-// resolve the outgoing contract, we'll also need to ensure that the incoming
-// contract is resolved as well. We package the items required to resolve the
-// incoming contracts within this message.
-type ResolutionMsg struct {
- // SourceChan identifies the channel that this message is being sent
- // from. This is the channel's short channel ID.
- SourceChan lnwire.ShortChannelID
-
- // HtlcIndex is the index of the contract within the original
- // commitment trace.
- HtlcIndex uint64
-
- // Failure will be non-nil if the incoming contract should be canceled
- // all together. This can happen if the outgoing contract was dust, if
- // if the outgoing HTLC timed out.
- Failure lnwire.FailureMessage
-
- // PreImage will be non-nil if the incoming contract can successfully
- // be redeemed. This can happen if we learn of the preimage from the
- // outgoing HTLC on-chain.
- PreImage *[32]byte
-}
-
-// ChainArbitratorConfig is a configuration struct that contains all the
-// function closures and interface that required to arbitrate on-chain
-// contracts for a particular chain.
-type ChainArbitratorConfig struct {
- // ChainHash is the chain that this arbitrator is to operate within.
- ChainHash chainhash.Hash
-
- // IncomingBroadcastDelta is the delta that we'll use to decide when to
- // broadcast our commitment transaction if we have incoming htlcs. This
- // value should be set based on our current fee estimation of the
- // commitment transaction. We use this to determine when we should
- // broadcast instead of just the HTLC timeout, as we want to ensure
- // that the commitment transaction is already confirmed, by the time the
- // HTLC expires. Otherwise we may end up not settling the htlc on-chain
- // because the other party managed to time it out.
- IncomingBroadcastDelta uint32
-
- // OutgoingBroadcastDelta is the delta that we'll use to decide when to
- // broadcast our commitment transaction if there are active outgoing
- // htlcs. This value can be lower than the incoming broadcast delta.
- OutgoingBroadcastDelta uint32
-
- // NewSweepAddr is a function that returns a new address under control
- // by the wallet. We'll use this to sweep any no-delay outputs as a
- // result of unilateral channel closes.
- //
- // NOTE: This SHOULD return a p2wkh script.
- NewSweepAddr func() ([]byte, er.R)
-
- // PublishTx reliably broadcasts a transaction to the network. Once
- // this function exits without an error, then they transaction MUST
- // continually be rebroadcast if needed.
- PublishTx func(*wire.MsgTx, string) er.R
-
- // DeliverResolutionMsg is a function that will append an outgoing
- // message to the "out box" for a ChannelLink. This is used to cancel
- // backwards any HTLC's that are either dust, we're timing out, or
- // settling on-chain to the incoming link.
- DeliverResolutionMsg func(...ResolutionMsg) er.R
-
- // MarkLinkInactive is a function closure that the ChainArbitrator will
- // use to mark that active HTLC's shouldn't be attempted to be routed
- // over a particular channel. This function will be called in that a
- // ChannelArbitrator decides that it needs to go to chain in order to
- // resolve contracts.
- //
- // TODO(roasbeef): rename, routing based
- MarkLinkInactive func(wire.OutPoint) er.R
-
- // ContractBreach is a function closure that the ChainArbitrator will
- // use to notify the breachArbiter about a contract breach. It should
- // only return a non-nil error when the breachArbiter has preserved the
- // necessary breach info for this channel point, and it is safe to mark
- // the channel as pending close in the database.
- ContractBreach func(wire.OutPoint, *lnwallet.BreachRetribution) er.R
-
- // IsOurAddress is a function that returns true if the passed address
- // is known to the underlying wallet. Otherwise, false should be
- // returned.
- IsOurAddress func(btcutil.Address) bool
-
- // IncubateOutput sends either an incoming HTLC, an outgoing HTLC, or
- // both to the utxo nursery. Once this function returns, the nursery
- // should have safely persisted the outputs to disk, and should start
- // the process of incubation. This is used when a resolver wishes to
- // pass off the output to the nursery as we're only waiting on an
- // absolute/relative item block.
- IncubateOutputs func(wire.OutPoint, *lnwallet.OutgoingHtlcResolution,
- *lnwallet.IncomingHtlcResolution, uint32) er.R
-
- // PreimageDB is a global store of all known pre-images. We'll use this
- // to decide if we should broadcast a commitment transaction to claim
- // an HTLC on-chain.
- PreimageDB WitnessBeacon
-
- // Notifier is an instance of a chain notifier we'll use to watch for
- // certain on-chain events.
- Notifier chainntnfs.ChainNotifier
-
- // Signer is a signer backed by the active lnd node. This should be
- // capable of producing a signature as specified by a valid
- // SignDescriptor.
- Signer input.Signer
-
- // FeeEstimator will be used to return fee estimates.
- FeeEstimator chainfee.Estimator
-
- // ChainIO allows us to query the state of the current main chain.
- ChainIO lnwallet.BlockChainIO
-
- // DisableChannel disables a channel, resulting in it not being able to
- // forward payments.
- DisableChannel func(wire.OutPoint) er.R
-
- // Sweeper allows resolvers to sweep their final outputs.
- Sweeper UtxoSweeper
-
- // Registry is the invoice database that is used by resolvers to lookup
- // preimages and settle invoices.
- Registry Registry
-
- // NotifyClosedChannel is a function closure that the ChainArbitrator
- // will use to notify the ChannelNotifier about a newly closed channel.
- NotifyClosedChannel func(wire.OutPoint)
-
- // OnionProcessor is used to decode onion payloads for on-chain
- // resolution.
- OnionProcessor OnionProcessor
-
- // PaymentsExpirationGracePeriod indicates a time window we let the
- // other node to cancel an outgoing htlc that our node has initiated and
- // has timed out.
- PaymentsExpirationGracePeriod time.Duration
-
- // IsForwardedHTLC checks for a given htlc, identified by channel id and
- // htlcIndex, if it is a forwarded one.
- IsForwardedHTLC func(chanID lnwire.ShortChannelID, htlcIndex uint64) bool
-
- // Clock is the clock implementation that ChannelArbitrator uses.
- // It is useful for testing.
- Clock clock.Clock
-}
-
-// ChainArbitrator is a sub-system that oversees the on-chain resolution of all
-// active, and channel that are in the "pending close" state. Within the
-// contractcourt package, the ChainArbitrator manages a set of active
-// ContractArbitrators. Each ContractArbitrators is responsible for watching
-// the chain for any activity that affects the state of the channel, and also
-// for monitoring each contract in order to determine if any on-chain activity is
-// required. Outside sub-systems interact with the ChainArbitrator in order to
-// forcibly exit a contract, update the set of live signals for each contract,
-// and to receive reports on the state of contract resolution.
-type ChainArbitrator struct {
- started int32 // To be used atomically.
- stopped int32 // To be used atomically.
-
- sync.Mutex
-
- // activeChannels is a map of all the active contracts that are still
- // open, and not fully resolved.
- activeChannels map[wire.OutPoint]*ChannelArbitrator
-
- // activeWatchers is a map of all the active chainWatchers for channels
- // that are still considered open.
- activeWatchers map[wire.OutPoint]*chainWatcher
-
- // cfg is the config struct for the arbitrator that contains all
- // methods and interface it needs to operate.
- cfg ChainArbitratorConfig
-
- // chanSource will be used by the ChainArbitrator to fetch all the
- // active channels that it must still watch over.
- chanSource *channeldb.DB
-
- quit chan struct{}
-
- wg sync.WaitGroup
-}
-
-// NewChainArbitrator returns a new instance of the ChainArbitrator using the
-// passed config struct, and backing persistent database.
-func NewChainArbitrator(cfg ChainArbitratorConfig,
- db *channeldb.DB) *ChainArbitrator {
-
- return &ChainArbitrator{
- cfg: cfg,
- activeChannels: make(map[wire.OutPoint]*ChannelArbitrator),
- activeWatchers: make(map[wire.OutPoint]*chainWatcher),
- chanSource: db,
- quit: make(chan struct{}),
- }
-}
-
-// arbChannel is a wrapper around an open channel that channel arbitrators
-// interact with.
-type arbChannel struct {
- // channel is the in-memory channel state.
- channel *channeldb.OpenChannel
-
- // c references the chain arbitrator and is used by arbChannel
- // internally.
- c *ChainArbitrator
-}
-
-// NewAnchorResolutions returns the anchor resolutions for currently valid
-// commitment transactions.
-//
-// NOTE: Part of the ArbChannel interface.
-func (a *arbChannel) NewAnchorResolutions() ([]*lnwallet.AnchorResolution,
- er.R) {
-
- // Get a fresh copy of the database state to base the anchor resolutions
- // on. Unfortunately the channel instance that we have here isn't the
- // same instance that is used by the link.
- chanPoint := a.channel.FundingOutpoint
-
- channel, err := a.c.chanSource.FetchChannel(chanPoint)
- if err != nil {
- return nil, err
- }
-
- chanMachine, err := lnwallet.NewLightningChannel(
- a.c.cfg.Signer, channel, nil,
- )
- if err != nil {
- return nil, err
- }
-
- return chanMachine.NewAnchorResolutions()
-}
-
-// ForceCloseChan should force close the contract that this attendant is
-// watching over. We'll use this when we decide that we need to go to chain. It
-// should in addition tell the switch to remove the corresponding link, such
-// that we won't accept any new updates. The returned summary contains all items
-// needed to eventually resolve all outputs on chain.
-//
-// NOTE: Part of the ArbChannel interface.
-func (a *arbChannel) ForceCloseChan() (*lnwallet.LocalForceCloseSummary, er.R) {
- // First, we mark the channel as borked, this ensure
- // that no new state transitions can happen, and also
- // that the link won't be loaded into the switch.
- if err := a.channel.MarkBorked(); err != nil {
- return nil, err
- }
-
- // With the channel marked as borked, we'll now remove
- // the link from the switch if its there. If the link
- // is active, then this method will block until it
- // exits.
- chanPoint := a.channel.FundingOutpoint
-
- if err := a.c.cfg.MarkLinkInactive(chanPoint); err != nil {
- log.Errorf("unable to mark link inactive: %v", err)
- }
-
- // Now that we know the link can't mutate the channel
- // state, we'll read the channel from disk the target
- // channel according to its channel point.
- channel, err := a.c.chanSource.FetchChannel(chanPoint)
- if err != nil {
- return nil, err
- }
-
- // Finally, we'll force close the channel completing
- // the force close workflow.
- chanMachine, err := lnwallet.NewLightningChannel(
- a.c.cfg.Signer, channel, nil,
- )
- if err != nil {
- return nil, err
- }
- return chanMachine.ForceClose()
-}
-
-// newActiveChannelArbitrator creates a new instance of an active channel
-// arbitrator given the state of the target channel.
-func newActiveChannelArbitrator(channel *channeldb.OpenChannel,
- c *ChainArbitrator, chanEvents *ChainEventSubscription) (*ChannelArbitrator, er.R) {
-
- log.Tracef("Creating ChannelArbitrator for ChannelPoint(%v)",
- channel.FundingOutpoint)
-
- // TODO(roasbeef): fetch best height (or pass in) so can ensure block
- // epoch delivers all the notifications to
-
- chanPoint := channel.FundingOutpoint
-
- // Next we'll create the matching configuration struct that contains
- // all interfaces and methods the arbitrator needs to do its job.
- arbCfg := ChannelArbitratorConfig{
- ChanPoint: chanPoint,
- Channel: c.getArbChannel(channel),
- ShortChanID: channel.ShortChanID(),
-
- MarkCommitmentBroadcasted: channel.MarkCommitmentBroadcasted,
- MarkChannelClosed: func(summary *channeldb.ChannelCloseSummary,
- statuses ...channeldb.ChannelStatus) er.R {
-
- err := channel.CloseChannel(summary, statuses...)
- if err != nil {
- return err
- }
- c.cfg.NotifyClosedChannel(summary.ChanPoint)
- return nil
- },
- IsPendingClose: false,
- ChainArbitratorConfig: c.cfg,
- ChainEvents: chanEvents,
- PutResolverReport: func(tx kvdb.RwTx,
- report *channeldb.ResolverReport) er.R {
-
- return c.chanSource.PutResolverReport(
- tx, c.cfg.ChainHash, &channel.FundingOutpoint,
- report,
- )
- },
- }
-
- // The final component needed is an arbitrator log that the arbitrator
- // will use to keep track of its internal state using a backed
- // persistent log.
- //
- // TODO(roasbeef); abstraction leak...
- // * rework: adaptor method to set log scope w/ factory func
- chanLog, err := newBoltArbitratorLog(
- c.chanSource.Backend, arbCfg, c.cfg.ChainHash, chanPoint,
- )
- if err != nil {
- return nil, err
- }
-
- arbCfg.MarkChannelResolved = func() er.R {
- return c.ResolveContract(chanPoint)
- }
-
- // Finally, we'll need to construct a series of htlc Sets based on all
- // currently known valid commitments.
- htlcSets := make(map[HtlcSetKey]htlcSet)
- htlcSets[LocalHtlcSet] = newHtlcSet(channel.LocalCommitment.Htlcs)
- htlcSets[RemoteHtlcSet] = newHtlcSet(channel.RemoteCommitment.Htlcs)
-
- pendingRemoteCommitment, err := channel.RemoteCommitChainTip()
- if err != nil && !channeldb.ErrNoPendingCommit.Is(err) {
- return nil, err
- }
- if pendingRemoteCommitment != nil {
- htlcSets[RemotePendingHtlcSet] = newHtlcSet(
- pendingRemoteCommitment.Commitment.Htlcs,
- )
- }
-
- return NewChannelArbitrator(
- arbCfg, htlcSets, chanLog,
- ), nil
-}
-
-// getArbChannel returns an open channel wrapper for use by channel arbitrators.
-func (c *ChainArbitrator) getArbChannel(
- channel *channeldb.OpenChannel) *arbChannel {
-
- return &arbChannel{
- channel: channel,
- c: c,
- }
-}
-
-// ResolveContract marks a contract as fully resolved within the database.
-// This is only to be done once all contracts which were live on the channel
-// before hitting the chain have been resolved.
-func (c *ChainArbitrator) ResolveContract(chanPoint wire.OutPoint) er.R {
-
- log.Infof("Marking ChannelPoint(%v) fully resolved", chanPoint)
-
- // First, we'll we'll mark the channel as fully closed from the PoV of
- // the channel source.
- err := c.chanSource.MarkChanFullyClosed(&chanPoint)
- if err != nil {
- log.Errorf("ChainArbitrator: unable to mark ChannelPoint(%v) "+
- "fully closed: %v", chanPoint, err)
- return err
- }
-
- // Now that the channel has been marked as fully closed, we'll stop
- // both the channel arbitrator and chain watcher for this channel if
- // they're still active.
- var arbLog ArbitratorLog
- c.Lock()
- chainArb := c.activeChannels[chanPoint]
- delete(c.activeChannels, chanPoint)
-
- chainWatcher := c.activeWatchers[chanPoint]
- delete(c.activeWatchers, chanPoint)
- c.Unlock()
-
- if chainArb != nil {
- arbLog = chainArb.log
-
- if err := chainArb.Stop(); err != nil {
- log.Warnf("unable to stop ChannelArbitrator(%v): %v",
- chanPoint, err)
- }
- }
- if chainWatcher != nil {
- if err := chainWatcher.Stop(); err != nil {
- log.Warnf("unable to stop ChainWatcher(%v): %v",
- chanPoint, err)
- }
- }
-
- // Once this has been marked as resolved, we'll wipe the log that the
- // channel arbitrator was using to store its persistent state. We do
- // this after marking the channel resolved, as otherwise, the
- // arbitrator would be re-created, and think it was starting from the
- // default state.
- if arbLog != nil {
- if err := arbLog.WipeHistory(); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-// Start launches all goroutines that the ChainArbitrator needs to operate.
-func (c *ChainArbitrator) Start() er.R {
- if !atomic.CompareAndSwapInt32(&c.started, 0, 1) {
- return nil
- }
-
- log.Tracef("Starting ChainArbitrator")
-
- // First, we'll fetch all the channels that are still open, in order to
- // collect them within our set of active contracts.
- openChannels, err := c.chanSource.FetchAllChannels()
- if err != nil {
- return err
- }
-
- if len(openChannels) > 0 {
- log.Infof("Creating ChannelArbitrators for %v active channels",
- len(openChannels))
- }
-
- // For each open channel, we'll configure then launch a corresponding
- // ChannelArbitrator.
- for _, channel := range openChannels {
- chanPoint := channel.FundingOutpoint
- channel := channel
-
- // First, we'll create an active chainWatcher for this channel
- // to ensure that we detect any relevant on chain events.
- chainWatcher, err := newChainWatcher(
- chainWatcherConfig{
- chanState: channel,
- notifier: c.cfg.Notifier,
- signer: c.cfg.Signer,
- isOurAddr: c.cfg.IsOurAddress,
- contractBreach: func(retInfo *lnwallet.BreachRetribution) er.R {
- return c.cfg.ContractBreach(chanPoint, retInfo)
- },
- extractStateNumHint: lnwallet.GetStateNumHint,
- },
- )
- if err != nil {
- return err
- }
-
- c.activeWatchers[chanPoint] = chainWatcher
- channelArb, err := newActiveChannelArbitrator(
- channel, c, chainWatcher.SubscribeChannelEvents(),
- )
- if err != nil {
- return err
- }
-
- c.activeChannels[chanPoint] = channelArb
-
- // Republish any closing transactions for this channel.
- err = c.publishClosingTxs(channel)
- if err != nil {
- return err
- }
- }
-
- // In addition to the channels that we know to be open, we'll also
- // launch arbitrators to finishing resolving any channels that are in
- // the pending close state.
- closingChannels, err := c.chanSource.FetchClosedChannels(true)
- if err != nil {
- return err
- }
-
- if len(closingChannels) > 0 {
- log.Infof("Creating ChannelArbitrators for %v closing channels",
- len(closingChannels))
- }
-
- // Next, for each channel is the closing state, we'll launch a
- // corresponding more restricted resolver, as we don't have to watch
- // the chain any longer, only resolve the contracts on the confirmed
- // commitment.
- for _, closeChanInfo := range closingChannels {
- // We can leave off the CloseContract and ForceCloseChan
- // methods as the channel is already closed at this point.
- chanPoint := closeChanInfo.ChanPoint
- arbCfg := ChannelArbitratorConfig{
- ChanPoint: chanPoint,
- ShortChanID: closeChanInfo.ShortChanID,
- ChainArbitratorConfig: c.cfg,
- ChainEvents: &ChainEventSubscription{},
- IsPendingClose: true,
- ClosingHeight: closeChanInfo.CloseHeight,
- CloseType: closeChanInfo.CloseType,
- PutResolverReport: func(tx kvdb.RwTx,
- report *channeldb.ResolverReport) er.R {
-
- return c.chanSource.PutResolverReport(
- tx, c.cfg.ChainHash, &chanPoint, report,
- )
- },
- }
- chanLog, err := newBoltArbitratorLog(
- c.chanSource.Backend, arbCfg, c.cfg.ChainHash, chanPoint,
- )
- if err != nil {
- return err
- }
- arbCfg.MarkChannelResolved = func() er.R {
- return c.ResolveContract(chanPoint)
- }
-
- // We can also leave off the set of HTLC's here as since the
- // channel is already in the process of being full resolved, no
- // new HTLC's will be added.
- c.activeChannels[chanPoint] = NewChannelArbitrator(
- arbCfg, nil, chanLog,
- )
- }
-
- // Now, we'll start all chain watchers in parallel to shorten start up
- // duration. In neutrino mode, this allows spend registrations to take
- // advantage of batch spend reporting, instead of doing a single rescan
- // per chain watcher.
- //
- // NOTE: After this point, we Stop the chain arb to ensure that any
- // lingering goroutines are cleaned up before exiting.
- watcherErrs := make(chan er.R, len(c.activeWatchers))
- var wg sync.WaitGroup
- for _, watcher := range c.activeWatchers {
- wg.Add(1)
- go func(w *chainWatcher) {
- defer wg.Done()
- select {
- case watcherErrs <- w.Start():
- case <-c.quit:
- watcherErrs <- ErrChainArbExiting.Default()
- }
- }(watcher)
- }
-
- // Once all chain watchers have been started, seal the err chan to
- // signal the end of the err stream.
- go func() {
- wg.Wait()
- close(watcherErrs)
- }()
-
- // stopAndLog is a helper function which shuts down the chain arb and
- // logs errors if they occur.
- stopAndLog := func() {
- if err := c.Stop(); err != nil {
- log.Errorf("ChainArbitrator could not shutdown: %v", err)
- }
- }
-
- // Handle all errors returned from spawning our chain watchers. If any
- // of them failed, we will stop the chain arb to shutdown any active
- // goroutines.
- for err := range watcherErrs {
- if err != nil {
- stopAndLog()
- return err
- }
- }
-
- // Before we start all of our arbitrators, we do a preliminary state
- // lookup so that we can combine all of these lookups in a single db
- // transaction.
- var startStates map[wire.OutPoint]*chanArbStartState
-
- err = kvdb.View(c.chanSource, func(tx walletdb.ReadTx) er.R {
- for _, arbitrator := range c.activeChannels {
- startState, err := arbitrator.getStartState(tx)
- if err != nil {
- return err
- }
-
- startStates[arbitrator.cfg.ChanPoint] = startState
- }
-
- return nil
- }, func() {
- startStates = make(
- map[wire.OutPoint]*chanArbStartState,
- len(c.activeChannels),
- )
- })
- if err != nil {
- stopAndLog()
- return err
- }
-
- // Launch all the goroutines for each arbitrator so they can carry out
- // their duties.
- for _, arbitrator := range c.activeChannels {
- startState, ok := startStates[arbitrator.cfg.ChanPoint]
- if !ok {
- stopAndLog()
- return er.Errorf("arbitrator: %v has no start state",
- arbitrator.cfg.ChanPoint)
- }
-
- if err := arbitrator.Start(startState); err != nil {
- stopAndLog()
- return err
- }
- }
-
- // Subscribe to a single stream of block epoch notifications that we
- // will dispatch to all active arbitrators.
- blockEpoch, err := c.cfg.Notifier.RegisterBlockEpochNtfn(nil)
- if err != nil {
- return err
- }
-
- // Start our goroutine which will dispatch blocks to each arbitrator.
- c.wg.Add(1)
- go func() {
- defer c.wg.Done()
- c.dispatchBlocks(blockEpoch)
- }()
-
- // TODO(roasbeef): eventually move all breach watching here
-
- return nil
-}
-
-// blockRecipient contains the information we need to dispatch a block to a
-// channel arbitrator.
-type blockRecipient struct {
- // chanPoint is the funding outpoint of the channel.
- chanPoint wire.OutPoint
-
- // blocks is the channel that new block heights are sent into. This
- // channel should be sufficiently buffered as to not block the sender.
- blocks chan<- int32
-
- // quit is closed if the receiving entity is shutting down.
- quit chan struct{}
-}
-
-// dispatchBlocks consumes a block epoch notification stream and dispatches
-// blocks to each of the chain arb's active channel arbitrators. This function
-// must be run in a goroutine.
-func (c *ChainArbitrator) dispatchBlocks(
- blockEpoch *chainntnfs.BlockEpochEvent) {
-
- // getRecipients is a helper function which acquires the chain arb
- // lock and returns a set of block recipients which can be used to
- // dispatch blocks.
- getRecipients := func() []blockRecipient {
- c.Lock()
- blocks := make([]blockRecipient, 0, len(c.activeChannels))
- for _, channel := range c.activeChannels {
- blocks = append(blocks, blockRecipient{
- chanPoint: channel.cfg.ChanPoint,
- blocks: channel.blocks,
- quit: channel.quit,
- })
- }
- c.Unlock()
-
- return blocks
- }
-
- // On exit, cancel our blocks subscription and close each block channel
- // so that the arbitrators know they will no longer be receiving blocks.
- defer func() {
- blockEpoch.Cancel()
-
- recipients := getRecipients()
- for _, recipient := range recipients {
- close(recipient.blocks)
- }
- }()
-
- // Consume block epochs until we receive the instruction to shutdown.
- for {
- select {
- // Consume block epochs, exiting if our subscription is
- // terminated.
- case block, ok := <-blockEpoch.Epochs:
- if !ok {
- log.Trace("dispatchBlocks block epoch " +
- "cancelled")
- return
- }
-
- // Get the set of currently active channels block
- // subscription channels and dispatch the block to
- // each.
- for _, recipient := range getRecipients() {
- select {
- // Deliver the block to the arbitrator.
- case recipient.blocks <- block.Height:
-
- // If the recipient is shutting down, exit
- // without delivering the block. This may be
- // the case when two blocks are mined in quick
- // succession, and the arbitrator resolves
- // after the first block, and does not need to
- // consume the second block.
- case <-recipient.quit:
- log.Debugf("channel: %v exit without "+
- "receiving block: %v",
- recipient.chanPoint,
- block.Height)
-
- // If the chain arb is shutting down, we don't
- // need to deliver any more blocks (everything
- // will be shutting down).
- case <-c.quit:
- return
- }
- }
-
- // Exit if the chain arbitrator is shutting down.
- case <-c.quit:
- return
- }
- }
-}
-
-// publishClosingTxs will load any stored cooperative or unilater closing
-// transactions and republish them. This helps ensure propagation of the
-// transactions in the event that prior publications failed.
-func (c *ChainArbitrator) publishClosingTxs(
- channel *channeldb.OpenChannel) er.R {
-
- // If the channel has had its unilateral close broadcasted already,
- // republish it in case it didn't propagate.
- if channel.HasChanStatus(channeldb.ChanStatusCommitBroadcasted) {
- err := c.rebroadcast(
- channel, channeldb.ChanStatusCommitBroadcasted,
- )
- if err != nil {
- return err
- }
- }
-
- // If the channel has had its cooperative close broadcasted
- // already, republish it in case it didn't propagate.
- if channel.HasChanStatus(channeldb.ChanStatusCoopBroadcasted) {
- err := c.rebroadcast(
- channel, channeldb.ChanStatusCoopBroadcasted,
- )
- if err != nil {
- return err
- }
- }
-
- return nil
-}
-
-// rebroadcast is a helper method which will republish the unilateral or
-// cooperative close transaction or a channel in a particular state.
-//
-// NOTE: There is no risk to caling this method if the channel isn't in either
-// CommimentBroadcasted or CoopBroadcasted, but the logs will be misleading.
-func (c *ChainArbitrator) rebroadcast(channel *channeldb.OpenChannel,
- state channeldb.ChannelStatus) er.R {
-
- chanPoint := channel.FundingOutpoint
-
- var (
- closeTx *wire.MsgTx
- kind string
- err er.R
- )
- switch state {
- case channeldb.ChanStatusCommitBroadcasted:
- kind = "force"
- closeTx, err = channel.BroadcastedCommitment()
-
- case channeldb.ChanStatusCoopBroadcasted:
- kind = "coop"
- closeTx, err = channel.BroadcastedCooperative()
-
- default:
- return er.Errorf("unknown closing state: %v", state)
- }
-
- switch {
-
- // This can happen for channels that had their closing tx published
- // before we started storing it to disk.
- case channeldb.ErrNoCloseTx.Is(err):
- log.Warnf("Channel %v is in state %v, but no %s closing tx "+
- "to re-publish...", chanPoint, state, kind)
- return nil
-
- case err != nil:
- return err
- }
-
- log.Infof("Re-publishing %s close tx(%v) for channel %v",
- kind, closeTx.TxHash(), chanPoint)
-
- label := labels.MakeLabel(
- labels.LabelTypeChannelClose, &channel.ShortChannelID,
- )
- err = c.cfg.PublishTx(closeTx, label)
- if err != nil && !lnwallet.ErrDoubleSpend.Is(err) {
- log.Warnf("Unable to broadcast %s close tx(%v): %v",
- kind, closeTx.TxHash(), err)
- }
-
- return nil
-}
-
-// Stop signals the ChainArbitrator to trigger a graceful shutdown. Any active
-// channel arbitrators will be signalled to exit, and this method will block
-// until they've all exited.
-func (c *ChainArbitrator) Stop() er.R {
- if !atomic.CompareAndSwapInt32(&c.stopped, 0, 1) {
- return nil
- }
-
- log.Infof("Stopping ChainArbitrator")
-
- close(c.quit)
-
- var (
- activeWatchers = make(map[wire.OutPoint]*chainWatcher)
- activeChannels = make(map[wire.OutPoint]*ChannelArbitrator)
- )
-
- // Copy the current set of active watchers and arbitrators to shutdown.
- // We don't want to hold the lock when shutting down each watcher or
- // arbitrator individually, as they may need to acquire this mutex.
- c.Lock()
- for chanPoint, watcher := range c.activeWatchers {
- activeWatchers[chanPoint] = watcher
- }
- for chanPoint, arbitrator := range c.activeChannels {
- activeChannels[chanPoint] = arbitrator
- }
- c.Unlock()
-
- for chanPoint, watcher := range activeWatchers {
- log.Tracef("Attempting to stop ChainWatcher(%v)",
- chanPoint)
-
- if err := watcher.Stop(); err != nil {
- log.Errorf("unable to stop watcher for "+
- "ChannelPoint(%v): %v", chanPoint, err)
- }
- }
- for chanPoint, arbitrator := range activeChannels {
- log.Tracef("Attempting to stop ChannelArbitrator(%v)",
- chanPoint)
-
- if err := arbitrator.Stop(); err != nil {
- log.Errorf("unable to stop arbitrator for "+
- "ChannelPoint(%v): %v", chanPoint, err)
- }
- }
-
- c.wg.Wait()
-
- return nil
-}
-
-// ContractUpdate is a message packages the latest set of active HTLCs on a
-// commitment, and also identifies which commitment received a new set of
-// HTLCs.
-type ContractUpdate struct {
- // HtlcKey identifies which commitment the HTLCs below are present on.
- HtlcKey HtlcSetKey
-
- // Htlcs are the of active HTLCs on the commitment identified by the
- // above HtlcKey.
- Htlcs []channeldb.HTLC
-}
-
-// ContractSignals wraps the two signals that affect the state of a channel
-// being watched by an arbitrator. The two signals we care about are: the
-// channel has a new set of HTLC's, and the remote party has just broadcast
-// their version of the commitment transaction.
-type ContractSignals struct {
- // HtlcUpdates is a channel that the link will use to update the
- // designated channel arbitrator when the set of HTLCs on any valid
- // commitment changes.
- HtlcUpdates chan *ContractUpdate
-
- // ShortChanID is the up to date short channel ID for a contract. This
- // can change either if when the contract was added it didn't yet have
- // a stable identifier, or in the case of a reorg.
- ShortChanID lnwire.ShortChannelID
-}
-
-// UpdateContractSignals sends a set of active, up to date contract signals to
-// the ChannelArbitrator which is has been assigned to the channel infield by
-// the passed channel point.
-func (c *ChainArbitrator) UpdateContractSignals(chanPoint wire.OutPoint,
- signals *ContractSignals) er.R {
-
- log.Infof("Attempting to update ContractSignals for ChannelPoint(%v)",
- chanPoint)
-
- c.Lock()
- arbitrator, ok := c.activeChannels[chanPoint]
- c.Unlock()
- if !ok {
- return er.Errorf("unable to find arbitrator")
- }
-
- arbitrator.UpdateContractSignals(signals)
-
- return nil
-}
-
-// GetChannelArbitrator safely returns the channel arbitrator for a given
-// channel outpoint.
-func (c *ChainArbitrator) GetChannelArbitrator(chanPoint wire.OutPoint) (
- *ChannelArbitrator, er.R) {
-
- c.Lock()
- arbitrator, ok := c.activeChannels[chanPoint]
- c.Unlock()
- if !ok {
- return nil, er.Errorf("unable to find arbitrator")
- }
-
- return arbitrator, nil
-}
-
-// forceCloseReq is a request sent from an outside sub-system to the arbitrator
-// that watches a particular channel to broadcast the commitment transaction,
-// and enter the resolution phase of the channel.
-type forceCloseReq struct {
- // errResp is a channel that will be sent upon either in the case of
- // force close success (nil error), or in the case on an error.
- //
- // NOTE; This channel MUST be buffered.
- errResp chan er.R
-
- // closeTx is a channel that carries the transaction which ultimately
- // closed out the channel.
- closeTx chan *wire.MsgTx
-}
-
-// ForceCloseContract attempts to force close the channel infield by the passed
-// channel point. A force close will immediately terminate the contract,
-// causing it to enter the resolution phase. If the force close was successful,
-// then the force close transaction itself will be returned.
-//
-// TODO(roasbeef): just return the summary itself?
-func (c *ChainArbitrator) ForceCloseContract(chanPoint wire.OutPoint) (*wire.MsgTx, er.R) {
- c.Lock()
- arbitrator, ok := c.activeChannels[chanPoint]
- c.Unlock()
- if !ok {
- return nil, er.Errorf("unable to find arbitrator")
- }
-
- log.Infof("Attempting to force close ChannelPoint(%v)", chanPoint)
-
- // Before closing, we'll attempt to send a disable update for the
- // channel. We do so before closing the channel as otherwise the current
- // edge policy won't be retrievable from the graph.
- if err := c.cfg.DisableChannel(chanPoint); err != nil {
- log.Warnf("Unable to disable channel %v on "+
- "close: %v", chanPoint, err)
- }
-
- errChan := make(chan er.R, 1)
- respChan := make(chan *wire.MsgTx, 1)
-
- // With the channel found, and the request crafted, we'll send over a
- // force close request to the arbitrator that watches this channel.
- select {
- case arbitrator.forceCloseReqs <- &forceCloseReq{
- errResp: errChan,
- closeTx: respChan,
- }:
- case <-c.quit:
- return nil, ErrChainArbExiting.Default()
- }
-
- // We'll await two responses: the error response, and the transaction
- // that closed out the channel.
- select {
- case err := <-errChan:
- if err != nil {
- return nil, err
- }
- case <-c.quit:
- return nil, ErrChainArbExiting.Default()
- }
-
- var closeTx *wire.MsgTx
- select {
- case closeTx = <-respChan:
- case <-c.quit:
- return nil, ErrChainArbExiting.Default()
- }
-
- return closeTx, nil
-}
-
-// WatchNewChannel sends the ChainArbitrator a message to create a
-// ChannelArbitrator tasked with watching over a new channel. Once a new
-// channel has finished its final funding flow, it should be registered with
-// the ChainArbitrator so we can properly react to any on-chain events.
-func (c *ChainArbitrator) WatchNewChannel(newChan *channeldb.OpenChannel) er.R {
- c.Lock()
- defer c.Unlock()
-
- log.Infof("Creating new ChannelArbitrator for ChannelPoint(%v)",
- newChan.FundingOutpoint)
-
- // If we're already watching this channel, then we'll ignore this
- // request.
- chanPoint := newChan.FundingOutpoint
- if _, ok := c.activeChannels[chanPoint]; ok {
- return nil
- }
-
- // First, also create an active chainWatcher for this channel to ensure
- // that we detect any relevant on chain events.
- chainWatcher, err := newChainWatcher(
- chainWatcherConfig{
- chanState: newChan,
- notifier: c.cfg.Notifier,
- signer: c.cfg.Signer,
- isOurAddr: c.cfg.IsOurAddress,
- contractBreach: func(retInfo *lnwallet.BreachRetribution) er.R {
- return c.cfg.ContractBreach(chanPoint, retInfo)
- },
- extractStateNumHint: lnwallet.GetStateNumHint,
- },
- )
- if err != nil {
- return err
- }
-
- c.activeWatchers[newChan.FundingOutpoint] = chainWatcher
-
- // We'll also create a new channel arbitrator instance using this new
- // channel, and our internal state.
- channelArb, err := newActiveChannelArbitrator(
- newChan, c, chainWatcher.SubscribeChannelEvents(),
- )
- if err != nil {
- return err
- }
-
- // With the arbitrator created, we'll add it to our set of active
- // arbitrators, then launch it.
- c.activeChannels[chanPoint] = channelArb
-
- if err := channelArb.Start(nil); err != nil {
- return err
- }
-
- return chainWatcher.Start()
-}
-
-// SubscribeChannelEvents returns a new active subscription for the set of
-// possible on-chain events for a particular channel. The struct can be used by
-// callers to be notified whenever an event that changes the state of the
-// channel on-chain occurs.
-func (c *ChainArbitrator) SubscribeChannelEvents(
- chanPoint wire.OutPoint) (*ChainEventSubscription, er.R) {
-
- // First, we'll attempt to look up the active watcher for this channel.
- // If we can't find it, then we'll return an error back to the caller.
- watcher, ok := c.activeWatchers[chanPoint]
- if !ok {
- return nil, er.Errorf("unable to find watcher for: %v",
- chanPoint)
- }
-
- // With the watcher located, we'll request for it to create a new chain
- // event subscription client.
- return watcher.SubscribeChannelEvents(), nil
-}
-
-// TODO(roasbeef): arbitration reports
-// * types: contested, waiting for success conf, etc
diff --git a/lnd/contractcourt/chain_arbitrator_test.go b/lnd/contractcourt/chain_arbitrator_test.go
deleted file mode 100644
index 67cceec5..00000000
--- a/lnd/contractcourt/chain_arbitrator_test.go
+++ /dev/null
@@ -1,247 +0,0 @@
-package contractcourt
-
-import (
- "io/ioutil"
- "net"
- "os"
- "testing"
-
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/chaincfg/chainhash"
- "github.com/pkt-cash/pktd/lnd/chainntnfs"
- "github.com/pkt-cash/pktd/lnd/channeldb"
- "github.com/pkt-cash/pktd/lnd/clock"
- "github.com/pkt-cash/pktd/lnd/lntest/mock"
- "github.com/pkt-cash/pktd/lnd/lnwallet"
- "github.com/pkt-cash/pktd/wire"
-)
-
-// TestChainArbitratorRepulishCloses tests that the chain arbitrator will
-// republish closing transactions for channels marked CommitementBroadcast or
-// CoopBroadcast in the database at startup.
-func TestChainArbitratorRepublishCloses(t *testing.T) {
- t.Parallel()
-
- tempPath, errr := ioutil.TempDir("", "testdb")
- if errr != nil {
- t.Fatal(errr)
- }
- defer os.RemoveAll(tempPath)
-
- db, err := channeldb.Open(tempPath)
- if err != nil {
- t.Fatal(err)
- }
- defer db.Close()
-
- // Create 10 test channels and sync them to the database.
- const numChans = 10
- var channels []*channeldb.OpenChannel
- for i := 0; i < numChans; i++ {
- lChannel, _, cleanup, err := lnwallet.CreateTestChannels(
- channeldb.SingleFunderTweaklessBit,
- )
- if err != nil {
- t.Fatal(err)
- }
- defer cleanup()
-
- channel := lChannel.State()
-
- // We manually set the db here to make sure all channels are
- // synced to the same db.
- channel.Db = db
-
- addr := &net.TCPAddr{
- IP: net.ParseIP("127.0.0.1"),
- Port: 18556,
- }
- if err := channel.SyncPending(addr, 101); err != nil {
- t.Fatal(err)
- }
-
- channels = append(channels, channel)
- }
-
- // Mark half of the channels as commitment broadcasted.
- for i := 0; i < numChans/2; i++ {
- closeTx := channels[i].FundingTxn.Copy()
- closeTx.TxIn[0].PreviousOutPoint = channels[i].FundingOutpoint
- err := channels[i].MarkCommitmentBroadcasted(closeTx, true)
- if err != nil {
- t.Fatal(err)
- }
-
- err = channels[i].MarkCoopBroadcasted(closeTx, true)
- if err != nil {
- t.Fatal(err)
- }
- }
-
- // We keep track of the transactions published by the ChainArbitrator
- // at startup.
- published := make(map[chainhash.Hash]int)
-
- chainArbCfg := ChainArbitratorConfig{
- ChainIO: &mock.ChainIO{},
- Notifier: &mock.ChainNotifier{
- SpendChan: make(chan *chainntnfs.SpendDetail),
- EpochChan: make(chan *chainntnfs.BlockEpoch),
- ConfChan: make(chan *chainntnfs.TxConfirmation),
- },
- PublishTx: func(tx *wire.MsgTx, _ string) er.R {
- published[tx.TxHash()]++
- return nil
- },
- Clock: clock.NewDefaultClock(),
- }
- chainArb := NewChainArbitrator(
- chainArbCfg, db,
- )
-
- if err := chainArb.Start(); err != nil {
- t.Fatal(err)
- }
- defer func() {
- if err := chainArb.Stop(); err != nil {
- t.Fatal(err)
- }
- }()
-
- // Half of the channels should have had their closing tx re-published.
- if len(published) != numChans/2 {
- t.Fatalf("expected %d re-published transactions, got %d",
- numChans/2, len(published))
- }
-
- // And make sure the published transactions are correct, and unique.
- for i := 0; i < numChans/2; i++ {
- closeTx := channels[i].FundingTxn.Copy()
- closeTx.TxIn[0].PreviousOutPoint = channels[i].FundingOutpoint
-
- count, ok := published[closeTx.TxHash()]
- if !ok {
- t.Fatalf("closing tx not re-published")
- }
-
- // We expect one coop close and one force close.
- if count != 2 {
- t.Fatalf("expected 2 closing txns, only got %d", count)
- }
-
- delete(published, closeTx.TxHash())
- }
-
- if len(published) != 0 {
- t.Fatalf("unexpected tx published")
- }
-}
-
-// TestResolveContract tests that if we have an active channel being watched by
-// the chain arb, then a call to ResolveContract will mark the channel as fully
-// closed in the database, and also clean up all arbitrator state.
-func TestResolveContract(t *testing.T) {
- t.Parallel()
-
- // To start with, we'll create a new temp DB for the duration of this
- // test.
- tempPath, errr := ioutil.TempDir("", "testdb")
- if errr != nil {
- t.Fatalf("unable to make temp dir: %v", errr)
- }
- defer os.RemoveAll(tempPath)
- db, err := channeldb.Open(tempPath)
- if err != nil {
- t.Fatalf("unable to open db: %v", err)
- }
- defer db.Close()
-
- // With the DB created, we'll make a new channel, and mark it as
- // pending open within the database.
- newChannel, _, cleanup, err := lnwallet.CreateTestChannels(
- channeldb.SingleFunderTweaklessBit,
- )
- if err != nil {
- t.Fatalf("unable to make new test channel: %v", err)
- }
- defer cleanup()
- channel := newChannel.State()
- channel.Db = db
- addr := &net.TCPAddr{
- IP: net.ParseIP("127.0.0.1"),
- Port: 18556,
- }
- if err := channel.SyncPending(addr, 101); err != nil {
- t.Fatalf("unable to write channel to db: %v", err)
- }
-
- // With the channel inserted into the database, we'll now create a new
- // chain arbitrator that should pick up these new channels and launch
- // resolver for them.
- chainArbCfg := ChainArbitratorConfig{
- ChainIO: &mock.ChainIO{},
- Notifier: &mock.ChainNotifier{
- SpendChan: make(chan *chainntnfs.SpendDetail),
- EpochChan: make(chan *chainntnfs.BlockEpoch),
- ConfChan: make(chan *chainntnfs.TxConfirmation),
- },
- PublishTx: func(tx *wire.MsgTx, _ string) er.R {
- return nil
- },
- Clock: clock.NewDefaultClock(),
- }
- chainArb := NewChainArbitrator(
- chainArbCfg, db,
- )
- if err := chainArb.Start(); err != nil {
- t.Fatal(err)
- }
- defer func() {
- if err := chainArb.Stop(); err != nil {
- t.Fatal(err)
- }
- }()
-
- channelArb := chainArb.activeChannels[channel.FundingOutpoint]
-
- // While the resolver are active, we'll now remove the channel from the
- // database (mark is as closed).
- err = db.AbandonChannel(&channel.FundingOutpoint, 4)
- if err != nil {
- t.Fatalf("unable to remove channel: %v", err)
- }
-
- // With the channel removed, we'll now manually call ResolveContract.
- // This stimulates needing to remove a channel from the chain arb due
- // to any possible external consistency issues.
- err = chainArb.ResolveContract(channel.FundingOutpoint)
- if err != nil {
- t.Fatalf("unable to resolve contract: %v", err)
- }
-
- // The shouldn't be an active chain watcher or channel arb for this
- // channel.
- if len(chainArb.activeChannels) != 0 {
- t.Fatalf("expected zero active channels, instead have %v",
- len(chainArb.activeChannels))
- }
- if len(chainArb.activeWatchers) != 0 {
- t.Fatalf("expected zero active watchers, instead have %v",
- len(chainArb.activeWatchers))
- }
-
- // At this point, the channel's arbitrator log should also be empty as
- // well.
- _, err = channelArb.log.FetchContractResolutions()
- if !errScopeBucketNoExist.Is(err) {
- t.Fatalf("channel arb log state should have been "+
- "removed: %v", err)
- }
-
- // If we attempt to call this method again, then we should get a nil
- // error, as there is no more state to be cleaned up.
- err = chainArb.ResolveContract(channel.FundingOutpoint)
- if err != nil {
- t.Fatalf("second resolve call shouldn't fail: %v", err)
- }
-}
diff --git a/lnd/contractcourt/chain_watcher.go b/lnd/contractcourt/chain_watcher.go
deleted file mode 100644
index 88f33b2f..00000000
--- a/lnd/contractcourt/chain_watcher.go
+++ /dev/null
@@ -1,1104 +0,0 @@
-package contractcourt
-
-import (
- "bytes"
- "sync"
- "sync/atomic"
- "time"
-
- "github.com/davecgh/go-spew/spew"
- "github.com/pkt-cash/pktd/btcec"
- "github.com/pkt-cash/pktd/btcutil"
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/chaincfg"
- "github.com/pkt-cash/pktd/lnd/chainntnfs"
- "github.com/pkt-cash/pktd/lnd/channeldb"
- "github.com/pkt-cash/pktd/lnd/input"
- "github.com/pkt-cash/pktd/lnd/lnwallet"
- "github.com/pkt-cash/pktd/lnd/shachain"
- "github.com/pkt-cash/pktd/pktlog/log"
- "github.com/pkt-cash/pktd/txscript"
- "github.com/pkt-cash/pktd/wire"
- "github.com/pkt-cash/pktd/wire/constants"
-)
-
-const (
- // minCommitPointPollTimeout is the minimum time we'll wait before
- // polling the database for a channel's commitpoint.
- minCommitPointPollTimeout = 1 * time.Second
-
- // maxCommitPointPollTimeout is the maximum time we'll wait before
- // polling the database for a channel's commitpoint.
- maxCommitPointPollTimeout = 10 * time.Minute
-)
-
-// LocalUnilateralCloseInfo encapsulates all the information we need to act on
-// a local force close that gets confirmed.
-type LocalUnilateralCloseInfo struct {
- *chainntnfs.SpendDetail
- *lnwallet.LocalForceCloseSummary
- *channeldb.ChannelCloseSummary
-
- // CommitSet is the set of known valid commitments at the time the
- // remote party's commitment hit the chain.
- CommitSet CommitSet
-}
-
-// CooperativeCloseInfo encapsulates all the information we need to act on a
-// cooperative close that gets confirmed.
-type CooperativeCloseInfo struct {
- *channeldb.ChannelCloseSummary
-}
-
-// RemoteUnilateralCloseInfo wraps the normal UnilateralCloseSummary to couple
-// the CommitSet at the time of channel closure.
-type RemoteUnilateralCloseInfo struct {
- *lnwallet.UnilateralCloseSummary
-
- // CommitSet is the set of known valid commitments at the time the
- // remote party's commitment hit the chain.
- CommitSet CommitSet
-}
-
-// CommitSet is a collection of the set of known valid commitments at a given
-// instant. If ConfCommitKey is set, then the commitment identified by the
-// HtlcSetKey has hit the chain. This struct will be used to examine all live
-// HTLCs to determine if any additional actions need to be made based on the
-// remote party's commitments.
-type CommitSet struct {
- // ConfCommitKey if non-nil, identifies the commitment that was
- // confirmed in the chain.
- ConfCommitKey *HtlcSetKey
-
- // HtlcSets stores the set of all known active HTLC for each active
- // commitment at the time of channel closure.
- HtlcSets map[HtlcSetKey][]channeldb.HTLC
-}
-
-// IsEmpty returns true if there are no HTLCs at all within all commitments
-// that are a part of this commitment diff.
-func (c *CommitSet) IsEmpty() bool {
- if c == nil {
- return true
- }
-
- for _, htlcs := range c.HtlcSets {
- if len(htlcs) != 0 {
- return false
- }
- }
-
- return true
-}
-
-// toActiveHTLCSets returns the set of all active HTLCs across all commitment
-// transactions.
-func (c *CommitSet) toActiveHTLCSets() map[HtlcSetKey]htlcSet {
- htlcSets := make(map[HtlcSetKey]htlcSet)
-
- for htlcSetKey, htlcs := range c.HtlcSets {
- htlcSets[htlcSetKey] = newHtlcSet(htlcs)
- }
-
- return htlcSets
-}
-
-// ChainEventSubscription is a struct that houses a subscription to be notified
-// for any on-chain events related to a channel. There are three types of
-// possible on-chain events: a cooperative channel closure, a unilateral
-// channel closure, and a channel breach. The fourth type: a force close is
-// locally initiated, so we don't provide any event stream for said event.
-type ChainEventSubscription struct {
- // ChanPoint is that channel that chain events will be dispatched for.
- ChanPoint wire.OutPoint
-
- // RemoteUnilateralClosure is a channel that will be sent upon in the
- // event that the remote party's commitment transaction is confirmed.
- RemoteUnilateralClosure chan *RemoteUnilateralCloseInfo
-
- // LocalUnilateralClosure is a channel that will be sent upon in the
- // event that our commitment transaction is confirmed.
- LocalUnilateralClosure chan *LocalUnilateralCloseInfo
-
- // CooperativeClosure is a signal that will be sent upon once a
- // cooperative channel closure has been detected confirmed.
- CooperativeClosure chan *CooperativeCloseInfo
-
- // ContractBreach is a channel that will be sent upon if we detect a
- // contract breach. The struct sent across the channel contains all the
- // material required to bring the cheating channel peer to justice.
- ContractBreach chan *lnwallet.BreachRetribution
-
- // Cancel cancels the subscription to the event stream for a particular
- // channel. This method should be called once the caller no longer needs to
- // be notified of any on-chain events for a particular channel.
- Cancel func()
-}
-
-// chainWatcherConfig encapsulates all the necessary functions and interfaces
-// needed to watch and act on on-chain events for a particular channel.
-type chainWatcherConfig struct {
- // chanState is a snapshot of the persistent state of the channel that
- // we're watching. In the event of an on-chain event, we'll query the
- // database to ensure that we act using the most up to date state.
- chanState *channeldb.OpenChannel
-
- // notifier is a reference to the channel notifier that we'll use to be
- // notified of output spends and when transactions are confirmed.
- notifier chainntnfs.ChainNotifier
-
- // signer is the main signer instances that will be responsible for
- // signing any HTLC and commitment transaction generated by the state
- // machine.
- signer input.Signer
-
- // contractBreach is a method that will be called by the watcher if it
- // detects that a contract breach transaction has been confirmed. Only
- // when this method returns with a non-nil error it will be safe to mark
- // the channel as pending close in the database.
- contractBreach func(*lnwallet.BreachRetribution) er.R
-
- // isOurAddr is a function that returns true if the passed address is
- // known to us.
- isOurAddr func(btcutil.Address) bool
-
- // extractStateNumHint extracts the encoded state hint using the passed
- // obfuscater. This is used by the chain watcher to identify which
- // state was broadcast and confirmed on-chain.
- extractStateNumHint func(*wire.MsgTx, [lnwallet.StateHintSize]byte) uint64
-}
-
-// chainWatcher is a system that's assigned to every active channel. The duty
-// of this system is to watch the chain for spends of the channels chan point.
-// If a spend is detected then with chain watcher will notify all subscribers
-// that the channel has been closed, and also give them the materials necessary
-// to sweep the funds of the channel on chain eventually.
-type chainWatcher struct {
- started int32 // To be used atomically.
- stopped int32 // To be used atomically.
-
- quit chan struct{}
- wg sync.WaitGroup
-
- cfg chainWatcherConfig
-
- // stateHintObfuscator is a 48-bit state hint that's used to obfuscate
- // the current state number on the commitment transactions.
- stateHintObfuscator [lnwallet.StateHintSize]byte
-
- // All the fields below are protected by this mutex.
- sync.Mutex
-
- // clientID is an ephemeral counter used to keep track of each
- // individual client subscription.
- clientID uint64
-
- // clientSubscriptions is a map that keeps track of all the active
- // client subscriptions for events related to this channel.
- clientSubscriptions map[uint64]*ChainEventSubscription
-}
-
-// newChainWatcher returns a new instance of a chainWatcher for a channel given
-// the chan point to watch, and also a notifier instance that will allow us to
-// detect on chain events.
-func newChainWatcher(cfg chainWatcherConfig) (*chainWatcher, er.R) {
- // In order to be able to detect the nature of a potential channel
- // closure we'll need to reconstruct the state hint bytes used to
- // obfuscate the commitment state number encoded in the lock time and
- // sequence fields.
- var stateHint [lnwallet.StateHintSize]byte
- chanState := cfg.chanState
- if chanState.IsInitiator {
- stateHint = lnwallet.DeriveStateHintObfuscator(
- chanState.LocalChanCfg.PaymentBasePoint.PubKey,
- chanState.RemoteChanCfg.PaymentBasePoint.PubKey,
- )
- } else {
- stateHint = lnwallet.DeriveStateHintObfuscator(
- chanState.RemoteChanCfg.PaymentBasePoint.PubKey,
- chanState.LocalChanCfg.PaymentBasePoint.PubKey,
- )
- }
-
- return &chainWatcher{
- cfg: cfg,
- stateHintObfuscator: stateHint,
- quit: make(chan struct{}),
- clientSubscriptions: make(map[uint64]*ChainEventSubscription),
- }, nil
-}
-
-// Start starts all goroutines that the chainWatcher needs to perform its
-// duties.
-func (c *chainWatcher) Start() er.R {
- if !atomic.CompareAndSwapInt32(&c.started, 0, 1) {
- return nil
- }
-
- chanState := c.cfg.chanState
- log.Debugf("Starting chain watcher for ChannelPoint(%v)",
- chanState.FundingOutpoint)
-
- // First, we'll register for a notification to be dispatched if the
- // funding output is spent.
- fundingOut := &chanState.FundingOutpoint
-
- // As a height hint, we'll try to use the opening height, but if the
- // channel isn't yet open, then we'll use the height it was broadcast
- // at.
- heightHint := c.cfg.chanState.ShortChanID().BlockHeight
- if heightHint == 0 {
- heightHint = chanState.FundingBroadcastHeight
- }
-
- localKey := chanState.LocalChanCfg.MultiSigKey.PubKey.SerializeCompressed()
- remoteKey := chanState.RemoteChanCfg.MultiSigKey.PubKey.SerializeCompressed()
- multiSigScript, err := input.GenMultiSigScript(
- localKey, remoteKey,
- )
- if err != nil {
- return err
- }
- pkScript, err := input.WitnessScriptHash(multiSigScript)
- if err != nil {
- return err
- }
-
- spendNtfn, err := c.cfg.notifier.RegisterSpendNtfn(
- fundingOut, pkScript, heightHint,
- )
- if err != nil {
- return err
- }
-
- // With the spend notification obtained, we'll now dispatch the
- // closeObserver which will properly react to any changes.
- c.wg.Add(1)
- go c.closeObserver(spendNtfn)
-
- return nil
-}
-
-// Stop signals the close observer to gracefully exit.
-func (c *chainWatcher) Stop() er.R {
- if !atomic.CompareAndSwapInt32(&c.stopped, 0, 1) {
- return nil
- }
-
- close(c.quit)
-
- c.wg.Wait()
-
- return nil
-}
-
-// SubscribeChannelEvents returns an active subscription to the set of channel
-// events for the channel watched by this chain watcher. Once clients no longer
-// require the subscription, they should call the Cancel() method to allow the
-// watcher to regain those committed resources.
-func (c *chainWatcher) SubscribeChannelEvents() *ChainEventSubscription {
-
- c.Lock()
- clientID := c.clientID
- c.clientID++
- c.Unlock()
-
- log.Debugf("New ChainEventSubscription(id=%v) for ChannelPoint(%v)",
- clientID, c.cfg.chanState.FundingOutpoint)
-
- sub := &ChainEventSubscription{
- ChanPoint: c.cfg.chanState.FundingOutpoint,
- RemoteUnilateralClosure: make(chan *RemoteUnilateralCloseInfo, 1),
- LocalUnilateralClosure: make(chan *LocalUnilateralCloseInfo, 1),
- CooperativeClosure: make(chan *CooperativeCloseInfo, 1),
- ContractBreach: make(chan *lnwallet.BreachRetribution, 1),
- Cancel: func() {
- c.Lock()
- delete(c.clientSubscriptions, clientID)
- c.Unlock()
- },
- }
-
- c.Lock()
- c.clientSubscriptions[clientID] = sub
- c.Unlock()
-
- return sub
-}
-
-// isOurCommitment returns true if the passed commitSpend is a spend of the
-// funding transaction using our commitment transaction (a local force close).
-// In order to do this in a state agnostic manner, we'll make our decisions
-// based off of only the set of outputs included.
-func isOurCommitment(localChanCfg, remoteChanCfg channeldb.ChannelConfig,
- commitSpend *chainntnfs.SpendDetail, broadcastStateNum uint64,
- revocationProducer shachain.Producer,
- chanType channeldb.ChannelType) (bool, er.R) {
-
- // First, we'll re-derive our commitment point for this state since
- // this is what we use to randomize each of the keys for this state.
- commitSecret, err := revocationProducer.AtIndex(broadcastStateNum)
- if err != nil {
- return false, err
- }
- commitPoint := input.ComputeCommitmentPoint(commitSecret[:])
-
- // Now that we have the commit point, we'll derive the tweaked local
- // and remote keys for this state. We use our point as only we can
- // revoke our own commitment.
- commitKeyRing := lnwallet.DeriveCommitmentKeys(
- commitPoint, true, chanType, &localChanCfg, &remoteChanCfg,
- )
-
- // With the keys derived, we'll construct the remote script that'll be
- // present if they have a non-dust balance on the commitment.
- remoteScript, _, err := lnwallet.CommitScriptToRemote(
- chanType, commitKeyRing.ToRemoteKey,
- )
- if err != nil {
- return false, err
- }
-
- // Next, we'll derive our script that includes the revocation base for
- // the remote party allowing them to claim this output before the CSV
- // delay if we breach.
- localScript, err := input.CommitScriptToSelf(
- uint32(localChanCfg.CsvDelay), commitKeyRing.ToLocalKey,
- commitKeyRing.RevocationKey,
- )
- if err != nil {
- return false, err
- }
- localPkScript, err := input.WitnessScriptHash(localScript)
- if err != nil {
- return false, err
- }
-
- // With all our scripts assembled, we'll examine the outputs of the
- // commitment transaction to determine if this is a local force close
- // or not.
- for _, output := range commitSpend.SpendingTx.TxOut {
- pkScript := output.PkScript
-
- switch {
- case bytes.Equal(localPkScript, pkScript):
- return true, nil
-
- case bytes.Equal(remoteScript.PkScript, pkScript):
- return true, nil
- }
- }
-
- // If neither of these scripts are present, then it isn't a local force
- // close.
- return false, nil
-}
-
-// chainSet includes all the information we need to dispatch a channel close
-// event to any subscribers.
-type chainSet struct {
- // remoteStateNum is the commitment number of the lowest valid
- // commitment the remote party holds from our PoV. This value is used
- // to determine if the remote party is playing a state that's behind,
- // in line, or ahead of the latest state we know for it.
- remoteStateNum uint64
-
- // commitSet includes information pertaining to the set of active HTLCs
- // on each commitment.
- commitSet CommitSet
-
- // remoteCommit is the current commitment of the remote party.
- remoteCommit channeldb.ChannelCommitment
-
- // localCommit is our current commitment.
- localCommit channeldb.ChannelCommitment
-
- // remotePendingCommit points to the dangling commitment of the remote
- // party, if it exists. If there's no dangling commitment, then this
- // pointer will be nil.
- remotePendingCommit *channeldb.ChannelCommitment
-}
-
-// newChainSet creates a new chainSet given the current up to date channel
-// state.
-func newChainSet(chanState *channeldb.OpenChannel) (*chainSet, er.R) {
- // First, we'll grab the current unrevoked commitments for ourselves
- // and the remote party.
- localCommit, remoteCommit, err := chanState.LatestCommitments()
- if err != nil {
- return nil, er.Errorf("unable to fetch channel state for "+
- "chan_point=%v", chanState.FundingOutpoint)
- }
-
- log.Debugf("ChannelPoint(%v): local_commit_type=%v, local_commit=%v",
- chanState.FundingOutpoint, chanState.ChanType,
- spew.Sdump(localCommit))
- log.Debugf("ChannelPoint(%v): remote_commit_type=%v, remote_commit=%v",
- chanState.FundingOutpoint, chanState.ChanType,
- spew.Sdump(remoteCommit))
-
- // Fetch the current known commit height for the remote party, and
- // their pending commitment chain tip if it exists.
- remoteStateNum := remoteCommit.CommitHeight
- remoteChainTip, err := chanState.RemoteCommitChainTip()
- if err != nil && !channeldb.ErrNoPendingCommit.Is(err) {
- return nil, er.Errorf("unable to obtain chain tip for "+
- "ChannelPoint(%v): %v",
- chanState.FundingOutpoint, err)
- }
-
- // Now that we have all the possible valid commitments, we'll make the
- // CommitSet the ChannelArbitrator will need in order to carry out its
- // duty.
- commitSet := CommitSet{
- HtlcSets: map[HtlcSetKey][]channeldb.HTLC{
- LocalHtlcSet: localCommit.Htlcs,
- RemoteHtlcSet: remoteCommit.Htlcs,
- },
- }
-
- var remotePendingCommit *channeldb.ChannelCommitment
- if remoteChainTip != nil {
- remotePendingCommit = &remoteChainTip.Commitment
- log.Debugf("ChannelPoint(%v): remote_pending_commit_type=%v, "+
- "remote_pending_commit=%v", chanState.FundingOutpoint,
- chanState.ChanType,
- spew.Sdump(remoteChainTip.Commitment))
-
- htlcs := remoteChainTip.Commitment.Htlcs
- commitSet.HtlcSets[RemotePendingHtlcSet] = htlcs
- }
-
- // We'll now retrieve the latest state of the revocation store so we
- // can populate the revocation information within the channel state
- // object that we have.
- //
- // TODO(roasbeef): mutation is bad mkay
- _, err = chanState.RemoteRevocationStore()
- if err != nil {
- return nil, er.Errorf("unable to fetch revocation state for "+
- "chan_point=%v", chanState.FundingOutpoint)
- }
-
- return &chainSet{
- remoteStateNum: remoteStateNum,
- commitSet: commitSet,
- localCommit: *localCommit,
- remoteCommit: *remoteCommit,
- remotePendingCommit: remotePendingCommit,
- }, nil
-}
-
-// closeObserver is a dedicated goroutine that will watch for any closes of the
-// channel that it's watching on chain. In the event of an on-chain event, the
-// close observer will assembled the proper materials required to claim the
-// funds of the channel on-chain (if required), then dispatch these as
-// notifications to all subscribers.
-func (c *chainWatcher) closeObserver(spendNtfn *chainntnfs.SpendEvent) {
- defer c.wg.Done()
-
- log.Infof("Close observer for ChannelPoint(%v) active",
- c.cfg.chanState.FundingOutpoint)
-
- select {
- // We've detected a spend of the channel onchain! Depending on the type
- // of spend, we'll act accordingly, so we'll examine the spending
- // transaction to determine what we should do.
- //
- // TODO(Roasbeef): need to be able to ensure this only triggers
- // on confirmation, to ensure if multiple txns are broadcast, we
- // act on the one that's timestamped
- case commitSpend, ok := <-spendNtfn.Spend:
- // If the channel was closed, then this means that the notifier
- // exited, so we will as well.
- if !ok {
- return
- }
-
- // Otherwise, the remote party might have broadcast a prior
- // revoked state...!!!
- commitTxBroadcast := commitSpend.SpendingTx
-
- // First, we'll construct the chainset which includes all the
- // data we need to dispatch an event to our subscribers about
- // this possible channel close event.
- chainSet, err := newChainSet(c.cfg.chanState)
- if err != nil {
- log.Errorf("unable to create commit set: %v", err)
- return
- }
-
- // Decode the state hint encoded within the commitment
- // transaction to determine if this is a revoked state or not.
- obfuscator := c.stateHintObfuscator
- broadcastStateNum := c.cfg.extractStateNumHint(
- commitTxBroadcast, obfuscator,
- )
-
- // Based on the output scripts within this commitment, we'll
- // determine if this is our commitment transaction or not (a
- // self force close).
- isOurCommit, err := isOurCommitment(
- c.cfg.chanState.LocalChanCfg,
- c.cfg.chanState.RemoteChanCfg, commitSpend,
- broadcastStateNum, c.cfg.chanState.RevocationProducer,
- c.cfg.chanState.ChanType,
- )
- if err != nil {
- log.Errorf("unable to determine self commit for "+
- "chan_point=%v: %v",
- c.cfg.chanState.FundingOutpoint, err)
- return
- }
-
- // If this is our commitment transaction, then we can exit here
- // as we don't have any further processing we need to do (we
- // can't cheat ourselves :p).
- if isOurCommit {
- chainSet.commitSet.ConfCommitKey = &LocalHtlcSet
-
- if err := c.dispatchLocalForceClose(
- commitSpend, chainSet.localCommit,
- chainSet.commitSet,
- ); err != nil {
- log.Errorf("unable to handle local"+
- "close for chan_point=%v: %v",
- c.cfg.chanState.FundingOutpoint, err)
- }
- return
- }
-
- // Next, we'll check to see if this is a cooperative channel
- // closure or not. This is characterized by having an input
- // sequence number that's finalized. This won't happen with
- // regular commitment transactions due to the state hint
- // encoding scheme.
- if commitTxBroadcast.TxIn[0].Sequence == constants.MaxTxInSequenceNum {
- // TODO(roasbeef): rare but possible, need itest case
- // for
- err := c.dispatchCooperativeClose(commitSpend)
- if err != nil {
- log.Errorf("unable to handle co op close: %v", err)
- }
- return
- }
-
- log.Warnf("Unprompted commitment broadcast for "+
- "ChannelPoint(%v) ", c.cfg.chanState.FundingOutpoint)
-
- // If this channel has been recovered, then we'll modify our
- // behavior as it isn't possible for us to close out the
- // channel off-chain ourselves. It can only be the remote party
- // force closing, or a cooperative closure we signed off on
- // before losing data getting confirmed in the chain.
- isRecoveredChan := c.cfg.chanState.HasChanStatus(
- channeldb.ChanStatusRestored,
- )
-
- switch {
- // If state number spending transaction matches the current
- // latest state, then they've initiated a unilateral close. So
- // we'll trigger the unilateral close signal so subscribers can
- // clean up the state as necessary.
- case broadcastStateNum == chainSet.remoteStateNum &&
- !isRecoveredChan:
-
- log.Infof("Remote party broadcast base set, "+
- "commit_num=%v", chainSet.remoteStateNum)
-
- chainSet.commitSet.ConfCommitKey = &RemoteHtlcSet
- err := c.dispatchRemoteForceClose(
- commitSpend, chainSet.remoteCommit,
- chainSet.commitSet,
- c.cfg.chanState.RemoteCurrentRevocation,
- )
- if err != nil {
- log.Errorf("unable to handle remote "+
- "close for chan_point=%v: %v",
- c.cfg.chanState.FundingOutpoint, err)
- }
-
- // We'll also handle the case of the remote party broadcasting
- // their commitment transaction which is one height above ours.
- // This case can arise when we initiate a state transition, but
- // the remote party has a fail crash _after_ accepting the new
- // state, but _before_ sending their signature to us.
- case broadcastStateNum == chainSet.remoteStateNum+1 &&
- chainSet.remotePendingCommit != nil && !isRecoveredChan:
-
- log.Infof("Remote party broadcast pending set, "+
- "commit_num=%v", chainSet.remoteStateNum+1)
-
- chainSet.commitSet.ConfCommitKey = &RemotePendingHtlcSet
- err := c.dispatchRemoteForceClose(
- commitSpend, *chainSet.remotePendingCommit,
- chainSet.commitSet,
- c.cfg.chanState.RemoteNextRevocation,
- )
- if err != nil {
- log.Errorf("unable to handle remote "+
- "close for chan_point=%v: %v",
- c.cfg.chanState.FundingOutpoint, err)
- }
-
- // If the remote party has broadcasted a state beyond our best
- // known state for them, and they don't have a pending
- // commitment (we write them to disk before sending out), then
- // this means that we've lost data. In this case, we'll enter
- // the DLP protocol. Otherwise, if we've recovered our channel
- // state from scratch, then we don't know what the precise
- // current state is, so we assume either the remote party
- // forced closed or we've been breached. In the latter case,
- // our tower will take care of us.
- case broadcastStateNum > chainSet.remoteStateNum || isRecoveredChan:
- log.Warnf("Remote node broadcast state #%v, "+
- "which is more than 1 beyond best known "+
- "state #%v!!! Attempting recovery...",
- broadcastStateNum, chainSet.remoteStateNum)
-
- // If this isn't a tweakless commitment, then we'll
- // need to wait for the remote party's latest unrevoked
- // commitment point to be presented to us as we need
- // this to sweep. Otherwise, we can dispatch the remote
- // close and sweep immediately using a fake commitPoint
- // as it isn't actually needed for recovery anymore.
- commitPoint := c.cfg.chanState.RemoteCurrentRevocation
- tweaklessCommit := c.cfg.chanState.ChanType.IsTweakless()
- if !tweaklessCommit {
- commitPoint = c.waitForCommitmentPoint()
- if commitPoint == nil {
- return
- }
-
- log.Infof("Recovered commit point(%x) for "+
- "channel(%v)! Now attempting to use it to "+
- "sweep our funds...",
- commitPoint.SerializeCompressed(),
- c.cfg.chanState.FundingOutpoint)
-
- } else {
- log.Infof("ChannelPoint(%v) is tweakless, "+
- "moving to sweep directly on chain",
- c.cfg.chanState.FundingOutpoint)
- }
-
- // Since we don't have the commitment stored for this
- // state, we'll just pass an empty commitment within
- // the commitment set. Note that this means we won't be
- // able to recover any HTLC funds.
- //
- // TODO(halseth): can we try to recover some HTLCs?
- chainSet.commitSet.ConfCommitKey = &RemoteHtlcSet
- err = c.dispatchRemoteForceClose(
- commitSpend, channeldb.ChannelCommitment{},
- chainSet.commitSet, commitPoint,
- )
- if err != nil {
- log.Errorf("unable to handle remote "+
- "close for chan_point=%v: %v",
- c.cfg.chanState.FundingOutpoint, err)
- }
-
- // If the state number broadcast is lower than the remote
- // node's current un-revoked height, then THEY'RE ATTEMPTING TO
- // VIOLATE THE CONTRACT LAID OUT WITHIN THE PAYMENT CHANNEL.
- // Therefore we close the signal indicating a revoked broadcast
- // to allow subscribers to swiftly dispatch justice!!!
- case broadcastStateNum < chainSet.remoteStateNum:
- err := c.dispatchContractBreach(
- commitSpend, &chainSet.remoteCommit,
- broadcastStateNum,
- )
- if err != nil {
- log.Errorf("unable to handle channel "+
- "breach for chan_point=%v: %v",
- c.cfg.chanState.FundingOutpoint, err)
- }
- }
-
- // Now that a spend has been detected, we've done our job, so
- // we'll exit immediately.
- return
-
- // The chainWatcher has been signalled to exit, so we'll do so now.
- case <-c.quit:
- return
- }
-}
-
-// toSelfAmount takes a transaction and returns the sum of all outputs that pay
-// to a script that the wallet controls. If no outputs pay to us, then we
-// return zero. This is possible as our output may have been trimmed due to
-// being dust.
-func (c *chainWatcher) toSelfAmount(tx *wire.MsgTx) btcutil.Amount {
- var selfAmt btcutil.Amount
- for _, txOut := range tx.TxOut {
- _, addrs, _, err := txscript.ExtractPkScriptAddrs(
- // Doesn't matter what net we actually pass in.
- txOut.PkScript, &chaincfg.TestNet3Params,
- )
- if err != nil {
- continue
- }
-
- for _, addr := range addrs {
- if c.cfg.isOurAddr(addr) {
- selfAmt += btcutil.Amount(txOut.Value)
- }
- }
- }
-
- return selfAmt
-}
-
-// dispatchCooperativeClose processed a detect cooperative channel closure.
-// We'll use the spending transaction to locate our output within the
-// transaction, then clean up the database state. We'll also dispatch a
-// notification to all subscribers that the channel has been closed in this
-// manner.
-func (c *chainWatcher) dispatchCooperativeClose(commitSpend *chainntnfs.SpendDetail) er.R {
- broadcastTx := commitSpend.SpendingTx
-
- log.Infof("Cooperative closure for ChannelPoint(%v): %v",
- c.cfg.chanState.FundingOutpoint, spew.Sdump(broadcastTx))
-
- // If the input *is* final, then we'll check to see which output is
- // ours.
- localAmt := c.toSelfAmount(broadcastTx)
-
- // Once this is known, we'll mark the state as fully closed in the
- // database. We can do this as a cooperatively closed channel has all
- // its outputs resolved after only one confirmation.
- closeSummary := &channeldb.ChannelCloseSummary{
- ChanPoint: c.cfg.chanState.FundingOutpoint,
- ChainHash: c.cfg.chanState.ChainHash,
- ClosingTXID: *commitSpend.SpenderTxHash,
- RemotePub: c.cfg.chanState.IdentityPub,
- Capacity: c.cfg.chanState.Capacity,
- CloseHeight: uint32(commitSpend.SpendingHeight),
- SettledBalance: localAmt,
- CloseType: channeldb.CooperativeClose,
- ShortChanID: c.cfg.chanState.ShortChanID(),
- IsPending: true,
- RemoteCurrentRevocation: c.cfg.chanState.RemoteCurrentRevocation,
- RemoteNextRevocation: c.cfg.chanState.RemoteNextRevocation,
- LocalChanConfig: c.cfg.chanState.LocalChanCfg,
- }
-
- // Attempt to add a channel sync message to the close summary.
- chanSync, err := c.cfg.chanState.ChanSyncMsg()
- if err != nil {
- log.Errorf("ChannelPoint(%v): unable to create channel sync "+
- "message: %v", c.cfg.chanState.FundingOutpoint, err)
- } else {
- closeSummary.LastChanSyncMsg = chanSync
- }
-
- // Create a summary of all the information needed to handle the
- // cooperative closure.
- closeInfo := &CooperativeCloseInfo{
- ChannelCloseSummary: closeSummary,
- }
-
- // With the event processed, we'll now notify all subscribers of the
- // event.
- c.Lock()
- for _, sub := range c.clientSubscriptions {
- select {
- case sub.CooperativeClosure <- closeInfo:
- case <-c.quit:
- c.Unlock()
- return er.Errorf("exiting")
- }
- }
- c.Unlock()
-
- return nil
-}
-
-// dispatchLocalForceClose processes a unilateral close by us being confirmed.
-func (c *chainWatcher) dispatchLocalForceClose(
- commitSpend *chainntnfs.SpendDetail,
- localCommit channeldb.ChannelCommitment, commitSet CommitSet) er.R {
-
- log.Infof("Local unilateral close of ChannelPoint(%v) "+
- "detected", c.cfg.chanState.FundingOutpoint)
-
- forceClose, err := lnwallet.NewLocalForceCloseSummary(
- c.cfg.chanState, c.cfg.signer,
- commitSpend.SpendingTx, localCommit,
- )
- if err != nil {
- return err
- }
-
- // As we've detected that the channel has been closed, immediately
- // creating a close summary for future usage by related sub-systems.
- chanSnapshot := forceClose.ChanSnapshot
- closeSummary := &channeldb.ChannelCloseSummary{
- ChanPoint: chanSnapshot.ChannelPoint,
- ChainHash: chanSnapshot.ChainHash,
- ClosingTXID: forceClose.CloseTx.TxHash(),
- RemotePub: &chanSnapshot.RemoteIdentity,
- Capacity: chanSnapshot.Capacity,
- CloseType: channeldb.LocalForceClose,
- IsPending: true,
- ShortChanID: c.cfg.chanState.ShortChanID(),
- CloseHeight: uint32(commitSpend.SpendingHeight),
- RemoteCurrentRevocation: c.cfg.chanState.RemoteCurrentRevocation,
- RemoteNextRevocation: c.cfg.chanState.RemoteNextRevocation,
- LocalChanConfig: c.cfg.chanState.LocalChanCfg,
- }
-
- // If our commitment output isn't dust or we have active HTLC's on the
- // commitment transaction, then we'll populate the balances on the
- // close channel summary.
- if forceClose.CommitResolution != nil {
- closeSummary.SettledBalance = chanSnapshot.LocalBalance.ToSatoshis()
- closeSummary.TimeLockedBalance = chanSnapshot.LocalBalance.ToSatoshis()
- }
- for _, htlc := range forceClose.HtlcResolutions.OutgoingHTLCs {
- htlcValue := btcutil.Amount(htlc.SweepSignDesc.Output.Value)
- closeSummary.TimeLockedBalance += htlcValue
- }
-
- // Attempt to add a channel sync message to the close summary.
- chanSync, err := c.cfg.chanState.ChanSyncMsg()
- if err != nil {
- log.Errorf("ChannelPoint(%v): unable to create channel sync "+
- "message: %v", c.cfg.chanState.FundingOutpoint, err)
- } else {
- closeSummary.LastChanSyncMsg = chanSync
- }
-
- // With the event processed, we'll now notify all subscribers of the
- // event.
- closeInfo := &LocalUnilateralCloseInfo{
- SpendDetail: commitSpend,
- LocalForceCloseSummary: forceClose,
- ChannelCloseSummary: closeSummary,
- CommitSet: commitSet,
- }
- c.Lock()
- for _, sub := range c.clientSubscriptions {
- select {
- case sub.LocalUnilateralClosure <- closeInfo:
- case <-c.quit:
- c.Unlock()
- return er.Errorf("exiting")
- }
- }
- c.Unlock()
-
- return nil
-}
-
-// dispatchRemoteForceClose processes a detected unilateral channel closure by
-// the remote party. This function will prepare a UnilateralCloseSummary which
-// will then be sent to any subscribers allowing them to resolve all our funds
-// in the channel on chain. Once this close summary is prepared, all registered
-// subscribers will receive a notification of this event. The commitPoint
-// argument should be set to the per_commitment_point corresponding to the
-// spending commitment.
-//
-// NOTE: The remoteCommit argument should be set to the stored commitment for
-// this particular state. If we don't have the commitment stored (should only
-// happen in case we have lost state) it should be set to an empty struct, in
-// which case we will attempt to sweep the non-HTLC output using the passed
-// commitPoint.
-func (c *chainWatcher) dispatchRemoteForceClose(
- commitSpend *chainntnfs.SpendDetail,
- remoteCommit channeldb.ChannelCommitment,
- commitSet CommitSet, commitPoint *btcec.PublicKey) er.R {
-
- log.Infof("Unilateral close of ChannelPoint(%v) "+
- "detected", c.cfg.chanState.FundingOutpoint)
-
- // First, we'll create a closure summary that contains all the
- // materials required to let each subscriber sweep the funds in the
- // channel on-chain.
- uniClose, err := lnwallet.NewUnilateralCloseSummary(
- c.cfg.chanState, c.cfg.signer, commitSpend,
- remoteCommit, commitPoint,
- )
- if err != nil {
- return err
- }
-
- // With the event processed, we'll now notify all subscribers of the
- // event.
- c.Lock()
- for _, sub := range c.clientSubscriptions {
- select {
- case sub.RemoteUnilateralClosure <- &RemoteUnilateralCloseInfo{
- UnilateralCloseSummary: uniClose,
- CommitSet: commitSet,
- }:
- case <-c.quit:
- c.Unlock()
- return er.Errorf("exiting")
- }
- }
- c.Unlock()
-
- return nil
-}
-
-// dispatchContractBreach processes a detected contract breached by the remote
-// party. This method is to be called once we detect that the remote party has
-// broadcast a prior revoked commitment state. This method well prepare all the
-// materials required to bring the cheater to justice, then notify all
-// registered subscribers of this event.
-func (c *chainWatcher) dispatchContractBreach(spendEvent *chainntnfs.SpendDetail,
- remoteCommit *channeldb.ChannelCommitment,
- broadcastStateNum uint64) er.R {
-
- log.Warnf("Remote peer has breached the channel contract for "+
- "ChannelPoint(%v). Revoked state #%v was broadcast!!!",
- c.cfg.chanState.FundingOutpoint, broadcastStateNum)
-
- if err := c.cfg.chanState.MarkBorked(); err != nil {
- return er.Errorf("unable to mark channel as borked: %v", err)
- }
-
- spendHeight := uint32(spendEvent.SpendingHeight)
-
- // Create a new reach retribution struct which contains all the data
- // needed to swiftly bring the cheating peer to justice.
- //
- // TODO(roasbeef): move to same package
- retribution, err := lnwallet.NewBreachRetribution(
- c.cfg.chanState, broadcastStateNum, spendHeight,
- )
- if err != nil {
- return er.Errorf("unable to create breach retribution: %v", err)
- }
-
- // Nil the curve before printing.
- if retribution.RemoteOutputSignDesc != nil &&
- retribution.RemoteOutputSignDesc.DoubleTweak != nil {
- retribution.RemoteOutputSignDesc.DoubleTweak.Curve = nil
- }
- if retribution.RemoteOutputSignDesc != nil &&
- retribution.RemoteOutputSignDesc.KeyDesc.PubKey != nil {
- retribution.RemoteOutputSignDesc.KeyDesc.PubKey.Curve = nil
- }
- if retribution.LocalOutputSignDesc != nil &&
- retribution.LocalOutputSignDesc.DoubleTweak != nil {
- retribution.LocalOutputSignDesc.DoubleTweak.Curve = nil
- }
- if retribution.LocalOutputSignDesc != nil &&
- retribution.LocalOutputSignDesc.KeyDesc.PubKey != nil {
- retribution.LocalOutputSignDesc.KeyDesc.PubKey.Curve = nil
- }
-
- log.Debugf("Punishment breach retribution created: %v",
- log.C(func() string {
- retribution.KeyRing.CommitPoint.Curve = nil
- retribution.KeyRing.LocalHtlcKey = nil
- retribution.KeyRing.RemoteHtlcKey = nil
- retribution.KeyRing.ToLocalKey = nil
- retribution.KeyRing.ToRemoteKey = nil
- retribution.KeyRing.RevocationKey = nil
- return spew.Sdump(retribution)
- }))
-
- // Hand the retribution info over to the breach arbiter.
- if err := c.cfg.contractBreach(retribution); err != nil {
- log.Errorf("unable to hand breached contract off to "+
- "breachArbiter: %v", err)
- return err
- }
-
- // With the event processed, we'll now notify all subscribers of the
- // event.
- c.Lock()
- for _, sub := range c.clientSubscriptions {
- select {
- case sub.ContractBreach <- retribution:
- case <-c.quit:
- c.Unlock()
- return er.Errorf("quitting")
- }
- }
- c.Unlock()
-
- // At this point, we've successfully received an ack for the breach
- // close. We now construct and persist the close summary, marking the
- // channel as pending force closed.
- //
- // TODO(roasbeef): instead mark we got all the monies?
- // TODO(halseth): move responsibility to breach arbiter?
- settledBalance := remoteCommit.LocalBalance.ToSatoshis()
- closeSummary := channeldb.ChannelCloseSummary{
- ChanPoint: c.cfg.chanState.FundingOutpoint,
- ChainHash: c.cfg.chanState.ChainHash,
- ClosingTXID: *spendEvent.SpenderTxHash,
- CloseHeight: spendHeight,
- RemotePub: c.cfg.chanState.IdentityPub,
- Capacity: c.cfg.chanState.Capacity,
- SettledBalance: settledBalance,
- CloseType: channeldb.BreachClose,
- IsPending: true,
- ShortChanID: c.cfg.chanState.ShortChanID(),
- RemoteCurrentRevocation: c.cfg.chanState.RemoteCurrentRevocation,
- RemoteNextRevocation: c.cfg.chanState.RemoteNextRevocation,
- LocalChanConfig: c.cfg.chanState.LocalChanCfg,
- }
-
- // Attempt to add a channel sync message to the close summary.
- chanSync, err := c.cfg.chanState.ChanSyncMsg()
- if err != nil {
- log.Errorf("ChannelPoint(%v): unable to create channel sync "+
- "message: %v", c.cfg.chanState.FundingOutpoint, err)
- } else {
- closeSummary.LastChanSyncMsg = chanSync
- }
-
- if err := c.cfg.chanState.CloseChannel(
- &closeSummary, channeldb.ChanStatusRemoteCloseInitiator,
- ); err != nil {
- return err
- }
-
- log.Infof("Breached channel=%v marked pending-closed",
- c.cfg.chanState.FundingOutpoint)
-
- return nil
-}
-
-// waitForCommitmentPoint waits for the commitment point to be inserted into
-// the local database. We'll use this method in the DLP case, to wait for the
-// remote party to send us their point, as we can't proceed until we have that.
-func (c *chainWatcher) waitForCommitmentPoint() *btcec.PublicKey {
- // If we are lucky, the remote peer sent us the correct commitment
- // point during channel sync, such that we can sweep our funds. If we
- // cannot find the commit point, there's not much we can do other than
- // wait for us to retrieve it. We will attempt to retrieve it from the
- // peer each time we connect to it.
- //
- // TODO(halseth): actively initiate re-connection to the peer?
- backoff := minCommitPointPollTimeout
- for {
- commitPoint, err := c.cfg.chanState.DataLossCommitPoint()
- if err == nil {
- return commitPoint
- }
-
- log.Errorf("Unable to retrieve commitment point for "+
- "channel(%v) with lost state: %v. Retrying in %v.",
- c.cfg.chanState.FundingOutpoint, err, backoff)
-
- select {
- // Wait before retrying, with an exponential backoff.
- case <-time.After(backoff):
- backoff = 2 * backoff
- if backoff > maxCommitPointPollTimeout {
- backoff = maxCommitPointPollTimeout
- }
-
- case <-c.quit:
- return nil
- }
- }
-}
diff --git a/lnd/contractcourt/chain_watcher_test.go b/lnd/contractcourt/chain_watcher_test.go
deleted file mode 100644
index fd305e75..00000000
--- a/lnd/contractcourt/chain_watcher_test.go
+++ /dev/null
@@ -1,551 +0,0 @@
-package contractcourt
-
-import (
- "bytes"
- "crypto/sha256"
- "fmt"
- "testing"
- "time"
-
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/lnd/chainntnfs"
- "github.com/pkt-cash/pktd/lnd/channeldb"
- "github.com/pkt-cash/pktd/lnd/input"
- "github.com/pkt-cash/pktd/lnd/lntest/mock"
- "github.com/pkt-cash/pktd/lnd/lnwallet"
- "github.com/pkt-cash/pktd/lnd/lnwire"
- "github.com/pkt-cash/pktd/wire"
-)
-
-// TestChainWatcherRemoteUnilateralClose tests that the chain watcher is able
-// to properly detect a normal unilateral close by the remote node using their
-// lowest commitment.
-func TestChainWatcherRemoteUnilateralClose(t *testing.T) {
- t.Parallel()
-
- // First, we'll create two channels which already have established a
- // commitment contract between themselves.
- aliceChannel, bobChannel, cleanUp, err := lnwallet.CreateTestChannels(
- channeldb.SingleFunderTweaklessBit,
- )
- if err != nil {
- t.Fatalf("unable to create test channels: %v", err)
- }
- defer cleanUp()
-
- // With the channels created, we'll now create a chain watcher instance
- // which will be watching for any closes of Alice's channel.
- aliceNotifier := &mock.ChainNotifier{
- SpendChan: make(chan *chainntnfs.SpendDetail),
- EpochChan: make(chan *chainntnfs.BlockEpoch),
- ConfChan: make(chan *chainntnfs.TxConfirmation),
- }
- aliceChainWatcher, err := newChainWatcher(chainWatcherConfig{
- chanState: aliceChannel.State(),
- notifier: aliceNotifier,
- signer: aliceChannel.Signer,
- extractStateNumHint: lnwallet.GetStateNumHint,
- })
- if err != nil {
- t.Fatalf("unable to create chain watcher: %v", err)
- }
- err = aliceChainWatcher.Start()
- if err != nil {
- t.Fatalf("unable to start chain watcher: %v", err)
- }
- defer aliceChainWatcher.Stop()
-
- // We'll request a new channel event subscription from Alice's chain
- // watcher.
- chanEvents := aliceChainWatcher.SubscribeChannelEvents()
-
- // If we simulate an immediate broadcast of the current commitment by
- // Bob, then the chain watcher should detect this case.
- bobCommit := bobChannel.State().LocalCommitment.CommitTx
- bobTxHash := bobCommit.TxHash()
- bobSpend := &chainntnfs.SpendDetail{
- SpenderTxHash: &bobTxHash,
- SpendingTx: bobCommit,
- }
- aliceNotifier.SpendChan <- bobSpend
-
- // We should get a new spend event over the remote unilateral close
- // event channel.
- var uniClose *RemoteUnilateralCloseInfo
- select {
- case uniClose = <-chanEvents.RemoteUnilateralClosure:
- case <-time.After(time.Second * 15):
- t.Fatalf("didn't receive unilateral close event")
- }
-
- // The unilateral close should have properly located Alice's output in
- // the commitment transaction.
- if uniClose.CommitResolution == nil {
- t.Fatalf("unable to find alice's commit resolution")
- }
-}
-
-func addFakeHTLC(t *testing.T, htlcAmount lnwire.MilliSatoshi, id uint64,
- aliceChannel, bobChannel *lnwallet.LightningChannel) {
-
- preimage := bytes.Repeat([]byte{byte(id)}, 32)
- paymentHash := sha256.Sum256(preimage)
- var returnPreimage [32]byte
- copy(returnPreimage[:], preimage)
- htlc := &lnwire.UpdateAddHTLC{
- ID: uint64(id),
- PaymentHash: paymentHash,
- Amount: htlcAmount,
- Expiry: uint32(5),
- }
-
- if _, err := aliceChannel.AddHTLC(htlc, nil); err != nil {
- t.Fatalf("alice unable to add htlc: %v", err)
- }
- if _, err := bobChannel.ReceiveHTLC(htlc); err != nil {
- t.Fatalf("bob unable to recv add htlc: %v", err)
- }
-}
-
-// TestChainWatcherRemoteUnilateralClosePendingCommit tests that the chain
-// watcher is able to properly detect a unilateral close wherein the remote
-// node broadcasts their newly received commitment, without first revoking the
-// old one.
-func TestChainWatcherRemoteUnilateralClosePendingCommit(t *testing.T) {
- t.Parallel()
-
- // First, we'll create two channels which already have established a
- // commitment contract between themselves.
- aliceChannel, bobChannel, cleanUp, err := lnwallet.CreateTestChannels(
- channeldb.SingleFunderTweaklessBit,
- )
- if err != nil {
- t.Fatalf("unable to create test channels: %v", err)
- }
- defer cleanUp()
-
- // With the channels created, we'll now create a chain watcher instance
- // which will be watching for any closes of Alice's channel.
- aliceNotifier := &mock.ChainNotifier{
- SpendChan: make(chan *chainntnfs.SpendDetail),
- EpochChan: make(chan *chainntnfs.BlockEpoch),
- ConfChan: make(chan *chainntnfs.TxConfirmation),
- }
- aliceChainWatcher, err := newChainWatcher(chainWatcherConfig{
- chanState: aliceChannel.State(),
- notifier: aliceNotifier,
- signer: aliceChannel.Signer,
- extractStateNumHint: lnwallet.GetStateNumHint,
- })
- if err != nil {
- t.Fatalf("unable to create chain watcher: %v", err)
- }
- if err := aliceChainWatcher.Start(); err != nil {
- t.Fatalf("unable to start chain watcher: %v", err)
- }
- defer aliceChainWatcher.Stop()
-
- // We'll request a new channel event subscription from Alice's chain
- // watcher.
- chanEvents := aliceChainWatcher.SubscribeChannelEvents()
-
- // Next, we'll create a fake HTLC just so we can advance Alice's
- // channel state to a new pending commitment on her remote commit chain
- // for Bob.
- htlcAmount := lnwire.NewMSatFromSatoshis(20000)
- addFakeHTLC(t, htlcAmount, 0, aliceChannel, bobChannel)
-
- // With the HTLC added, we'll now manually initiate a state transition
- // from Alice to Bob.
- _, _, _, err = aliceChannel.SignNextCommitment()
- if err != nil {
- t.Fatal(err)
- }
-
- // At this point, we'll now Bob broadcasting this new pending unrevoked
- // commitment.
- bobPendingCommit, err := aliceChannel.State().RemoteCommitChainTip()
- if err != nil {
- t.Fatal(err)
- }
-
- // We'll craft a fake spend notification with Bob's actual commitment.
- // The chain watcher should be able to detect that this is a pending
- // commit broadcast based on the state hints in the commitment.
- bobCommit := bobPendingCommit.Commitment.CommitTx
- bobTxHash := bobCommit.TxHash()
- bobSpend := &chainntnfs.SpendDetail{
- SpenderTxHash: &bobTxHash,
- SpendingTx: bobCommit,
- }
- aliceNotifier.SpendChan <- bobSpend
-
- // We should get a new spend event over the remote unilateral close
- // event channel.
- var uniClose *RemoteUnilateralCloseInfo
- select {
- case uniClose = <-chanEvents.RemoteUnilateralClosure:
- case <-time.After(time.Second * 15):
- t.Fatalf("didn't receive unilateral close event")
- }
-
- // The unilateral close should have properly located Alice's output in
- // the commitment transaction.
- if uniClose.CommitResolution == nil {
- t.Fatalf("unable to find alice's commit resolution")
- }
-}
-
-// dlpTestCase is a special struct that we'll use to generate randomized test
-// cases for the main TestChainWatcherDataLossProtect test. This struct has a
-// special Generate method that will generate a random state number, and a
-// broadcast state number which is greater than that state number.
-type dlpTestCase struct {
- BroadcastStateNum uint8
- NumUpdates uint8
-}
-
-func executeStateTransitions(t *testing.T, htlcAmount lnwire.MilliSatoshi,
- aliceChannel, bobChannel *lnwallet.LightningChannel,
- numUpdates uint8) er.R {
-
- for i := 0; i < int(numUpdates); i++ {
- addFakeHTLC(
- t, htlcAmount, uint64(i), aliceChannel, bobChannel,
- )
-
- err := lnwallet.ForceStateTransition(aliceChannel, bobChannel)
- if err != nil {
- return err
- }
- }
-
- return nil
-}
-
-// TestChainWatcherDataLossProtect tests that if we've lost data (and are
-// behind the remote node), then we'll properly detect this case and dispatch a
-// remote force close using the obtained data loss commitment point.
-func TestChainWatcherDataLossProtect(t *testing.T) {
- t.Parallel()
-
- // dlpScenario is our primary quick check testing function for this
- // test as whole. It ensures that if the remote party broadcasts a
- // commitment that is beyond our best known commitment for them, and
- // they don't have a pending commitment (one we sent but which hasn't
- // been revoked), then we'll properly detect this case, and execute the
- // DLP protocol on our end.
- //
- // broadcastStateNum is the number that we'll trick Alice into thinking
- // was broadcast, while numUpdates is the actual number of updates
- // we'll execute. Both of these will be random 8-bit values generated
- // by testing/quick.
- dlpScenario := func(t *testing.T, testCase dlpTestCase) bool {
- // First, we'll create two channels which already have
- // established a commitment contract between themselves.
- aliceChannel, bobChannel, cleanUp, err := lnwallet.CreateTestChannels(
- channeldb.SingleFunderBit,
- )
- if err != nil {
- t.Fatalf("unable to create test channels: %v", err)
- }
- defer cleanUp()
-
- // With the channels created, we'll now create a chain watcher
- // instance which will be watching for any closes of Alice's
- // channel.
- aliceNotifier := &mock.ChainNotifier{
- SpendChan: make(chan *chainntnfs.SpendDetail),
- EpochChan: make(chan *chainntnfs.BlockEpoch),
- ConfChan: make(chan *chainntnfs.TxConfirmation),
- }
- aliceChainWatcher, err := newChainWatcher(chainWatcherConfig{
- chanState: aliceChannel.State(),
- notifier: aliceNotifier,
- signer: aliceChannel.Signer,
- extractStateNumHint: func(*wire.MsgTx,
- [lnwallet.StateHintSize]byte) uint64 {
-
- // We'll return the "fake" broadcast commitment
- // number so we can simulate broadcast of an
- // arbitrary state.
- return uint64(testCase.BroadcastStateNum)
- },
- })
- if err != nil {
- t.Fatalf("unable to create chain watcher: %v", err)
- }
- if err := aliceChainWatcher.Start(); err != nil {
- t.Fatalf("unable to start chain watcher: %v", err)
- }
- defer aliceChainWatcher.Stop()
-
- // Based on the number of random updates for this state, make a
- // new HTLC to add to the commitment, and then lock in a state
- // transition.
- const htlcAmt = 1000
- err = executeStateTransitions(
- t, htlcAmt, aliceChannel, bobChannel, testCase.NumUpdates,
- )
- if err != nil {
- t.Errorf("unable to trigger state "+
- "transition: %v", err)
- return false
- }
-
- // We'll request a new channel event subscription from Alice's
- // chain watcher so we can be notified of our fake close below.
- chanEvents := aliceChainWatcher.SubscribeChannelEvents()
-
- // Otherwise, we'll feed in this new state number as a response
- // to the query, and insert the expected DLP commit point.
- dlpPoint := aliceChannel.State().RemoteCurrentRevocation
- err = aliceChannel.State().MarkDataLoss(dlpPoint)
- if err != nil {
- t.Errorf("unable to insert dlp point: %v", err)
- return false
- }
-
- // Now we'll trigger the channel close event to trigger the
- // scenario.
- bobCommit := bobChannel.State().LocalCommitment.CommitTx
- bobTxHash := bobCommit.TxHash()
- bobSpend := &chainntnfs.SpendDetail{
- SpenderTxHash: &bobTxHash,
- SpendingTx: bobCommit,
- }
- aliceNotifier.SpendChan <- bobSpend
-
- // We should get a new uni close resolution that indicates we
- // processed the DLP scenario.
- var uniClose *RemoteUnilateralCloseInfo
- select {
- case uniClose = <-chanEvents.RemoteUnilateralClosure:
- // If we processed this as a DLP case, then the remote
- // party's commitment should be blank, as we don't have
- // this up to date state.
- blankCommit := channeldb.ChannelCommitment{}
- if uniClose.RemoteCommit.FeePerKw != blankCommit.FeePerKw {
- t.Errorf("DLP path not executed")
- return false
- }
-
- // The resolution should have also read the DLP point
- // we stored above, and used that to derive their sweep
- // key for this output.
- sweepTweak := input.SingleTweakBytes(
- dlpPoint,
- aliceChannel.State().LocalChanCfg.PaymentBasePoint.PubKey,
- )
- commitResolution := uniClose.CommitResolution
- resolutionTweak := commitResolution.SelfOutputSignDesc.SingleTweak
- if !bytes.Equal(sweepTweak, resolutionTweak) {
- t.Errorf("sweep key mismatch: expected %x got %x",
- sweepTweak, resolutionTweak)
- return false
- }
-
- return true
-
- case <-time.After(time.Second * 5):
- t.Errorf("didn't receive unilateral close event")
- return false
- }
- }
-
- testCases := []dlpTestCase{
- // For our first scenario, we'll ensure that if we're on state 1,
- // and the remote party broadcasts state 2 and we don't have a
- // pending commit for them, then we'll properly detect this as a
- // DLP scenario.
- {
- BroadcastStateNum: 2,
- NumUpdates: 1,
- },
-
- // We've completed a single update, but the remote party broadcasts
- // a state that's 5 states byeond our best known state. We've lost
- // data, but only partially, so we should enter a DLP secnario.
- {
- BroadcastStateNum: 6,
- NumUpdates: 1,
- },
-
- // Similar to the case above, but we've done more than one
- // update.
- {
- BroadcastStateNum: 6,
- NumUpdates: 3,
- },
-
- // We've done zero updates, but our channel peer broadcasts a
- // state beyond our knowledge.
- {
- BroadcastStateNum: 10,
- NumUpdates: 0,
- },
- }
- for _, testCase := range testCases {
- testName := fmt.Sprintf("num_updates=%v,broadcast_state_num=%v",
- testCase.NumUpdates, testCase.BroadcastStateNum)
-
- testCase := testCase
- t.Run(testName, func(t *testing.T) {
- t.Parallel()
-
- if !dlpScenario(t, testCase) {
- t.Fatalf("test %v failed", testName)
- }
- })
- }
-}
-
-// TestChainWatcherLocalForceCloseDetect tests we're able to always detect our
-// commitment output based on only the outputs present on the transaction.
-func TestChainWatcherLocalForceCloseDetect(t *testing.T) {
- t.Parallel()
-
- // localForceCloseScenario is the primary test we'll use to execute our
- // table driven tests. We'll assert that for any number of state
- // updates, and if the commitment transaction has our output or not,
- // we're able to properly detect a local force close.
- localForceCloseScenario := func(t *testing.T, numUpdates uint8,
- remoteOutputOnly, localOutputOnly bool) bool {
-
- // First, we'll create two channels which already have
- // established a commitment contract between themselves.
- aliceChannel, bobChannel, cleanUp, err := lnwallet.CreateTestChannels(
- channeldb.SingleFunderBit,
- )
- if err != nil {
- t.Fatalf("unable to create test channels: %v", err)
- }
- defer cleanUp()
-
- // With the channels created, we'll now create a chain watcher
- // instance which will be watching for any closes of Alice's
- // channel.
- aliceNotifier := &mock.ChainNotifier{
- SpendChan: make(chan *chainntnfs.SpendDetail),
- EpochChan: make(chan *chainntnfs.BlockEpoch),
- ConfChan: make(chan *chainntnfs.TxConfirmation),
- }
- aliceChainWatcher, err := newChainWatcher(chainWatcherConfig{
- chanState: aliceChannel.State(),
- notifier: aliceNotifier,
- signer: aliceChannel.Signer,
- extractStateNumHint: lnwallet.GetStateNumHint,
- })
- if err != nil {
- t.Fatalf("unable to create chain watcher: %v", err)
- }
- if err := aliceChainWatcher.Start(); err != nil {
- t.Fatalf("unable to start chain watcher: %v", err)
- }
- defer aliceChainWatcher.Stop()
-
- // We'll execute a number of state transitions based on the
- // randomly selected number from testing/quick. We do this to
- // get more coverage of various state hint encodings beyond 0
- // and 1.
- const htlcAmt = 1000
- err = executeStateTransitions(
- t, htlcAmt, aliceChannel, bobChannel, numUpdates,
- )
- if err != nil {
- t.Errorf("unable to trigger state "+
- "transition: %v", err)
- return false
- }
-
- // We'll request a new channel event subscription from Alice's
- // chain watcher so we can be notified of our fake close below.
- chanEvents := aliceChainWatcher.SubscribeChannelEvents()
-
- // Next, we'll obtain Alice's commitment transaction and
- // trigger a force close. This should cause her to detect a
- // local force close, and dispatch a local close event.
- aliceCommit := aliceChannel.State().LocalCommitment.CommitTx
-
- // Since this is Alice's commitment, her output is always first
- // since she's the one creating the HTLCs (lower balance). In
- // order to simulate the commitment only having the remote
- // party's output, we'll remove Alice's output.
- if remoteOutputOnly {
- aliceCommit.TxOut = aliceCommit.TxOut[1:]
- }
- if localOutputOnly {
- aliceCommit.TxOut = aliceCommit.TxOut[:1]
- }
-
- aliceTxHash := aliceCommit.TxHash()
- aliceSpend := &chainntnfs.SpendDetail{
- SpenderTxHash: &aliceTxHash,
- SpendingTx: aliceCommit,
- }
- aliceNotifier.SpendChan <- aliceSpend
-
- // We should get a local force close event from Alice as she
- // should be able to detect the close based on the commitment
- // outputs.
- select {
- case <-chanEvents.LocalUnilateralClosure:
- return true
-
- case <-time.After(time.Second * 5):
- t.Errorf("didn't get local for close for state #%v",
- numUpdates)
- return false
- }
- }
-
- // For our test cases, we'll ensure that we test having a remote output
- // present and absent with non or some number of updates in the channel.
- testCases := []struct {
- numUpdates uint8
- remoteOutputOnly bool
- localOutputOnly bool
- }{
- {
- numUpdates: 0,
- remoteOutputOnly: true,
- },
- {
- numUpdates: 0,
- remoteOutputOnly: false,
- },
- {
- numUpdates: 0,
- localOutputOnly: true,
- },
- {
- numUpdates: 20,
- remoteOutputOnly: false,
- },
- {
- numUpdates: 20,
- remoteOutputOnly: true,
- },
- {
- numUpdates: 20,
- localOutputOnly: true,
- },
- }
- for _, testCase := range testCases {
- testName := fmt.Sprintf(
- "num_updates=%v,remote_output=%v,local_output=%v",
- testCase.numUpdates, testCase.remoteOutputOnly,
- testCase.localOutputOnly,
- )
-
- testCase := testCase
- t.Run(testName, func(t *testing.T) {
- t.Parallel()
-
- localForceCloseScenario(
- t, testCase.numUpdates, testCase.remoteOutputOnly,
- testCase.localOutputOnly,
- )
- })
- }
-}
diff --git a/lnd/contractcourt/channel_arbitrator.go b/lnd/contractcourt/channel_arbitrator.go
deleted file mode 100644
index e951c1a6..00000000
--- a/lnd/contractcourt/channel_arbitrator.go
+++ /dev/null
@@ -1,2471 +0,0 @@
-package contractcourt
-
-import (
- "bytes"
- "sync"
- "sync/atomic"
- "time"
-
- "github.com/davecgh/go-spew/spew"
- "github.com/pkt-cash/pktd/btcutil"
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/chaincfg/chainhash"
- "github.com/pkt-cash/pktd/lnd/channeldb"
- "github.com/pkt-cash/pktd/lnd/channeldb/kvdb"
- "github.com/pkt-cash/pktd/lnd/input"
- "github.com/pkt-cash/pktd/lnd/labels"
- "github.com/pkt-cash/pktd/lnd/lntypes"
- "github.com/pkt-cash/pktd/lnd/lnwallet"
- "github.com/pkt-cash/pktd/lnd/lnwire"
- "github.com/pkt-cash/pktd/lnd/sweep"
- "github.com/pkt-cash/pktd/pktlog/log"
- "github.com/pkt-cash/pktd/wire"
-)
-
-var (
- // errAlreadyForceClosed is an error returned when we attempt to force
- // close a channel that's already in the process of doing so.
- errAlreadyForceClosed = Err.CodeWithDetail("errAlreadyForceClosed",
- "channel is already in the process of being force closed")
-)
-
-const (
- // anchorSweepConfTarget is the conf target used when sweeping
- // commitment anchors.
- anchorSweepConfTarget = 6
-
- // arbitratorBlockBufferSize is the size of the buffer we give to each
- // channel arbitrator.
- arbitratorBlockBufferSize = 20
-)
-
-// WitnessSubscription represents an intent to be notified once new witnesses
-// are discovered by various active contract resolvers. A contract resolver may
-// use this to be notified of when it can satisfy an incoming contract after we
-// discover the witness for an outgoing contract.
-type WitnessSubscription struct {
- // WitnessUpdates is a channel that newly discovered witnesses will be
- // sent over.
- //
- // TODO(roasbeef): couple with WitnessType?
- WitnessUpdates <-chan lntypes.Preimage
-
- // CancelSubscription is a function closure that should be used by a
- // client to cancel the subscription once they are no longer interested
- // in receiving new updates.
- CancelSubscription func()
-}
-
-// WitnessBeacon is a global beacon of witnesses. Contract resolvers will use
-// this interface to lookup witnesses (preimages typically) of contracts
-// they're trying to resolve, add new preimages they resolve, and finally
-// receive new updates each new time a preimage is discovered.
-//
-// TODO(roasbeef): need to delete the pre-images once we've used them
-// and have been sufficiently confirmed?
-type WitnessBeacon interface {
- // SubscribeUpdates returns a channel that will be sent upon *each* time
- // a new preimage is discovered.
- SubscribeUpdates() *WitnessSubscription
-
- // LookupPreImage attempts to lookup a preimage in the global cache.
- // True is returned for the second argument if the preimage is found.
- LookupPreimage(payhash lntypes.Hash) (lntypes.Preimage, bool)
-
- // AddPreimages adds a batch of newly discovered preimages to the global
- // cache, and also signals any subscribers of the newly discovered
- // witness.
- AddPreimages(preimages ...lntypes.Preimage) er.R
-}
-
-// ArbChannel is an abstraction that allows the channel arbitrator to interact
-// with an open channel.
-type ArbChannel interface {
- // ForceCloseChan should force close the contract that this attendant
- // is watching over. We'll use this when we decide that we need to go
- // to chain. It should in addition tell the switch to remove the
- // corresponding link, such that we won't accept any new updates. The
- // returned summary contains all items needed to eventually resolve all
- // outputs on chain.
- ForceCloseChan() (*lnwallet.LocalForceCloseSummary, er.R)
-
- // NewAnchorResolutions returns the anchor resolutions for currently
- // valid commitment transactions.
- NewAnchorResolutions() ([]*lnwallet.AnchorResolution, er.R)
-}
-
-// ChannelArbitratorConfig contains all the functionality that the
-// ChannelArbitrator needs in order to properly arbitrate any contract dispute
-// on chain.
-type ChannelArbitratorConfig struct {
- // ChanPoint is the channel point that uniquely identifies this
- // channel.
- ChanPoint wire.OutPoint
-
- // Channel is the full channel data structure. For legacy channels, this
- // field may not always be set after a restart.
- Channel ArbChannel
-
- // ShortChanID describes the exact location of the channel within the
- // chain. We'll use this to address any messages that we need to send
- // to the switch during contract resolution.
- ShortChanID lnwire.ShortChannelID
-
- // ChainEvents is an active subscription to the chain watcher for this
- // channel to be notified of any on-chain activity related to this
- // channel.
- ChainEvents *ChainEventSubscription
-
- // MarkCommitmentBroadcasted should mark the channel as the commitment
- // being broadcast, and we are waiting for the commitment to confirm.
- MarkCommitmentBroadcasted func(*wire.MsgTx, bool) er.R
-
- // MarkChannelClosed marks the channel closed in the database, with the
- // passed close summary. After this method successfully returns we can
- // no longer expect to receive chain events for this channel, and must
- // be able to recover from a failure without getting the close event
- // again. It takes an optional channel status which will update the
- // channel status in the record that we keep of historical channels.
- MarkChannelClosed func(*channeldb.ChannelCloseSummary,
- ...channeldb.ChannelStatus) er.R
-
- // IsPendingClose is a boolean indicating whether the channel is marked
- // as pending close in the database.
- IsPendingClose bool
-
- // ClosingHeight is the height at which the channel was closed. Note
- // that this value is only valid if IsPendingClose is true.
- ClosingHeight uint32
-
- // CloseType is the type of the close event in case IsPendingClose is
- // true. Otherwise this value is unset.
- CloseType channeldb.ClosureType
-
- // MarkChannelResolved is a function closure that serves to mark a
- // channel as "fully resolved". A channel itself can be considered
- // fully resolved once all active contracts have individually been
- // fully resolved.
- //
- // TODO(roasbeef): need RPC's to combine for pendingchannels RPC
- MarkChannelResolved func() er.R
-
- // PutResolverReport records a resolver report for the channel. If the
- // transaction provided is nil, the function should write the report
- // in a new transaction.
- PutResolverReport func(tx kvdb.RwTx,
- report *channeldb.ResolverReport) er.R
-
- ChainArbitratorConfig
-}
-
-// ReportOutputType describes the type of output that is being reported
-// on.
-type ReportOutputType uint8
-
-const (
- // ReportOutputIncomingHtlc is an incoming hash time locked contract on
- // the commitment tx.
- ReportOutputIncomingHtlc ReportOutputType = iota
-
- // ReportOutputOutgoingHtlc is an outgoing hash time locked contract on
- // the commitment tx.
- ReportOutputOutgoingHtlc
-
- // ReportOutputUnencumbered is an uncontested output on the commitment
- // transaction paying to us directly.
- ReportOutputUnencumbered
-
- // ReportOutputAnchor is an anchor output on the commitment tx.
- ReportOutputAnchor
-)
-
-// ContractReport provides a summary of a commitment tx output.
-type ContractReport struct {
- // Outpoint is the final output that will be swept back to the wallet.
- Outpoint wire.OutPoint
-
- // Type indicates the type of the reported output.
- Type ReportOutputType
-
- // Amount is the final value that will be swept in back to the wallet.
- Amount btcutil.Amount
-
- // MaturityHeight is the absolute block height that this output will
- // mature at.
- MaturityHeight uint32
-
- // Stage indicates whether the htlc is in the CLTV-timeout stage (1) or
- // the CSV-delay stage (2). A stage 1 htlc's maturity height will be set
- // to its expiry height, while a stage 2 htlc's maturity height will be
- // set to its confirmation height plus the maturity requirement.
- Stage uint32
-
- // LimboBalance is the total number of frozen coins within this
- // contract.
- LimboBalance btcutil.Amount
-
- // RecoveredBalance is the total value that has been successfully swept
- // back to the user's wallet.
- RecoveredBalance btcutil.Amount
-}
-
-// resolverReport creates a resolve report using some of the information in the
-// contract report.
-func (c *ContractReport) resolverReport(spendTx *chainhash.Hash,
- resolverType channeldb.ResolverType,
- outcome channeldb.ResolverOutcome) *channeldb.ResolverReport {
-
- return &channeldb.ResolverReport{
- OutPoint: c.Outpoint,
- Amount: c.Amount,
- ResolverType: resolverType,
- ResolverOutcome: outcome,
- SpendTxID: spendTx,
- }
-}
-
-// htlcSet represents the set of active HTLCs on a given commitment
-// transaction.
-type htlcSet struct {
- // incomingHTLCs is a map of all incoming HTLCs on the target
- // commitment transaction. We may potentially go onchain to claim the
- // funds sent to us within this set.
- incomingHTLCs map[uint64]channeldb.HTLC
-
- // outgoingHTLCs is a map of all outgoing HTLCs on the target
- // commitment transaction. We may potentially go onchain to reclaim the
- // funds that are currently in limbo.
- outgoingHTLCs map[uint64]channeldb.HTLC
-}
-
-// newHtlcSet constructs a new HTLC set from a slice of HTLC's.
-func newHtlcSet(htlcs []channeldb.HTLC) htlcSet {
- outHTLCs := make(map[uint64]channeldb.HTLC)
- inHTLCs := make(map[uint64]channeldb.HTLC)
- for _, htlc := range htlcs {
- if htlc.Incoming {
- inHTLCs[htlc.HtlcIndex] = htlc
- continue
- }
-
- outHTLCs[htlc.HtlcIndex] = htlc
- }
-
- return htlcSet{
- incomingHTLCs: inHTLCs,
- outgoingHTLCs: outHTLCs,
- }
-}
-
-// HtlcSetKey is a two-tuple that uniquely identifies a set of HTLCs on a
-// commitment transaction.
-type HtlcSetKey struct {
- // IsRemote denotes if the HTLCs are on the remote commitment
- // transaction.
- IsRemote bool
-
- // IsPending denotes if the commitment transaction that HTLCS are on
- // are pending (the higher of two unrevoked commitments).
- IsPending bool
-}
-
-var (
- // LocalHtlcSet is the HtlcSetKey used for local commitments.
- LocalHtlcSet = HtlcSetKey{IsRemote: false, IsPending: false}
-
- // RemoteHtlcSet is the HtlcSetKey used for remote commitments.
- RemoteHtlcSet = HtlcSetKey{IsRemote: true, IsPending: false}
-
- // RemotePendingHtlcSet is the HtlcSetKey used for dangling remote
- // commitment transactions.
- RemotePendingHtlcSet = HtlcSetKey{IsRemote: true, IsPending: true}
-)
-
-// String returns a human readable string describing the target HtlcSetKey.
-func (h HtlcSetKey) String() string {
- switch h {
- case LocalHtlcSet:
- return "LocalHtlcSet"
- case RemoteHtlcSet:
- return "RemoteHtlcSet"
- case RemotePendingHtlcSet:
- return "RemotePendingHtlcSet"
- default:
- return "unknown HtlcSetKey"
- }
-}
-
-// ChannelArbitrator is the on-chain arbitrator for a particular channel. The
-// struct will keep in sync with the current set of HTLCs on the commitment
-// transaction. The job of the attendant is to go on-chain to either settle or
-// cancel an HTLC as necessary iff: an HTLC times out, or we known the
-// pre-image to an HTLC, but it wasn't settled by the link off-chain. The
-// ChannelArbitrator will factor in an expected confirmation delta when
-// broadcasting to ensure that we avoid any possibility of race conditions, and
-// sweep the output(s) without contest.
-type ChannelArbitrator struct {
- started int32 // To be used atomically.
- stopped int32 // To be used atomically.
-
- // startTimestamp is the time when this ChannelArbitrator was started.
- startTimestamp time.Time
-
- // log is a persistent log that the attendant will use to checkpoint
- // its next action, and the state of any unresolved contracts.
- log ArbitratorLog
-
- // activeHTLCs is the set of active incoming/outgoing HTLC's on all
- // currently valid commitment transactions.
- activeHTLCs map[HtlcSetKey]htlcSet
-
- // cfg contains all the functionality that the ChannelArbitrator requires
- // to do its duty.
- cfg ChannelArbitratorConfig
-
- // blocks is a channel that the arbitrator will receive new blocks on.
- // This channel should be buffered by so that it does not block the
- // sender.
- blocks chan int32
-
- // signalUpdates is a channel that any new live signals for the channel
- // we're watching over will be sent.
- signalUpdates chan *signalUpdateMsg
-
- // htlcUpdates is a channel that is sent upon with new updates from the
- // active channel. Each time a new commitment state is accepted, the
- // set of HTLC's on the new state should be sent across this channel.
- htlcUpdates <-chan *ContractUpdate
-
- // activeResolvers is a slice of any active resolvers. This is used to
- // be able to signal them for shutdown in the case that we shutdown.
- activeResolvers []ContractResolver
-
- // activeResolversLock prevents simultaneous read and write to the
- // resolvers slice.
- activeResolversLock sync.RWMutex
-
- // resolutionSignal is a channel that will be sent upon by contract
- // resolvers once their contract has been fully resolved. With each
- // send, we'll check to see if the contract is fully resolved.
- resolutionSignal chan struct{}
-
- // forceCloseReqs is a channel that requests to forcibly close the
- // contract will be sent over.
- forceCloseReqs chan *forceCloseReq
-
- // state is the current state of the arbitrator. This state is examined
- // upon start up to decide which actions to take.
- state ArbitratorState
-
- wg sync.WaitGroup
- quit chan struct{}
-}
-
-// NewChannelArbitrator returns a new instance of a ChannelArbitrator backed by
-// the passed config struct.
-func NewChannelArbitrator(cfg ChannelArbitratorConfig,
- htlcSets map[HtlcSetKey]htlcSet, log ArbitratorLog) *ChannelArbitrator {
-
- return &ChannelArbitrator{
- log: log,
- blocks: make(chan int32, arbitratorBlockBufferSize),
- signalUpdates: make(chan *signalUpdateMsg),
- htlcUpdates: make(<-chan *ContractUpdate),
- resolutionSignal: make(chan struct{}),
- forceCloseReqs: make(chan *forceCloseReq),
- activeHTLCs: htlcSets,
- cfg: cfg,
- quit: make(chan struct{}),
- }
-}
-
-// chanArbStartState contains the information from disk that we need to start
-// up a channel arbitrator.
-type chanArbStartState struct {
- currentState ArbitratorState
- commitSet *CommitSet
-}
-
-// getStartState retrieves the information from disk that our channel arbitrator
-// requires to start.
-func (c *ChannelArbitrator) getStartState(tx kvdb.RTx) (*chanArbStartState,
- er.R) {
-
- // First, we'll read our last state from disk, so our internal state
- // machine can act accordingly.
- state, err := c.log.CurrentState(tx)
- if err != nil {
- return nil, err
- }
-
- // Next we'll fetch our confirmed commitment set. This will only exist
- // if the channel has been closed out on chain for modern nodes. For
- // older nodes, this won't be found at all, and will rely on the
- // existing written chain actions. Additionally, if this channel hasn't
- // logged any actions in the log, then this field won't be present.
- commitSet, err := c.log.FetchConfirmedCommitSet(tx)
- if err != nil && !errNoCommitSet.Is(err) && !errScopeBucketNoExist.Is(err) {
- return nil, err
- }
-
- return &chanArbStartState{
- currentState: state,
- commitSet: commitSet,
- }, nil
-}
-
-// Start starts all the goroutines that the ChannelArbitrator needs to operate.
-// If takes a start state, which will be looked up on disk if it is not
-// provided.
-func (c *ChannelArbitrator) Start(state *chanArbStartState) er.R {
- if !atomic.CompareAndSwapInt32(&c.started, 0, 1) {
- return nil
- }
- c.startTimestamp = c.cfg.Clock.Now()
-
- // If the state passed in is nil, we look it up now.
- if state == nil {
- var err er.R
- state, err = c.getStartState(nil)
- if err != nil {
- return err
- }
- }
-
- log.Debugf("Starting ChannelArbitrator(%v), htlc_set=%v",
- c.cfg.ChanPoint, log.C(func() string {
- return spew.Sdump(c.activeHTLCs)
- }),
- )
-
- // Set our state from our starting state.
- c.state = state.currentState
-
- _, bestHeight, err := c.cfg.ChainIO.GetBestBlock()
- if err != nil {
- return err
- }
-
- // If the channel has been marked pending close in the database, and we
- // haven't transitioned the state machine to StateContractClosed (or a
- // succeeding state), then a state transition most likely failed. We'll
- // try to recover from this by manually advancing the state by setting
- // the corresponding close trigger.
- trigger := chainTrigger
- triggerHeight := uint32(bestHeight)
- if c.cfg.IsPendingClose {
- switch c.state {
- case StateDefault:
- fallthrough
- case StateBroadcastCommit:
- fallthrough
- case StateCommitmentBroadcasted:
- switch c.cfg.CloseType {
-
- case channeldb.CooperativeClose:
- trigger = coopCloseTrigger
-
- case channeldb.BreachClose:
- trigger = breachCloseTrigger
-
- case channeldb.LocalForceClose:
- trigger = localCloseTrigger
-
- case channeldb.RemoteForceClose:
- trigger = remoteCloseTrigger
- }
-
- log.Warnf("ChannelArbitrator(%v): detected stalled "+
- "state=%v for closed channel",
- c.cfg.ChanPoint, c.state)
- }
-
- triggerHeight = c.cfg.ClosingHeight
- }
-
- log.Infof("ChannelArbitrator(%v): starting state=%v, trigger=%v, "+
- "triggerHeight=%v", c.cfg.ChanPoint, c.state, trigger,
- triggerHeight)
-
- // We'll now attempt to advance our state forward based on the current
- // on-chain state, and our set of active contracts.
- startingState := c.state
- nextState, _, err := c.advanceState(
- triggerHeight, trigger, state.commitSet,
- )
- if err != nil {
- switch {
-
- // If we detect that we tried to fetch resolutions, but failed,
- // this channel was marked closed in the database before
- // resolutions successfully written. In this case there is not
- // much we can do, so we don't return the error.
- case errScopeBucketNoExist.Is(err):
- fallthrough
- case errNoResolutions.Is(err):
- log.Warnf("ChannelArbitrator(%v): detected closed"+
- "channel with no contract resolutions written.",
- c.cfg.ChanPoint)
-
- default:
- return err
- }
- }
-
- // If we start and ended at the awaiting full resolution state, then
- // we'll relaunch our set of unresolved contracts.
- if startingState == StateWaitingFullResolution &&
- nextState == StateWaitingFullResolution {
-
- // In order to relaunch the resolvers, we'll need to fetch the
- // set of HTLCs that were present in the commitment transaction
- // at the time it was confirmed. commitSet.ConfCommitKey can't
- // be nil at this point since we're in
- // StateWaitingFullResolution. We can only be in
- // StateWaitingFullResolution after we've transitioned from
- // StateContractClosed which can only be triggered by the local
- // or remote close trigger. This trigger is only fired when we
- // receive a chain event from the chain watcher than the
- // commitment has been confirmed on chain, and before we
- // advance our state step, we call InsertConfirmedCommitSet.
- err := c.relaunchResolvers(state.commitSet, triggerHeight)
- if err != nil {
- return err
- }
- }
-
- c.wg.Add(1)
- go c.channelAttendant(bestHeight)
- return nil
-}
-
-// relauchResolvers relaunches the set of resolvers for unresolved contracts in
-// order to provide them with information that's not immediately available upon
-// starting the ChannelArbitrator. This information should ideally be stored in
-// the database, so this only serves as a intermediate work-around to prevent a
-// migration.
-func (c *ChannelArbitrator) relaunchResolvers(commitSet *CommitSet,
- heightHint uint32) er.R {
-
- // We'll now query our log to see if there are any active unresolved
- // contracts. If this is the case, then we'll relaunch all contract
- // resolvers.
- unresolvedContracts, err := c.log.FetchUnresolvedContracts()
- if err != nil {
- return err
- }
-
- // Retrieve the commitment tx hash from the log.
- contractResolutions, err := c.log.FetchContractResolutions()
- if err != nil {
- log.Errorf("unable to fetch contract resolutions: %v",
- err)
- return err
- }
- commitHash := contractResolutions.CommitHash
-
- // In prior versions of lnd, the information needed to supplement the
- // resolvers (in most cases, the full amount of the HTLC) was found in
- // the chain action map, which is now deprecated. As a result, if the
- // commitSet is nil (an older node with unresolved HTLCs at time of
- // upgrade), then we'll use the chain action information in place. The
- // chain actions may exclude some information, but we cannot recover it
- // for these older nodes at the moment.
- var confirmedHTLCs []channeldb.HTLC
- if commitSet != nil {
- confirmedHTLCs = commitSet.HtlcSets[*commitSet.ConfCommitKey]
- } else {
- chainActions, err := c.log.FetchChainActions()
- if err != nil {
- log.Errorf("unable to fetch chain actions: %v", err)
- return err
- }
- for _, htlcs := range chainActions {
- confirmedHTLCs = append(confirmedHTLCs, htlcs...)
- }
- }
-
- // Reconstruct the htlc outpoints and data from the chain action log.
- // The purpose of the constructed htlc map is to supplement to
- // resolvers restored from database with extra data. Ideally this data
- // is stored as part of the resolver in the log. This is a workaround
- // to prevent a db migration. We use all available htlc sets here in
- // order to ensure we have complete coverage.
- htlcMap := make(map[wire.OutPoint]*channeldb.HTLC)
- for _, htlc := range confirmedHTLCs {
- htlc := htlc
- outpoint := wire.OutPoint{
- Hash: commitHash,
- Index: uint32(htlc.OutputIndex),
- }
- htlcMap[outpoint] = &htlc
- }
-
- log.Infof("ChannelArbitrator(%v): relaunching %v contract "+
- "resolvers", c.cfg.ChanPoint, len(unresolvedContracts))
-
- for _, resolver := range unresolvedContracts {
- htlcResolver, ok := resolver.(htlcContractResolver)
- if !ok {
- continue
- }
-
- htlcPoint := htlcResolver.HtlcPoint()
- htlc, ok := htlcMap[htlcPoint]
- if !ok {
- return er.Errorf(
- "htlc resolver %T unavailable", resolver,
- )
- }
-
- htlcResolver.Supplement(*htlc)
- }
-
- // The anchor resolver is stateless and can always be re-instantiated.
- if contractResolutions.AnchorResolution != nil {
- anchorResolver := newAnchorResolver(
- contractResolutions.AnchorResolution.AnchorSignDescriptor,
- contractResolutions.AnchorResolution.CommitAnchor,
- heightHint, c.cfg.ChanPoint,
- ResolverConfig{
- ChannelArbitratorConfig: c.cfg,
- },
- )
- unresolvedContracts = append(unresolvedContracts, anchorResolver)
- }
-
- c.launchResolvers(unresolvedContracts)
-
- return nil
-}
-
-// Report returns htlc reports for the active resolvers.
-func (c *ChannelArbitrator) Report() []*ContractReport {
- c.activeResolversLock.RLock()
- defer c.activeResolversLock.RUnlock()
-
- var reports []*ContractReport
- for _, resolver := range c.activeResolvers {
- r, ok := resolver.(reportingContractResolver)
- if !ok {
- continue
- }
-
- report := r.report()
- if report == nil {
- continue
- }
-
- reports = append(reports, report)
- }
-
- return reports
-}
-
-// Stop signals the ChannelArbitrator for a graceful shutdown.
-func (c *ChannelArbitrator) Stop() er.R {
- if !atomic.CompareAndSwapInt32(&c.stopped, 0, 1) {
- return nil
- }
-
- log.Debugf("Stopping ChannelArbitrator(%v)", c.cfg.ChanPoint)
-
- if c.cfg.ChainEvents.Cancel != nil {
- go c.cfg.ChainEvents.Cancel()
- }
-
- c.activeResolversLock.RLock()
- for _, activeResolver := range c.activeResolvers {
- activeResolver.Stop()
- }
- c.activeResolversLock.RUnlock()
-
- close(c.quit)
- c.wg.Wait()
-
- return nil
-}
-
-// transitionTrigger is an enum that denotes exactly *why* a state transition
-// was initiated. This is useful as depending on the initial trigger, we may
-// skip certain states as those actions are expected to have already taken
-// place as a result of the external trigger.
-type transitionTrigger uint8
-
-const (
- // chainTrigger is a transition trigger that has been attempted due to
- // changing on-chain conditions such as a block which times out HTLC's
- // being attached.
- chainTrigger transitionTrigger = iota
-
- // userTrigger is a transition trigger driven by user action. Examples
- // of such a trigger include a user requesting a force closure of the
- // channel.
- userTrigger
-
- // remoteCloseTrigger is a transition trigger driven by the remote
- // peer's commitment being confirmed.
- remoteCloseTrigger
-
- // localCloseTrigger is a transition trigger driven by our commitment
- // being confirmed.
- localCloseTrigger
-
- // coopCloseTrigger is a transition trigger driven by a cooperative
- // close transaction being confirmed.
- coopCloseTrigger
-
- // breachCloseTrigger is a transition trigger driven by a remote breach
- // being confirmed. In this case the channel arbitrator won't have to
- // do anything, so we'll just clean up and exit gracefully.
- breachCloseTrigger
-)
-
-// String returns a human readable string describing the passed
-// transitionTrigger.
-func (t transitionTrigger) String() string {
- switch t {
- case chainTrigger:
- return "chainTrigger"
-
- case remoteCloseTrigger:
- return "remoteCloseTrigger"
-
- case userTrigger:
- return "userTrigger"
-
- case localCloseTrigger:
- return "localCloseTrigger"
-
- case coopCloseTrigger:
- return "coopCloseTrigger"
-
- case breachCloseTrigger:
- return "breachCloseTrigger"
-
- default:
- return "unknown trigger"
- }
-}
-
-// stateStep is a help method that examines our internal state, and attempts
-// the appropriate state transition if necessary. The next state we transition
-// to is returned, Additionally, if the next transition results in a commitment
-// broadcast, the commitment transaction itself is returned.
-func (c *ChannelArbitrator) stateStep(
- triggerHeight uint32, trigger transitionTrigger,
- confCommitSet *CommitSet) (ArbitratorState, *wire.MsgTx, er.R) {
-
- var (
- nextState ArbitratorState
- closeTx *wire.MsgTx
- )
- switch c.state {
-
- // If we're in the default state, then we'll check our set of actions
- // to see if while we were down, conditions have changed.
- case StateDefault:
- log.Debugf("ChannelArbitrator(%v): new block (height=%v) "+
- "examining active HTLC's", c.cfg.ChanPoint,
- triggerHeight)
-
- // As a new block has been connected to the end of the main
- // chain, we'll check to see if we need to make any on-chain
- // claims on behalf of the channel contract that we're
- // arbitrating for. If a commitment has confirmed, then we'll
- // use the set snapshot from the chain, otherwise we'll use our
- // current set.
- var htlcs map[HtlcSetKey]htlcSet
- if confCommitSet != nil {
- htlcs = confCommitSet.toActiveHTLCSets()
- } else {
- htlcs = c.activeHTLCs
- }
- chainActions, err := c.checkLocalChainActions(
- triggerHeight, trigger, htlcs, false,
- )
- if err != nil {
- return StateDefault, nil, err
- }
-
- // If there are no actions to be made, then we'll remain in the
- // default state. If this isn't a self initiated event (we're
- // checking due to a chain update), then we'll exit now.
- if len(chainActions) == 0 && trigger == chainTrigger {
- log.Tracef("ChannelArbitrator(%v): no actions for "+
- "chain trigger, terminating", c.cfg.ChanPoint)
-
- return StateDefault, closeTx, nil
- }
-
- // Otherwise, we'll log that we checked the HTLC actions as the
- // commitment transaction has already been broadcast.
- log.Tracef("ChannelArbitrator(%v): logging chain_actions=%v",
- c.cfg.ChanPoint,
- log.C(func() string {
- return spew.Sdump(chainActions)
- }))
-
- // Depending on the type of trigger, we'll either "tunnel"
- // through to a farther state, or just proceed linearly to the
- // next state.
- switch trigger {
-
- // If this is a chain trigger, then we'll go straight to the
- // next state, as we still need to broadcast the commitment
- // transaction.
- case chainTrigger:
- fallthrough
- case userTrigger:
- nextState = StateBroadcastCommit
-
- // If the trigger is a cooperative close being confirmed, then
- // we can go straight to StateFullyResolved, as there won't be
- // any contracts to resolve. The same is true in the case of a
- // breach.
- case coopCloseTrigger, breachCloseTrigger:
- nextState = StateFullyResolved
-
- // Otherwise, if this state advance was triggered by a
- // commitment being confirmed on chain, then we'll jump
- // straight to the state where the contract has already been
- // closed, and we will inspect the set of unresolved contracts.
- case localCloseTrigger:
- log.Errorf("ChannelArbitrator(%v): unexpected local "+
- "commitment confirmed while in StateDefault",
- c.cfg.ChanPoint)
- fallthrough
- case remoteCloseTrigger:
- nextState = StateContractClosed
- }
-
- // If we're in this state, then we've decided to broadcast the
- // commitment transaction. We enter this state either due to an outside
- // sub-system, or because an on-chain action has been triggered.
- case StateBroadcastCommit:
- // Under normal operation, we can only enter
- // StateBroadcastCommit via a user or chain trigger. On restart,
- // this state may be reexecuted after closing the channel, but
- // failing to commit to StateContractClosed or
- // StateFullyResolved. In that case, one of the four close
- // triggers will be presented, signifying that we should skip
- // rebroadcasting, and go straight to resolving the on-chain
- // contract or marking the channel resolved.
- switch trigger {
- case localCloseTrigger, remoteCloseTrigger:
- log.Infof("ChannelArbitrator(%v): detected %s "+
- "close after closing channel, fast-forwarding "+
- "to %s to resolve contract",
- c.cfg.ChanPoint, trigger, StateContractClosed)
- return StateContractClosed, closeTx, nil
-
- case coopCloseTrigger, breachCloseTrigger:
- log.Infof("ChannelArbitrator(%v): detected %s "+
- "close after closing channel, fast-forwarding "+
- "to %s to resolve contract",
- c.cfg.ChanPoint, trigger, StateFullyResolved)
- return StateFullyResolved, closeTx, nil
- }
-
- log.Infof("ChannelArbitrator(%v): force closing "+
- "chan", c.cfg.ChanPoint)
-
- // Now that we have all the actions decided for the set of
- // HTLC's, we'll broadcast the commitment transaction, and
- // signal the link to exit.
-
- // We'll tell the switch that it should remove the link for
- // this channel, in addition to fetching the force close
- // summary needed to close this channel on chain.
- closeSummary, err := c.cfg.Channel.ForceCloseChan()
- if err != nil {
- log.Errorf("ChannelArbitrator(%v): unable to "+
- "force close: %v", c.cfg.ChanPoint, err)
- return StateError, closeTx, err
- }
- closeTx = closeSummary.CloseTx
-
- // Before publishing the transaction, we store it to the
- // database, such that we can re-publish later in case it
- // didn't propagate. We initiated the force close, so we
- // mark broadcast with local initiator set to true.
- err = c.cfg.MarkCommitmentBroadcasted(closeTx, true)
- if err != nil {
- log.Errorf("ChannelArbitrator(%v): unable to "+
- "mark commitment broadcasted: %v",
- c.cfg.ChanPoint, err)
- return StateError, closeTx, err
- }
-
- // With the close transaction in hand, broadcast the
- // transaction to the network, thereby entering the post
- // channel resolution state.
- log.Infof("Broadcasting force close transaction %v, "+
- "ChannelPoint(%v): %v", closeTx.TxHash(),
- c.cfg.ChanPoint,
- log.C(func() string {
- return spew.Sdump(closeTx)
- }))
-
- // At this point, we'll now broadcast the commitment
- // transaction itself.
- label := labels.MakeLabel(
- labels.LabelTypeChannelClose, &c.cfg.ShortChanID,
- )
-
- if err := c.cfg.PublishTx(closeTx, label); err != nil {
- log.Errorf("ChannelArbitrator(%v): unable to broadcast "+
- "close tx: %v", c.cfg.ChanPoint, err)
- if !lnwallet.ErrDoubleSpend.Is(err) {
- return StateError, closeTx, err
- }
- }
-
- // We go to the StateCommitmentBroadcasted state, where we'll
- // be waiting for the commitment to be confirmed.
- nextState = StateCommitmentBroadcasted
-
- // In this state we have broadcasted our own commitment, and will need
- // to wait for a commitment (not necessarily the one we broadcasted!)
- // to be confirmed.
- case StateCommitmentBroadcasted:
- switch trigger {
-
- // We are waiting for a commitment to be confirmed.
- case chainTrigger, userTrigger:
- // The commitment transaction has been broadcast, but it
- // doesn't necessarily need to be the commitment
- // transaction version that is going to be confirmed. To
- // be sure that any of those versions can be anchored
- // down, we now submit all anchor resolutions to the
- // sweeper. The sweeper will keep trying to sweep all of
- // them.
- //
- // Note that the sweeper is idempotent. If we ever
- // happen to end up at this point in the code again, no
- // harm is done by re-offering the anchors to the
- // sweeper.
- anchors, err := c.cfg.Channel.NewAnchorResolutions()
- if err != nil {
- return StateError, closeTx, err
- }
-
- err = c.sweepAnchors(anchors, triggerHeight)
- if err != nil {
- return StateError, closeTx, err
- }
-
- nextState = StateCommitmentBroadcasted
-
- // If this state advance was triggered by any of the
- // commitments being confirmed, then we'll jump to the state
- // where the contract has been closed.
- case localCloseTrigger, remoteCloseTrigger:
- nextState = StateContractClosed
-
- // If a coop close or breach was confirmed, jump straight to
- // the fully resolved state.
- case coopCloseTrigger, breachCloseTrigger:
- nextState = StateFullyResolved
- }
-
- log.Infof("ChannelArbitrator(%v): trigger %v moving from "+
- "state %v to %v", c.cfg.ChanPoint, trigger, c.state,
- nextState)
-
- // If we're in this state, then the contract has been fully closed to
- // outside sub-systems, so we'll process the prior set of on-chain
- // contract actions and launch a set of resolvers.
- case StateContractClosed:
- // First, we'll fetch our chain actions, and both sets of
- // resolutions so we can process them.
- contractResolutions, err := c.log.FetchContractResolutions()
- if err != nil {
- log.Errorf("unable to fetch contract resolutions: %v",
- err)
- return StateError, closeTx, err
- }
-
- // If the resolution is empty, and we have no HTLCs at all to
- // tend to, then we're done here. We don't need to launch any
- // resolvers, and can go straight to our final state.
- if contractResolutions.IsEmpty() && confCommitSet.IsEmpty() {
- log.Infof("ChannelArbitrator(%v): contract "+
- "resolutions empty, marking channel as fully resolved!",
- c.cfg.ChanPoint)
- nextState = StateFullyResolved
- break
- }
-
- // Now that we know we'll need to act, we'll process the htlc
- // actions, wen create the structures we need to resolve all
- // outstanding contracts.
- htlcResolvers, pktsToSend, err := c.prepContractResolutions(
- contractResolutions, triggerHeight, trigger,
- confCommitSet,
- )
- if err != nil {
- log.Errorf("ChannelArbitrator(%v): unable to "+
- "resolve contracts: %v", c.cfg.ChanPoint, err)
- return StateError, closeTx, err
- }
-
- log.Debugf("ChannelArbitrator(%v): sending resolution message=%v",
- c.cfg.ChanPoint,
- log.C(func() string {
- return spew.Sdump(pktsToSend)
- }))
-
- // With the commitment broadcast, we'll then send over all
- // messages we can send immediately.
- if len(pktsToSend) != 0 {
- err := c.cfg.DeliverResolutionMsg(pktsToSend...)
- if err != nil {
- // TODO(roasbeef): make sure packet sends are
- // idempotent
- log.Errorf("unable to send pkts: %v", err)
- return StateError, closeTx, err
- }
- }
-
- log.Debugf("ChannelArbitrator(%v): inserting %v contract "+
- "resolvers", c.cfg.ChanPoint, len(htlcResolvers))
-
- err = c.log.InsertUnresolvedContracts(nil, htlcResolvers...)
- if err != nil {
- return StateError, closeTx, err
- }
-
- // Finally, we'll launch all the required contract resolvers.
- // Once they're all resolved, we're no longer needed.
- c.launchResolvers(htlcResolvers)
-
- nextState = StateWaitingFullResolution
-
- // This is our terminal state. We'll keep returning this state until
- // all contracts are fully resolved.
- case StateWaitingFullResolution:
- log.Infof("ChannelArbitrator(%v): still awaiting contract "+
- "resolution", c.cfg.ChanPoint)
-
- numUnresolved, err := c.log.FetchUnresolvedContracts()
- if err != nil {
- return StateError, closeTx, err
- }
-
- // If we still have unresolved contracts, then we'll stay alive
- // to oversee their resolution.
- if len(numUnresolved) != 0 {
- nextState = StateWaitingFullResolution
- break
- }
-
- nextState = StateFullyResolved
-
- // If we start as fully resolved, then we'll end as fully resolved.
- case StateFullyResolved:
- // To ensure that the state of the contract in persistent
- // storage is properly reflected, we'll mark the contract as
- // fully resolved now.
- nextState = StateFullyResolved
-
- log.Infof("ChannelPoint(%v) has been fully resolved "+
- "on-chain at height=%v", c.cfg.ChanPoint, triggerHeight)
-
- if err := c.cfg.MarkChannelResolved(); err != nil {
- log.Errorf("unable to mark channel resolved: %v", err)
- return StateError, closeTx, err
- }
- }
-
- log.Tracef("ChannelArbitrator(%v): next_state=%v", c.cfg.ChanPoint,
- nextState)
-
- return nextState, closeTx, nil
-}
-
-// sweepAnchors offers all given anchor resolutions to the sweeper. It requests
-// sweeping at the minimum fee rate. This fee rate can be upped manually by the
-// user via the BumpFee rpc.
-func (c *ChannelArbitrator) sweepAnchors(anchors []*lnwallet.AnchorResolution,
- heightHint uint32) er.R {
-
- // Use the chan id as the exclusive group. This prevents any of the
- // anchors from being batched together.
- exclusiveGroup := c.cfg.ShortChanID.ToUint64()
-
- for _, anchor := range anchors {
- log.Debugf("ChannelArbitrator(%v): pre-confirmation sweep of "+
- "anchor of tx %v", c.cfg.ChanPoint, anchor.CommitAnchor)
-
- // Prepare anchor output for sweeping.
- anchorInput := input.MakeBaseInput(
- &anchor.CommitAnchor,
- input.CommitmentAnchor,
- &anchor.AnchorSignDescriptor,
- heightHint,
- &input.TxInfo{
- Fee: anchor.CommitFee,
- Weight: anchor.CommitWeight,
- },
- )
-
- // Sweep anchor output with a confirmation target fee
- // preference. Because this is a cpfp-operation, the anchor will
- // only be attempted to sweep when the current fee estimate for
- // the confirmation target exceeds the commit fee rate.
- //
- // Also signal that this is a force sweep, so that the anchor
- // will be swept even if it isn't economical purely based on the
- // anchor value.
- _, err := c.cfg.Sweeper.SweepInput(
- &anchorInput,
- sweep.Params{
- Fee: sweep.FeePreference{
- ConfTarget: anchorSweepConfTarget,
- },
- Force: true,
- ExclusiveGroup: &exclusiveGroup,
- },
- )
- if err != nil {
- return err
- }
- }
-
- return nil
-}
-
-// launchResolvers updates the activeResolvers list and starts the resolvers.
-func (c *ChannelArbitrator) launchResolvers(resolvers []ContractResolver) {
- c.activeResolversLock.Lock()
- defer c.activeResolversLock.Unlock()
-
- c.activeResolvers = resolvers
- for _, contract := range resolvers {
- c.wg.Add(1)
- go c.resolveContract(contract)
- }
-}
-
-// advanceState is the main driver of our state machine. This method is an
-// iterative function which repeatedly attempts to advance the internal state
-// of the channel arbitrator. The state will be advanced until we reach a
-// redundant transition, meaning that the state transition is a noop. The final
-// param is a callback that allows the caller to execute an arbitrary action
-// after each state transition.
-func (c *ChannelArbitrator) advanceState(
- triggerHeight uint32, trigger transitionTrigger,
- confCommitSet *CommitSet) (ArbitratorState, *wire.MsgTx, er.R) {
-
- var (
- priorState ArbitratorState
- forceCloseTx *wire.MsgTx
- )
-
- // We'll continue to advance our state forward until the state we
- // transition to is that same state that we started at.
- for {
- priorState = c.state
- log.Tracef("ChannelArbitrator(%v): attempting state step with "+
- "trigger=%v from state=%v", c.cfg.ChanPoint, trigger,
- priorState)
-
- nextState, closeTx, err := c.stateStep(
- triggerHeight, trigger, confCommitSet,
- )
- if err != nil {
- log.Errorf("ChannelArbitrator(%v): unable to advance "+
- "state: %v", c.cfg.ChanPoint, err)
- return priorState, nil, err
- }
-
- if forceCloseTx == nil && closeTx != nil {
- forceCloseTx = closeTx
- }
-
- // Our termination transition is a noop transition. If we get
- // our prior state back as the next state, then we'll
- // terminate.
- if nextState == priorState {
- log.Tracef("ChannelArbitrator(%v): terminating at "+
- "state=%v", c.cfg.ChanPoint, nextState)
- return nextState, forceCloseTx, nil
- }
-
- // As the prior state was successfully executed, we can now
- // commit the next state. This ensures that we will re-execute
- // the prior state if anything fails.
- if err := c.log.CommitState(nextState); err != nil {
- log.Errorf("ChannelArbitrator(%v): unable to commit "+
- "next state(%v): %v", c.cfg.ChanPoint,
- nextState, err)
- return priorState, nil, err
- }
- c.state = nextState
- }
-}
-
-// ChainAction is an enum that encompasses all possible on-chain actions
-// we'll take for a set of HTLC's.
-type ChainAction uint8
-
-const (
- // NoAction is the min chainAction type, indicating that no action
- // needs to be taken for a given HTLC.
- NoAction ChainAction = 0
-
- // HtlcTimeoutAction indicates that the HTLC will timeout soon. As a
- // result, we should get ready to sweep it on chain after the timeout.
- HtlcTimeoutAction = 1
-
- // HtlcClaimAction indicates that we should claim the HTLC on chain
- // before its timeout period.
- HtlcClaimAction = 2
-
- // HtlcFailNowAction indicates that we should fail an outgoing HTLC
- // immediately by cancelling it backwards as it has no corresponding
- // output in our commitment transaction.
- HtlcFailNowAction = 3
-
- // HtlcOutgoingWatchAction indicates that we can't yet timeout this
- // HTLC, but we had to go to chain on order to resolve an existing
- // HTLC. In this case, we'll either: time it out once it expires, or
- // will learn the pre-image if the remote party claims the output. In
- // this case, well add the pre-image to our global store.
- HtlcOutgoingWatchAction = 4
-
- // HtlcIncomingWatchAction indicates that we don't yet have the
- // pre-image to claim incoming HTLC, but we had to go to chain in order
- // to resolve and existing HTLC. In this case, we'll either: let the
- // other party time it out, or eventually learn of the pre-image, in
- // which case we'll claim on chain.
- HtlcIncomingWatchAction = 5
-)
-
-// String returns a human readable string describing a chain action.
-func (c ChainAction) String() string {
- switch c {
- case NoAction:
- return "NoAction"
-
- case HtlcTimeoutAction:
- return "HtlcTimeoutAction"
-
- case HtlcClaimAction:
- return "HtlcClaimAction"
-
- case HtlcFailNowAction:
- return "HtlcFailNowAction"
-
- case HtlcOutgoingWatchAction:
- return "HtlcOutgoingWatchAction"
-
- case HtlcIncomingWatchAction:
- return "HtlcIncomingWatchAction"
-
- default:
- return ""
- }
-}
-
-// ChainActionMap is a map of a chain action, to the set of HTLC's that need to
-// be acted upon for a given action type. The channel
-type ChainActionMap map[ChainAction][]channeldb.HTLC
-
-// Merge merges the passed chain actions with the target chain action map.
-func (c ChainActionMap) Merge(actions ChainActionMap) {
- for chainAction, htlcs := range actions {
- c[chainAction] = append(c[chainAction], htlcs...)
- }
-}
-
-// shouldGoOnChain takes into account the absolute timeout of the HTLC, if the
-// confirmation delta that we need is close, and returns a bool indicating if
-// we should go on chain to claim. We do this rather than waiting up until the
-// last minute as we want to ensure that when we *need* (HTLC is timed out) to
-// sweep, the commitment is already confirmed.
-func (c *ChannelArbitrator) shouldGoOnChain(htlc channeldb.HTLC,
- broadcastDelta, currentHeight uint32) bool {
-
- // We'll calculate the broadcast cut off for this HTLC. This is the
- // height that (based on our current fee estimation) we should
- // broadcast in order to ensure the commitment transaction is confirmed
- // before the HTLC fully expires.
- broadcastCutOff := htlc.RefundTimeout - broadcastDelta
-
- log.Tracef("ChannelArbitrator(%v): examining outgoing contract: "+
- "expiry=%v, cutoff=%v, height=%v", c.cfg.ChanPoint, htlc.RefundTimeout,
- broadcastCutOff, currentHeight)
-
- // TODO(roasbeef): take into account default HTLC delta, don't need to
- // broadcast immediately
- // * can then batch with SINGLE | ANYONECANPAY
-
- // We should on-chain for this HTLC, iff we're within out broadcast
- // cutoff window.
- if currentHeight < broadcastCutOff {
- return false
- }
-
- // In case of incoming htlc we should go to chain.
- if htlc.Incoming {
- return true
- }
-
- // For htlcs that are result of our initiated payments we give some grace
- // period before force closing the channel. During this time we expect
- // both nodes to connect and give a chance to the other node to send its
- // updates and cancel the htlc.
- // This shouldn't add any security risk as there is no incoming htlc to
- // fulfill at this case and the expectation is that when the channel is
- // active the other node will send update_fail_htlc to remove the htlc
- // without closing the channel. It is up to the user to force close the
- // channel if the peer misbehaves and doesn't send the update_fail_htlc.
- // It is useful when this node is most of the time not online and is
- // likely to miss the time slot where the htlc may be cancelled.
- isForwarded := c.cfg.IsForwardedHTLC(c.cfg.ShortChanID, htlc.HtlcIndex)
- upTime := c.cfg.Clock.Now().Sub(c.startTimestamp)
- return isForwarded || upTime > c.cfg.PaymentsExpirationGracePeriod
-}
-
-// checkCommitChainActions is called for each new block connected to the end of
-// the main chain. Given the new block height, this new method will examine all
-// active HTLC's, and determine if we need to go on-chain to claim any of them.
-// A map of action -> []htlc is returned, detailing what action (if any) should
-// be performed for each HTLC. For timed out HTLC's, once the commitment has
-// been sufficiently confirmed, the HTLC's should be canceled backwards. For
-// redeemed HTLC's, we should send the pre-image back to the incoming link.
-func (c *ChannelArbitrator) checkCommitChainActions(height uint32,
- trigger transitionTrigger, htlcs htlcSet) (ChainActionMap, er.R) {
-
- // TODO(roasbeef): would need to lock channel? channel totem?
- // * race condition if adding and we broadcast, etc
- // * or would make each instance sync?
-
- log.Debugf("ChannelArbitrator(%v): checking commit chain actions at "+
- "height=%v, in_htlc_count=%v, out_htlc_count=%v",
- c.cfg.ChanPoint, height,
- len(htlcs.incomingHTLCs), len(htlcs.outgoingHTLCs))
-
- actionMap := make(ChainActionMap)
-
- // First, we'll make an initial pass over the set of incoming and
- // outgoing HTLC's to decide if we need to go on chain at all.
- haveChainActions := false
- for _, htlc := range htlcs.outgoingHTLCs {
- // We'll need to go on-chain for an outgoing HTLC if it was
- // never resolved downstream, and it's "close" to timing out.
- toChain := c.shouldGoOnChain(htlc, c.cfg.OutgoingBroadcastDelta,
- height,
- )
-
- if toChain {
- log.Debugf("ChannelArbitrator(%v): go to chain for "+
- "outgoing htlc %x: timeout=%v, "+
- "blocks_until_expiry=%v, broadcast_delta=%v",
- c.cfg.ChanPoint, htlc.RHash[:],
- htlc.RefundTimeout, htlc.RefundTimeout-height,
- c.cfg.OutgoingBroadcastDelta,
- )
- }
-
- haveChainActions = haveChainActions || toChain
- }
-
- for _, htlc := range htlcs.incomingHTLCs {
- // We'll need to go on-chain to pull an incoming HTLC iff we
- // know the pre-image and it's close to timing out. We need to
- // ensure that we claim the funds that our rightfully ours
- // on-chain.
- preimageAvailable, err := c.isPreimageAvailable(htlc.RHash)
- if err != nil {
- return nil, err
- }
-
- if !preimageAvailable {
- continue
- }
-
- toChain := c.shouldGoOnChain(htlc, c.cfg.IncomingBroadcastDelta,
- height,
- )
-
- if toChain {
- log.Debugf("ChannelArbitrator(%v): go to chain for "+
- "incoming htlc %x: timeout=%v, "+
- "blocks_until_expiry=%v, broadcast_delta=%v",
- c.cfg.ChanPoint, htlc.RHash[:],
- htlc.RefundTimeout, htlc.RefundTimeout-height,
- c.cfg.IncomingBroadcastDelta,
- )
- }
-
- haveChainActions = haveChainActions || toChain
- }
-
- // If we don't have any actions to make, then we'll return an empty
- // action map. We only do this if this was a chain trigger though, as
- // if we're going to broadcast the commitment (or the remote party did)
- // we're *forced* to act on each HTLC.
- if !haveChainActions && trigger == chainTrigger {
- log.Tracef("ChannelArbitrator(%v): no actions to take at "+
- "height=%v", c.cfg.ChanPoint, height)
- return actionMap, nil
- }
-
- // Now that we know we'll need to go on-chain, we'll examine all of our
- // active outgoing HTLC's to see if we either need to: sweep them after
- // a timeout (then cancel backwards), cancel them backwards
- // immediately, or watch them as they're still active contracts.
- for _, htlc := range htlcs.outgoingHTLCs {
- switch {
- // If the HTLC is dust, then we can cancel it backwards
- // immediately as there's no matching contract to arbitrate
- // on-chain. We know the HTLC is dust, if the OutputIndex
- // negative.
- case htlc.OutputIndex < 0:
- log.Tracef("ChannelArbitrator(%v): immediately "+
- "failing dust htlc=%x", c.cfg.ChanPoint,
- htlc.RHash[:])
-
- actionMap[HtlcFailNowAction] = append(
- actionMap[HtlcFailNowAction], htlc,
- )
-
- // If we don't need to immediately act on this HTLC, then we'll
- // mark it still "live". After we broadcast, we'll monitor it
- // until the HTLC times out to see if we can also redeem it
- // on-chain.
- case !c.shouldGoOnChain(htlc, c.cfg.OutgoingBroadcastDelta,
- height,
- ):
- // TODO(roasbeef): also need to be able to query
- // circuit map to see if HTLC hasn't been fully
- // resolved
- //
- // * can't fail incoming until if outgoing not yet
- // failed
-
- log.Tracef("ChannelArbitrator(%v): watching chain to "+
- "decide action for outgoing htlc=%x",
- c.cfg.ChanPoint, htlc.RHash[:])
-
- actionMap[HtlcOutgoingWatchAction] = append(
- actionMap[HtlcOutgoingWatchAction], htlc,
- )
-
- // Otherwise, we'll update our actionMap to mark that we need
- // to sweep this HTLC on-chain
- default:
- log.Tracef("ChannelArbitrator(%v): going on-chain to "+
- "timeout htlc=%x", c.cfg.ChanPoint, htlc.RHash[:])
-
- actionMap[HtlcTimeoutAction] = append(
- actionMap[HtlcTimeoutAction], htlc,
- )
- }
- }
-
- // Similarly, for each incoming HTLC, now that we need to go on-chain,
- // we'll either: sweep it immediately if we know the pre-image, or
- // observe the output on-chain if we don't In this last, case we'll
- // either learn of it eventually from the outgoing HTLC, or the sender
- // will timeout the HTLC.
- for _, htlc := range htlcs.incomingHTLCs {
- // If the HTLC is dust, there is no action to be taken.
- if htlc.OutputIndex < 0 {
- log.Debugf("ChannelArbitrator(%v): no resolution "+
- "needed for incoming dust htlc=%x",
- c.cfg.ChanPoint, htlc.RHash[:])
-
- continue
- }
-
- log.Tracef("ChannelArbitrator(%v): watching chain to decide "+
- "action for incoming htlc=%x", c.cfg.ChanPoint,
- htlc.RHash[:])
-
- actionMap[HtlcIncomingWatchAction] = append(
- actionMap[HtlcIncomingWatchAction], htlc,
- )
- }
-
- return actionMap, nil
-}
-
-// isPreimageAvailable returns whether the hash preimage is available in either
-// the preimage cache or the invoice database.
-func (c *ChannelArbitrator) isPreimageAvailable(hash lntypes.Hash) (bool,
- er.R) {
-
- // Start by checking the preimage cache for preimages of
- // forwarded HTLCs.
- _, preimageAvailable := c.cfg.PreimageDB.LookupPreimage(
- hash,
- )
- if preimageAvailable {
- return true, nil
- }
-
- // Then check if we have an invoice that can be settled by this HTLC.
- //
- // TODO(joostjager): Check that there are still more blocks remaining
- // than the invoice cltv delta. We don't want to go to chain only to
- // have the incoming contest resolver decide that we don't want to
- // settle this invoice.
- invoice, err := c.cfg.Registry.LookupInvoice(hash)
- switch {
- case err == nil:
- case channeldb.ErrInvoiceNotFound.Is(err), channeldb.ErrNoInvoicesCreated.Is(err):
- return false, nil
- default:
- return false, err
- }
-
- preimageAvailable = invoice.Terms.PaymentPreimage != nil
-
- return preimageAvailable, nil
-}
-
-// checkLocalChainActions is similar to checkCommitChainActions, but it also
-// examines the set of HTLCs on the remote party's commitment. This allows us
-// to ensure we're able to satisfy the HTLC timeout constraints for incoming vs
-// outgoing HTLCs.
-func (c *ChannelArbitrator) checkLocalChainActions(
- height uint32, trigger transitionTrigger,
- activeHTLCs map[HtlcSetKey]htlcSet,
- commitsConfirmed bool) (ChainActionMap, er.R) {
-
- // First, we'll check our local chain actions as normal. This will only
- // examine HTLCs on our local commitment (timeout or settle).
- localCommitActions, err := c.checkCommitChainActions(
- height, trigger, activeHTLCs[LocalHtlcSet],
- )
- if err != nil {
- return nil, err
- }
-
- // Next, we'll examine the remote commitment (and maybe a dangling one)
- // to see if the set difference of our HTLCs is non-empty. If so, then
- // we may need to cancel back some HTLCs if we decide go to chain.
- remoteDanglingActions := c.checkRemoteDanglingActions(
- height, activeHTLCs, commitsConfirmed,
- )
-
- // Finally, we'll merge the two set of chain actions.
- localCommitActions.Merge(remoteDanglingActions)
-
- return localCommitActions, nil
-}
-
-// checkRemoteDanglingActions examines the set of remote commitments for any
-// HTLCs that are close to timing out. If we find any, then we'll return a set
-// of chain actions for HTLCs that are on our commitment, but not theirs to
-// cancel immediately.
-func (c *ChannelArbitrator) checkRemoteDanglingActions(
- height uint32, activeHTLCs map[HtlcSetKey]htlcSet,
- commitsConfirmed bool) ChainActionMap {
-
- var (
- pendingRemoteHTLCs []channeldb.HTLC
- localHTLCs = make(map[uint64]struct{})
- remoteHTLCs = make(map[uint64]channeldb.HTLC)
- actionMap = make(ChainActionMap)
- )
-
- // First, we'll construct two sets of the outgoing HTLCs: those on our
- // local commitment, and those that are on the remote commitment(s).
- for htlcSetKey, htlcs := range activeHTLCs {
- if htlcSetKey.IsRemote {
- for _, htlc := range htlcs.outgoingHTLCs {
- remoteHTLCs[htlc.HtlcIndex] = htlc
- }
- } else {
- for _, htlc := range htlcs.outgoingHTLCs {
- localHTLCs[htlc.HtlcIndex] = struct{}{}
- }
- }
- }
-
- // With both sets constructed, we'll now compute the set difference of
- // our two sets of HTLCs. This'll give us the HTLCs that exist on the
- // remote commitment transaction, but not on ours.
- for htlcIndex, htlc := range remoteHTLCs {
- if _, ok := localHTLCs[htlcIndex]; ok {
- continue
- }
-
- pendingRemoteHTLCs = append(pendingRemoteHTLCs, htlc)
- }
-
- // Finally, we'll examine all the pending remote HTLCs for those that
- // have expired. If we find any, then we'll recommend that they be
- // failed now so we can free up the incoming HTLC.
- for _, htlc := range pendingRemoteHTLCs {
- // We'll now check if we need to go to chain in order to cancel
- // the incoming HTLC.
- goToChain := c.shouldGoOnChain(htlc, c.cfg.OutgoingBroadcastDelta,
- height,
- )
-
- // If we don't need to go to chain, and no commitments have
- // been confirmed, then we can move on. Otherwise, if
- // commitments have been confirmed, then we need to cancel back
- // *all* of the pending remote HTLCS.
- if !goToChain && !commitsConfirmed {
- continue
- }
-
- log.Tracef("ChannelArbitrator(%v): immediately failing "+
- "htlc=%x from remote commitment",
- c.cfg.ChanPoint, htlc.RHash[:])
-
- actionMap[HtlcFailNowAction] = append(
- actionMap[HtlcFailNowAction], htlc,
- )
- }
-
- return actionMap
-}
-
-// checkRemoteChainActions examines the two possible remote commitment chains
-// and returns the set of chain actions we need to carry out if the remote
-// commitment (non pending) confirms. The pendingConf indicates if the pending
-// remote commitment confirmed. This is similar to checkCommitChainActions, but
-// we'll immediately fail any HTLCs on the pending remote commit, but not the
-// remote commit (or the other way around).
-func (c *ChannelArbitrator) checkRemoteChainActions(
- height uint32, trigger transitionTrigger,
- activeHTLCs map[HtlcSetKey]htlcSet,
- pendingConf bool) (ChainActionMap, er.R) {
-
- // First, we'll examine all the normal chain actions on the remote
- // commitment that confirmed.
- confHTLCs := activeHTLCs[RemoteHtlcSet]
- if pendingConf {
- confHTLCs = activeHTLCs[RemotePendingHtlcSet]
- }
- remoteCommitActions, err := c.checkCommitChainActions(
- height, trigger, confHTLCs,
- )
- if err != nil {
- return nil, err
- }
-
- // With this actions computed, we'll now check the diff of the HTLCs on
- // the commitments, and cancel back any that are on the pending but not
- // the non-pending.
- remoteDiffActions := c.checkRemoteDiffActions(
- height, activeHTLCs, pendingConf,
- )
-
- // Finally, we'll merge all the chain actions and the final set of
- // chain actions.
- remoteCommitActions.Merge(remoteDiffActions)
- return remoteCommitActions, nil
-}
-
-// checkRemoteDiffActions checks the set difference of the HTLCs on the remote
-// confirmed commit and remote dangling commit for HTLCS that we need to cancel
-// back. If we find any HTLCs on the remote pending but not the remote, then
-// we'll mark them to be failed immediately.
-func (c *ChannelArbitrator) checkRemoteDiffActions(height uint32,
- activeHTLCs map[HtlcSetKey]htlcSet,
- pendingConf bool) ChainActionMap {
-
- // First, we'll partition the HTLCs into those that are present on the
- // confirmed commitment, and those on the dangling commitment.
- confHTLCs := activeHTLCs[RemoteHtlcSet]
- danglingHTLCs := activeHTLCs[RemotePendingHtlcSet]
- if pendingConf {
- confHTLCs = activeHTLCs[RemotePendingHtlcSet]
- danglingHTLCs = activeHTLCs[RemoteHtlcSet]
- }
-
- // Next, we'll create a set of all the HTLCs confirmed commitment.
- remoteHtlcs := make(map[uint64]struct{})
- for _, htlc := range confHTLCs.outgoingHTLCs {
- remoteHtlcs[htlc.HtlcIndex] = struct{}{}
- }
-
- // With the remote HTLCs assembled, we'll mark any HTLCs only on the
- // remote dangling commitment to be failed asap.
- actionMap := make(ChainActionMap)
- for _, htlc := range danglingHTLCs.outgoingHTLCs {
- if _, ok := remoteHtlcs[htlc.HtlcIndex]; ok {
- continue
- }
-
- actionMap[HtlcFailNowAction] = append(
- actionMap[HtlcFailNowAction], htlc,
- )
-
- log.Tracef("ChannelArbitrator(%v): immediately failing "+
- "htlc=%x from remote commitment",
- c.cfg.ChanPoint, htlc.RHash[:])
- }
-
- return actionMap
-}
-
-// constructChainActions returns the set of actions that should be taken for
-// confirmed HTLCs at the specified height. Our actions will depend on the set
-// of HTLCs that were active across all channels at the time of channel
-// closure.
-func (c *ChannelArbitrator) constructChainActions(confCommitSet *CommitSet,
- height uint32, trigger transitionTrigger) (ChainActionMap, er.R) {
-
- // If we've reached this point and have not confirmed commitment set,
- // then this is an older node that had a pending close channel before
- // the CommitSet was introduced. In this case, we'll just return the
- // existing ChainActionMap they had on disk.
- if confCommitSet == nil {
- return c.log.FetchChainActions()
- }
-
- // Otherwise we have the full commitment set written to disk, and can
- // proceed as normal.
- htlcSets := confCommitSet.toActiveHTLCSets()
- switch *confCommitSet.ConfCommitKey {
-
- // If the local commitment transaction confirmed, then we'll examine
- // that as well as their commitments to the set of chain actions.
- case LocalHtlcSet:
- return c.checkLocalChainActions(
- height, trigger, htlcSets, true,
- )
-
- // If the remote commitment confirmed, then we'll grab all the chain
- // actions for the remote commit, and check the pending commit for any
- // HTLCS we need to handle immediately (dust).
- case RemoteHtlcSet:
- return c.checkRemoteChainActions(
- height, trigger, htlcSets, false,
- )
-
- // Otherwise, the remote pending commitment confirmed, so we'll examine
- // the HTLCs on that unrevoked dangling commitment.
- case RemotePendingHtlcSet:
- return c.checkRemoteChainActions(
- height, trigger, htlcSets, true,
- )
- }
-
- return nil, er.Errorf("unable to locate chain actions")
-}
-
-// prepContractResolutions is called either int he case that we decide we need
-// to go to chain, or the remote party goes to chain. Given a set of actions we
-// need to take for each HTLC, this method will return a set of contract
-// resolvers that will resolve the contracts on-chain if needed, and also a set
-// of packets to send to the htlcswitch in order to ensure all incoming HTLC's
-// are properly resolved.
-func (c *ChannelArbitrator) prepContractResolutions(
- contractResolutions *ContractResolutions, height uint32,
- trigger transitionTrigger,
- confCommitSet *CommitSet) ([]ContractResolver, []ResolutionMsg, er.R) {
-
- // First, we'll reconstruct a fresh set of chain actions as the set of
- // actions we need to act on may differ based on if it was our
- // commitment, or they're commitment that hit the chain.
- htlcActions, err := c.constructChainActions(
- confCommitSet, height, trigger,
- )
- if err != nil {
- return nil, nil, err
- }
-
- // There may be a class of HTLC's which we can fail back immediately,
- // for those we'll prepare a slice of packets to add to our outbox. Any
- // packets we need to send, will be cancels.
- var (
- msgsToSend []ResolutionMsg
- )
-
- incomingResolutions := contractResolutions.HtlcResolutions.IncomingHTLCs
- outgoingResolutions := contractResolutions.HtlcResolutions.OutgoingHTLCs
-
- // We'll use these two maps to quickly look up an active HTLC with its
- // matching HTLC resolution.
- outResolutionMap := make(map[wire.OutPoint]lnwallet.OutgoingHtlcResolution)
- inResolutionMap := make(map[wire.OutPoint]lnwallet.IncomingHtlcResolution)
- for i := 0; i < len(incomingResolutions); i++ {
- inRes := incomingResolutions[i]
- inResolutionMap[inRes.HtlcPoint()] = inRes
- }
- for i := 0; i < len(outgoingResolutions); i++ {
- outRes := outgoingResolutions[i]
- outResolutionMap[outRes.HtlcPoint()] = outRes
- }
-
- // We'll create the resolver kit that we'll be cloning for each
- // resolver so they each can do their duty.
- resolverCfg := ResolverConfig{
- ChannelArbitratorConfig: c.cfg,
- Checkpoint: func(res ContractResolver,
- reports ...*channeldb.ResolverReport) er.R {
-
- return c.log.InsertUnresolvedContracts(reports, res)
- },
- }
-
- commitHash := contractResolutions.CommitHash
- failureMsg := &lnwire.FailPermanentChannelFailure{}
-
- // For each HTLC, we'll either act immediately, meaning we'll instantly
- // fail the HTLC, or we'll act only once the transaction has been
- // confirmed, in which case we'll need an HTLC resolver.
- var htlcResolvers []ContractResolver
- for htlcAction, htlcs := range htlcActions {
- switch htlcAction {
-
- // If we can fail an HTLC immediately (an outgoing HTLC with no
- // contract), then we'll assemble an HTLC fail packet to send.
- case HtlcFailNowAction:
- for _, htlc := range htlcs {
- failMsg := ResolutionMsg{
- SourceChan: c.cfg.ShortChanID,
- HtlcIndex: htlc.HtlcIndex,
- Failure: failureMsg,
- }
-
- msgsToSend = append(msgsToSend, failMsg)
- }
-
- // If we can claim this HTLC, we'll create an HTLC resolver to
- // claim the HTLC (second-level or directly), then add the pre
- case HtlcClaimAction:
- for _, htlc := range htlcs {
- htlc := htlc
-
- htlcOp := wire.OutPoint{
- Hash: commitHash,
- Index: uint32(htlc.OutputIndex),
- }
-
- resolution, ok := inResolutionMap[htlcOp]
- if !ok {
- // TODO(roasbeef): panic?
- log.Errorf("ChannelArbitrator(%v) unable to find "+
- "incoming resolution: %v",
- c.cfg.ChanPoint, htlcOp)
- continue
- }
-
- resolver := newSuccessResolver(
- resolution, height, htlc, resolverCfg,
- )
- htlcResolvers = append(htlcResolvers, resolver)
- }
-
- // If we can timeout the HTLC directly, then we'll create the
- // proper resolver to do so, who will then cancel the packet
- // backwards.
- case HtlcTimeoutAction:
- for _, htlc := range htlcs {
- htlc := htlc
-
- htlcOp := wire.OutPoint{
- Hash: commitHash,
- Index: uint32(htlc.OutputIndex),
- }
-
- resolution, ok := outResolutionMap[htlcOp]
- if !ok {
- log.Errorf("ChannelArbitrator(%v) unable to find "+
- "outgoing resolution: %v", c.cfg.ChanPoint, htlcOp)
- continue
- }
-
- resolver := newTimeoutResolver(
- resolution, height, htlc, resolverCfg,
- )
- htlcResolvers = append(htlcResolvers, resolver)
- }
-
- // If this is an incoming HTLC, but we can't act yet, then
- // we'll create an incoming resolver to redeem the HTLC if we
- // learn of the pre-image, or let the remote party time out.
- case HtlcIncomingWatchAction:
- for _, htlc := range htlcs {
- htlc := htlc
-
- htlcOp := wire.OutPoint{
- Hash: commitHash,
- Index: uint32(htlc.OutputIndex),
- }
-
- // TODO(roasbeef): need to handle incoming dust...
-
- // TODO(roasbeef): can't be negative!!!
- resolution, ok := inResolutionMap[htlcOp]
- if !ok {
- log.Errorf("ChannelArbitrator(%v) unable to find "+
- "incoming resolution: %v",
- c.cfg.ChanPoint, htlcOp)
- continue
- }
-
- resolver := newIncomingContestResolver(
- resolution, height, htlc,
- resolverCfg,
- )
- htlcResolvers = append(htlcResolvers, resolver)
- }
-
- // Finally, if this is an outgoing HTLC we've sent, then we'll
- // launch a resolver to watch for the pre-image (and settle
- // backwards), or just timeout.
- case HtlcOutgoingWatchAction:
- for _, htlc := range htlcs {
- htlc := htlc
-
- htlcOp := wire.OutPoint{
- Hash: commitHash,
- Index: uint32(htlc.OutputIndex),
- }
-
- resolution, ok := outResolutionMap[htlcOp]
- if !ok {
- log.Errorf("ChannelArbitrator(%v) unable to find "+
- "outgoing resolution: %v",
- c.cfg.ChanPoint, htlcOp)
- continue
- }
-
- resolver := newOutgoingContestResolver(
- resolution, height, htlc, resolverCfg,
- )
- htlcResolvers = append(htlcResolvers, resolver)
- }
- }
- }
-
- // If this is was an unilateral closure, then we'll also create a
- // resolver to sweep our commitment output (but only if it wasn't
- // trimmed).
- if contractResolutions.CommitResolution != nil {
- resolver := newCommitSweepResolver(
- *contractResolutions.CommitResolution,
- height, c.cfg.ChanPoint, resolverCfg,
- )
- htlcResolvers = append(htlcResolvers, resolver)
- }
-
- // We instantiate an anchor resolver if the commitmentment tx has an
- // anchor.
- if contractResolutions.AnchorResolution != nil {
- anchorResolver := newAnchorResolver(
- contractResolutions.AnchorResolution.AnchorSignDescriptor,
- contractResolutions.AnchorResolution.CommitAnchor,
- height, c.cfg.ChanPoint, resolverCfg,
- )
- htlcResolvers = append(htlcResolvers, anchorResolver)
- }
-
- return htlcResolvers, msgsToSend, nil
-}
-
-// replaceResolver replaces a in the list of active resolvers. If the resolver
-// to be replaced is not found, it returns an error.
-func (c *ChannelArbitrator) replaceResolver(oldResolver,
- newResolver ContractResolver) er.R {
-
- c.activeResolversLock.Lock()
- defer c.activeResolversLock.Unlock()
-
- oldKey := oldResolver.ResolverKey()
- for i, r := range c.activeResolvers {
- if bytes.Equal(r.ResolverKey(), oldKey) {
- c.activeResolvers[i] = newResolver
- return nil
- }
- }
-
- return er.New("resolver to be replaced not found")
-}
-
-// resolveContract is a goroutine tasked with fully resolving an unresolved
-// contract. Either the initial contract will be resolved after a single step,
-// or the contract will itself create another contract to be resolved. In
-// either case, one the contract has been fully resolved, we'll signal back to
-// the main goroutine so it can properly keep track of the set of unresolved
-// contracts.
-//
-// NOTE: This MUST be run as a goroutine.
-func (c *ChannelArbitrator) resolveContract(currentContract ContractResolver) {
- defer c.wg.Done()
-
- log.Debugf("ChannelArbitrator(%v): attempting to resolve %T",
- c.cfg.ChanPoint, currentContract)
-
- // Until the contract is fully resolved, we'll continue to iteratively
- // resolve the contract one step at a time.
- for !currentContract.IsResolved() {
- log.Debugf("ChannelArbitrator(%v): contract %T not yet resolved",
- c.cfg.ChanPoint, currentContract)
-
- select {
-
- // If we've been signalled to quit, then we'll exit early.
- case <-c.quit:
- return
-
- default:
- // Otherwise, we'll attempt to resolve the current
- // contract.
- nextContract, err := currentContract.Resolve()
- if err != nil {
- if errResolverShuttingDown.Is(err) {
- return
- }
-
- log.Errorf("ChannelArbitrator(%v): unable to "+
- "progress %T: %v",
- c.cfg.ChanPoint, currentContract, err)
- return
- }
-
- switch {
- // If this contract produced another, then this means
- // the current contract was only able to be partially
- // resolved in this step. So we'll do a contract swap
- // within our logs: the new contract will take the
- // place of the old one.
- case nextContract != nil:
- log.Debugf("ChannelArbitrator(%v): swapping "+
- "out contract %T for %T ",
- c.cfg.ChanPoint, currentContract,
- nextContract)
-
- // Swap contract in log.
- err := c.log.SwapContract(
- currentContract, nextContract,
- )
- if err != nil {
- log.Errorf("unable to add recurse "+
- "contract: %v", err)
- }
-
- // Swap contract in resolvers list. This is to
- // make sure that reports are queried from the
- // new resolver.
- err = c.replaceResolver(
- currentContract, nextContract,
- )
- if err != nil {
- log.Errorf("unable to replace "+
- "contract: %v", err)
- }
-
- // As this contract produced another, we'll
- // re-assign, so we can continue our resolution
- // loop.
- currentContract = nextContract
-
- // If this contract is actually fully resolved, then
- // we'll mark it as such within the database.
- case currentContract.IsResolved():
- log.Debugf("ChannelArbitrator(%v): marking "+
- "contract %T fully resolved",
- c.cfg.ChanPoint, currentContract)
-
- err := c.log.ResolveContract(currentContract)
- if err != nil {
- log.Errorf("unable to resolve contract: %v",
- err)
- }
-
- // Now that the contract has been resolved,
- // well signal to the main goroutine.
- select {
- case c.resolutionSignal <- struct{}{}:
- case <-c.quit:
- return
- }
- }
-
- }
- }
-}
-
-// signalUpdateMsg is a struct that carries fresh signals to the
-// ChannelArbitrator. We need to receive a message like this each time the
-// channel becomes active, as it's internal state may change.
-type signalUpdateMsg struct {
- // newSignals is the set of new active signals to be sent to the
- // arbitrator.
- newSignals *ContractSignals
-
- // doneChan is a channel that will be closed on the arbitrator has
- // attached the new signals.
- doneChan chan struct{}
-}
-
-// UpdateContractSignals updates the set of signals the ChannelArbitrator needs
-// to receive from a channel in real-time in order to keep in sync with the
-// latest state of the contract.
-func (c *ChannelArbitrator) UpdateContractSignals(newSignals *ContractSignals) {
- done := make(chan struct{})
-
- select {
- case c.signalUpdates <- &signalUpdateMsg{
- newSignals: newSignals,
- doneChan: done,
- }:
- case <-c.quit:
- }
-
- select {
- case <-done:
- case <-c.quit:
- }
-}
-
-// channelAttendant is the primary goroutine that acts at the judicial
-// arbitrator between our channel state, the remote channel peer, and the
-// blockchain (Our judge). This goroutine will ensure that we faithfully execute
-// all clauses of our contract in the case that we need to go on-chain for a
-// dispute. Currently, two such conditions warrant our intervention: when an
-// outgoing HTLC is about to timeout, and when we know the pre-image for an
-// incoming HTLC, but it hasn't yet been settled off-chain. In these cases,
-// we'll: broadcast our commitment, cancel/settle any HTLC's backwards after
-// sufficient confirmation, and finally send our set of outputs to the UTXO
-// Nursery for incubation, and ultimate sweeping.
-//
-// NOTE: This MUST be run as a goroutine.
-func (c *ChannelArbitrator) channelAttendant(bestHeight int32) {
-
- // TODO(roasbeef): tell top chain arb we're done
- defer func() {
- c.wg.Done()
- }()
-
- for {
- select {
-
- // A new block has arrived, we'll examine all the active HTLC's
- // to see if any of them have expired, and also update our
- // track of the best current height.
- case blockHeight, ok := <-c.blocks:
- if !ok {
- return
- }
- bestHeight = blockHeight
-
- // If we're not in the default state, then we can
- // ignore this signal as we're waiting for contract
- // resolution.
- if c.state != StateDefault {
- continue
- }
-
- // Now that a new block has arrived, we'll attempt to
- // advance our state forward.
- nextState, _, err := c.advanceState(
- uint32(bestHeight), chainTrigger, nil,
- )
- if err != nil {
- log.Errorf("Unable to advance state: %v", err)
- }
-
- // If as a result of this trigger, the contract is
- // fully resolved, then well exit.
- if nextState == StateFullyResolved {
- return
- }
-
- // A new signal update was just sent. This indicates that the
- // channel under watch is now live, and may modify its internal
- // state, so we'll get the most up to date signals to we can
- // properly do our job.
- case signalUpdate := <-c.signalUpdates:
- log.Tracef("ChannelArbitrator(%v) got new signal "+
- "update!", c.cfg.ChanPoint)
-
- // First, we'll update our set of signals.
- c.htlcUpdates = signalUpdate.newSignals.HtlcUpdates
- c.cfg.ShortChanID = signalUpdate.newSignals.ShortChanID
-
- // Now that the signals have been updated, we'll now
- // close the done channel to signal to the caller we've
- // registered the new contracts.
- close(signalUpdate.doneChan)
-
- // A new set of HTLC's has been added or removed from the
- // commitment transaction. So we'll update our activeHTLCs map
- // accordingly.
- case htlcUpdate := <-c.htlcUpdates:
- // We'll wipe out our old set of HTLC's for each
- // htlcSetKey type included in this update in order to
- // only monitor the HTLCs that are still active on this
- // target commitment.
- c.activeHTLCs[htlcUpdate.HtlcKey] = newHtlcSet(
- htlcUpdate.Htlcs,
- )
-
- log.Tracef("ChannelArbitrator(%v): fresh set of htlcs=%v",
- c.cfg.ChanPoint,
- log.C(func() string {
- return spew.Sdump(htlcUpdate)
- }),
- )
-
- // We've cooperatively closed the channel, so we're no longer
- // needed. We'll mark the channel as resolved and exit.
- case closeInfo := <-c.cfg.ChainEvents.CooperativeClosure:
- log.Infof("ChannelArbitrator(%v) marking channel "+
- "cooperatively closed", c.cfg.ChanPoint)
-
- err := c.cfg.MarkChannelClosed(
- closeInfo.ChannelCloseSummary,
- channeldb.ChanStatusCoopBroadcasted,
- )
- if err != nil {
- log.Errorf("Unable to mark channel closed: "+
- "%v", err)
- return
- }
-
- // We'll now advance our state machine until it reaches
- // a terminal state, and the channel is marked resolved.
- _, _, err = c.advanceState(
- closeInfo.CloseHeight, coopCloseTrigger, nil,
- )
- if err != nil {
- log.Errorf("Unable to advance state: %v", err)
- return
- }
-
- // We have broadcasted our commitment, and it is now confirmed
- // on-chain.
- case closeInfo := <-c.cfg.ChainEvents.LocalUnilateralClosure:
- log.Infof("ChannelArbitrator(%v): local on-chain "+
- "channel close", c.cfg.ChanPoint)
-
- if c.state != StateCommitmentBroadcasted {
- log.Errorf("ChannelArbitrator(%v): unexpected "+
- "local on-chain channel close",
- c.cfg.ChanPoint)
- }
- closeTx := closeInfo.CloseTx
-
- contractRes := &ContractResolutions{
- CommitHash: closeTx.TxHash(),
- CommitResolution: closeInfo.CommitResolution,
- HtlcResolutions: *closeInfo.HtlcResolutions,
- AnchorResolution: closeInfo.AnchorResolution,
- }
-
- // When processing a unilateral close event, we'll
- // transition to the ContractClosed state. We'll log
- // out the set of resolutions such that they are
- // available to fetch in that state, we'll also write
- // the commit set so we can reconstruct our chain
- // actions on restart.
- err := c.log.LogContractResolutions(contractRes)
- if err != nil {
- log.Errorf("Unable to write resolutions: %v",
- err)
- return
- }
- err = c.log.InsertConfirmedCommitSet(
- &closeInfo.CommitSet,
- )
- if err != nil {
- log.Errorf("Unable to write commit set: %v",
- err)
- return
- }
-
- // After the set of resolutions are successfully
- // logged, we can safely close the channel. After this
- // succeeds we won't be getting chain events anymore,
- // so we must make sure we can recover on restart after
- // it is marked closed. If the next state transition
- // fails, we'll start up in the prior state again, and
- // we won't be longer getting chain events. In this
- // case we must manually re-trigger the state
- // transition into StateContractClosed based on the
- // close status of the channel.
- err = c.cfg.MarkChannelClosed(
- closeInfo.ChannelCloseSummary,
- channeldb.ChanStatusLocalCloseInitiator,
- )
- if err != nil {
- log.Errorf("Unable to mark "+
- "channel closed: %v", err)
- return
- }
-
- // We'll now advance our state machine until it reaches
- // a terminal state.
- _, _, err = c.advanceState(
- uint32(closeInfo.SpendingHeight),
- localCloseTrigger, &closeInfo.CommitSet,
- )
- if err != nil {
- log.Errorf("Unable to advance state: %v", err)
- }
-
- // The remote party has broadcast the commitment on-chain.
- // We'll examine our state to determine if we need to act at
- // all.
- case uniClosure := <-c.cfg.ChainEvents.RemoteUnilateralClosure:
- log.Infof("ChannelArbitrator(%v): remote party has "+
- "closed channel out on-chain", c.cfg.ChanPoint)
-
- // If we don't have a self output, and there are no
- // active HTLC's, then we can immediately mark the
- // contract as fully resolved and exit.
- contractRes := &ContractResolutions{
- CommitHash: *uniClosure.SpenderTxHash,
- CommitResolution: uniClosure.CommitResolution,
- HtlcResolutions: *uniClosure.HtlcResolutions,
- AnchorResolution: uniClosure.AnchorResolution,
- }
-
- // When processing a unilateral close event, we'll
- // transition to the ContractClosed state. We'll log
- // out the set of resolutions such that they are
- // available to fetch in that state, we'll also write
- // the commit set so we can reconstruct our chain
- // actions on restart.
- err := c.log.LogContractResolutions(contractRes)
- if err != nil {
- log.Errorf("Unable to write resolutions: %v",
- err)
- return
- }
- err = c.log.InsertConfirmedCommitSet(
- &uniClosure.CommitSet,
- )
- if err != nil {
- log.Errorf("Unable to write commit set: %v",
- err)
- return
- }
-
- // After the set of resolutions are successfully
- // logged, we can safely close the channel. After this
- // succeeds we won't be getting chain events anymore,
- // so we must make sure we can recover on restart after
- // it is marked closed. If the next state transition
- // fails, we'll start up in the prior state again, and
- // we won't be longer getting chain events. In this
- // case we must manually re-trigger the state
- // transition into StateContractClosed based on the
- // close status of the channel.
- closeSummary := &uniClosure.ChannelCloseSummary
- err = c.cfg.MarkChannelClosed(
- closeSummary,
- channeldb.ChanStatusRemoteCloseInitiator,
- )
- if err != nil {
- log.Errorf("Unable to mark channel closed: %v",
- err)
- return
- }
-
- // We'll now advance our state machine until it reaches
- // a terminal state.
- _, _, err = c.advanceState(
- uint32(uniClosure.SpendingHeight),
- remoteCloseTrigger, &uniClosure.CommitSet,
- )
- if err != nil {
- log.Errorf("Unable to advance state: %v", err)
- }
-
- // The remote has breached the channel. As this is handled by
- // the ChainWatcher and BreachArbiter, we don't have to do
- // anything in particular, so just advance our state and
- // gracefully exit.
- case <-c.cfg.ChainEvents.ContractBreach:
- log.Infof("ChannelArbitrator(%v): remote party has "+
- "breached channel!", c.cfg.ChanPoint)
-
- // We'll advance our state machine until it reaches a
- // terminal state.
- _, _, err := c.advanceState(
- uint32(bestHeight), breachCloseTrigger, nil,
- )
- if err != nil {
- log.Errorf("Unable to advance state: %v", err)
- }
-
- // A new contract has just been resolved, we'll now check our
- // log to see if all contracts have been resolved. If so, then
- // we can exit as the contract is fully resolved.
- case <-c.resolutionSignal:
- log.Infof("ChannelArbitrator(%v): a contract has been "+
- "fully resolved!", c.cfg.ChanPoint)
-
- nextState, _, err := c.advanceState(
- uint32(bestHeight), chainTrigger, nil,
- )
- if err != nil {
- log.Errorf("Unable to advance state: %v", err)
- }
-
- // If we don't have anything further to do after
- // advancing our state, then we'll exit.
- if nextState == StateFullyResolved {
- log.Infof("ChannelArbitrator(%v): all "+
- "contracts fully resolved, exiting",
- c.cfg.ChanPoint)
-
- return
- }
-
- // We've just received a request to forcibly close out the
- // channel. We'll
- case closeReq := <-c.forceCloseReqs:
- if c.state != StateDefault {
- select {
- case closeReq.closeTx <- nil:
- case <-c.quit:
- }
-
- select {
- case closeReq.errResp <- errAlreadyForceClosed.Default():
- case <-c.quit:
- }
-
- continue
- }
-
- nextState, closeTx, err := c.advanceState(
- uint32(bestHeight), userTrigger, nil,
- )
- if err != nil {
- log.Errorf("Unable to advance state: %v", err)
- }
-
- select {
- case closeReq.closeTx <- closeTx:
- case <-c.quit:
- return
- }
-
- select {
- case closeReq.errResp <- err:
- case <-c.quit:
- return
- }
-
- // If we don't have anything further to do after
- // advancing our state, then we'll exit.
- if nextState == StateFullyResolved {
- log.Infof("ChannelArbitrator(%v): all "+
- "contracts resolved, exiting",
- c.cfg.ChanPoint)
- return
- }
-
- case <-c.quit:
- return
- }
- }
-}
diff --git a/lnd/contractcourt/channel_arbitrator_test.go b/lnd/contractcourt/channel_arbitrator_test.go
deleted file mode 100644
index ecd00638..00000000
--- a/lnd/contractcourt/channel_arbitrator_test.go
+++ /dev/null
@@ -1,2307 +0,0 @@
-package contractcourt
-
-import (
- "fmt"
- "io/ioutil"
- "os"
- "path/filepath"
- "reflect"
- "sync"
- "testing"
- "time"
-
- "github.com/pkt-cash/pktd/btcutil"
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/chaincfg/chainhash"
- "github.com/pkt-cash/pktd/chaincfg/globalcfg"
- "github.com/pkt-cash/pktd/lnd/chainntnfs"
- "github.com/pkt-cash/pktd/lnd/channeldb"
- "github.com/pkt-cash/pktd/lnd/channeldb/kvdb"
- "github.com/pkt-cash/pktd/lnd/clock"
- "github.com/pkt-cash/pktd/lnd/input"
- "github.com/pkt-cash/pktd/lnd/lntest/mock"
- "github.com/pkt-cash/pktd/lnd/lnwallet"
- "github.com/pkt-cash/pktd/lnd/lnwire"
- "github.com/pkt-cash/pktd/wire"
-)
-
-const (
- defaultTimeout = time.Second * 5
-
- // stateTimeout is the timeout we allow when waiting for state
- // transitions.
- stateTimeout = time.Second * 15
-)
-
-type mockArbitratorLog struct {
- state ArbitratorState
- newStates chan ArbitratorState
- failLog bool
- failFetch *er.ErrorCode
- failCommit bool
- failCommitState ArbitratorState
- resolutions *ContractResolutions
- resolvers map[ContractResolver]struct{}
-
- commitSet *CommitSet
-
- sync.Mutex
-}
-
-// A compile time check to ensure mockArbitratorLog meets the ArbitratorLog
-// interface.
-var _ ArbitratorLog = (*mockArbitratorLog)(nil)
-
-func (b *mockArbitratorLog) CurrentState(kvdb.RTx) (ArbitratorState, er.R) {
- return b.state, nil
-}
-
-func (b *mockArbitratorLog) CommitState(s ArbitratorState) er.R {
- if b.failCommit && s == b.failCommitState {
- return er.Errorf("intentional commit error at state %v",
- b.failCommitState)
- }
- b.state = s
- b.newStates <- s
- return nil
-}
-
-func (b *mockArbitratorLog) FetchUnresolvedContracts() ([]ContractResolver,
- er.R) {
-
- b.Lock()
- v := make([]ContractResolver, len(b.resolvers))
- idx := 0
- for resolver := range b.resolvers {
- v[idx] = resolver
- idx++
- }
- b.Unlock()
-
- return v, nil
-}
-
-func (b *mockArbitratorLog) InsertUnresolvedContracts(_ []*channeldb.ResolverReport,
- resolvers ...ContractResolver) er.R {
-
- b.Lock()
- for _, resolver := range resolvers {
- resKey := resolver.ResolverKey()
- if resKey == nil {
- continue
- }
-
- b.resolvers[resolver] = struct{}{}
- }
- b.Unlock()
- return nil
-}
-
-func (b *mockArbitratorLog) SwapContract(oldContract,
- newContract ContractResolver) er.R {
-
- b.Lock()
- delete(b.resolvers, oldContract)
- b.resolvers[newContract] = struct{}{}
- b.Unlock()
-
- return nil
-}
-
-func (b *mockArbitratorLog) ResolveContract(res ContractResolver) er.R {
- b.Lock()
- delete(b.resolvers, res)
- b.Unlock()
-
- return nil
-}
-
-func (b *mockArbitratorLog) LogContractResolutions(c *ContractResolutions) er.R {
- if b.failLog {
- return er.Errorf("intentional log failure")
- }
- b.resolutions = c
- return nil
-}
-
-func (b *mockArbitratorLog) FetchContractResolutions() (*ContractResolutions, er.R) {
- if b.failFetch != nil {
- return nil, b.failFetch.Default()
- }
-
- return b.resolutions, nil
-}
-
-func (b *mockArbitratorLog) FetchChainActions() (ChainActionMap, er.R) {
- return nil, nil
-}
-
-func (b *mockArbitratorLog) InsertConfirmedCommitSet(c *CommitSet) er.R {
- b.commitSet = c
- return nil
-}
-
-func (b *mockArbitratorLog) FetchConfirmedCommitSet(kvdb.RTx) (*CommitSet, er.R) {
- return b.commitSet, nil
-}
-
-func (b *mockArbitratorLog) WipeHistory() er.R {
- return nil
-}
-
-// testArbLog is a wrapper around an existing (ideally fully concrete
-// ArbitratorLog) that lets us intercept certain calls like transitioning to a
-// new state.
-type testArbLog struct {
- ArbitratorLog
-
- newStates chan ArbitratorState
-}
-
-func (t *testArbLog) CommitState(s ArbitratorState) er.R {
- if err := t.ArbitratorLog.CommitState(s); err != nil {
- return err
- }
-
- t.newStates <- s
-
- return nil
-}
-
-type mockChainIO struct{}
-
-var _ lnwallet.BlockChainIO = (*mockChainIO)(nil)
-
-func (*mockChainIO) GetBestBlock() (*chainhash.Hash, int32, er.R) {
- return nil, 0, nil
-}
-
-func (*mockChainIO) GetUtxo(op *wire.OutPoint, _ []byte,
- heightHint uint32, _ <-chan struct{}) (*wire.TxOut, er.R) {
- return nil, nil
-}
-
-func (*mockChainIO) GetBlockHash(blockHeight int64) (*chainhash.Hash, er.R) {
- return nil, nil
-}
-
-func (*mockChainIO) GetBlock(blockHash *chainhash.Hash) (*wire.MsgBlock, er.R) {
- return nil, nil
-}
-
-type chanArbTestCtx struct {
- t *testing.T
-
- chanArb *ChannelArbitrator
-
- cleanUp func()
-
- resolvedChan chan struct{}
-
- incubationRequests chan struct{}
-
- resolutions chan []ResolutionMsg
-
- log ArbitratorLog
-
- sweeper *mockSweeper
-}
-
-func (c *chanArbTestCtx) CleanUp() {
- if err := c.chanArb.Stop(); err != nil {
- c.t.Fatalf("unable to stop chan arb: %v", err)
- }
-
- if c.cleanUp != nil {
- c.cleanUp()
- }
-}
-
-// AssertStateTransitions asserts that the state machine steps through the
-// passed states in order.
-func (c *chanArbTestCtx) AssertStateTransitions(expectedStates ...ArbitratorState) {
- c.t.Helper()
-
- var newStatesChan chan ArbitratorState
- switch log := c.log.(type) {
- case *mockArbitratorLog:
- newStatesChan = log.newStates
-
- case *testArbLog:
- newStatesChan = log.newStates
-
- default:
- c.t.Fatalf("unable to assert state transitions with %T", log)
- }
-
- for _, exp := range expectedStates {
- var state ArbitratorState
- select {
- case state = <-newStatesChan:
- case <-time.After(defaultTimeout):
- c.t.Fatalf("new state not received")
- }
-
- if state != exp {
- c.t.Fatalf("expected new state %v, got %v", exp, state)
- }
- }
-}
-
-// AssertState checks that the ChannelArbitrator is in the state we expect it
-// to be.
-func (c *chanArbTestCtx) AssertState(expected ArbitratorState) {
- if c.chanArb.state != expected {
- c.t.Fatalf("expected state %v, was %v", expected, c.chanArb.state)
- }
-}
-
-// Restart simulates a clean restart of the channel arbitrator, forcing it to
-// walk through it's recovery logic. If this function returns nil, then a
-// restart was successful. Note that the restart process keeps the log in
-// place, in order to simulate proper persistence of the log. The caller can
-// optionally provide a restart closure which will be executed before the
-// resolver is started again, but after it is created.
-func (c *chanArbTestCtx) Restart(restartClosure func(*chanArbTestCtx)) (*chanArbTestCtx, er.R) {
- if err := c.chanArb.Stop(); err != nil {
- return nil, err
- }
-
- newCtx, err := createTestChannelArbitrator(c.t, c.log)
- if err != nil {
- return nil, err
- }
-
- if restartClosure != nil {
- restartClosure(newCtx)
- }
-
- if err := newCtx.chanArb.Start(nil); err != nil {
- return nil, err
- }
-
- return newCtx, nil
-}
-
-// testChanArbOption applies custom settings to a channel arbitrator config for
-// testing purposes.
-type testChanArbOption func(cfg *ChannelArbitratorConfig)
-
-// remoteInitiatorOption sets the MarkChannelClosed function in the
-// Channel Arbitrator's config.
-func withMarkClosed(markClosed func(*channeldb.ChannelCloseSummary,
- ...channeldb.ChannelStatus) er.R) testChanArbOption {
-
- return func(cfg *ChannelArbitratorConfig) {
- cfg.MarkChannelClosed = markClosed
- }
-}
-
-// createTestChannelArbitrator returns a channel arbitrator test context which
-// contains a channel arbitrator with default values. These values can be
-// changed by providing options which overwrite the default config.
-func createTestChannelArbitrator(t *testing.T, log ArbitratorLog,
- opts ...testChanArbOption) (*chanArbTestCtx, er.R) {
-
- chanPoint := wire.OutPoint{}
- shortChanID := lnwire.ShortChannelID{}
- chanEvents := &ChainEventSubscription{
- RemoteUnilateralClosure: make(chan *RemoteUnilateralCloseInfo, 1),
- LocalUnilateralClosure: make(chan *LocalUnilateralCloseInfo, 1),
- CooperativeClosure: make(chan *CooperativeCloseInfo, 1),
- ContractBreach: make(chan *lnwallet.BreachRetribution, 1),
- }
-
- resolutionChan := make(chan []ResolutionMsg, 1)
- incubateChan := make(chan struct{})
-
- chainIO := &mockChainIO{}
- mockSweeper := newMockSweeper()
- chainArbCfg := ChainArbitratorConfig{
- ChainIO: chainIO,
- PublishTx: func(*wire.MsgTx, string) er.R {
- return nil
- },
- DeliverResolutionMsg: func(msgs ...ResolutionMsg) er.R {
- resolutionChan <- msgs
- return nil
- },
- OutgoingBroadcastDelta: 5,
- IncomingBroadcastDelta: 5,
- Notifier: &mock.ChainNotifier{
- EpochChan: make(chan *chainntnfs.BlockEpoch),
- SpendChan: make(chan *chainntnfs.SpendDetail),
- ConfChan: make(chan *chainntnfs.TxConfirmation),
- },
- IncubateOutputs: func(wire.OutPoint,
- *lnwallet.OutgoingHtlcResolution,
- *lnwallet.IncomingHtlcResolution, uint32) er.R {
-
- incubateChan <- struct{}{}
- return nil
- },
- OnionProcessor: &mockOnionProcessor{},
- IsForwardedHTLC: func(chanID lnwire.ShortChannelID,
- htlcIndex uint64) bool {
-
- return true
- },
- Clock: clock.NewDefaultClock(),
- Sweeper: mockSweeper,
- }
-
- // We'll use the resolvedChan to synchronize on call to
- // MarkChannelResolved.
- resolvedChan := make(chan struct{}, 1)
-
- // Next we'll create the matching configuration struct that contains
- // all interfaces and methods the arbitrator needs to do its job.
- arbCfg := &ChannelArbitratorConfig{
- ChanPoint: chanPoint,
- ShortChanID: shortChanID,
- MarkChannelResolved: func() er.R {
- resolvedChan <- struct{}{}
- return nil
- },
- Channel: &mockChannel{},
- MarkCommitmentBroadcasted: func(_ *wire.MsgTx, _ bool) er.R {
- return nil
- },
- MarkChannelClosed: func(*channeldb.ChannelCloseSummary,
- ...channeldb.ChannelStatus) er.R {
- return nil
- },
- IsPendingClose: false,
- ChainArbitratorConfig: chainArbCfg,
- ChainEvents: chanEvents,
- PutResolverReport: func(_ kvdb.RwTx,
- _ *channeldb.ResolverReport) er.R {
-
- return nil
- },
- }
-
- // Apply all custom options to the config struct.
- for _, option := range opts {
- option(arbCfg)
- }
-
- var cleanUp func()
- if log == nil {
- dbDir, errr := ioutil.TempDir("", "chanArb")
- if errr != nil {
- return nil, er.E(errr)
- }
- dbPath := filepath.Join(dbDir, "testdb")
- db, err := kvdb.Create(kvdb.BoltBackendName, dbPath, true)
- if err != nil {
- return nil, err
- }
-
- backingLog, err := newBoltArbitratorLog(
- db, *arbCfg, chainhash.Hash{}, chanPoint,
- )
- if err != nil {
- return nil, err
- }
- cleanUp = func() {
- db.Close()
- os.RemoveAll(dbDir)
- }
-
- log = &testArbLog{
- ArbitratorLog: backingLog,
- newStates: make(chan ArbitratorState),
- }
- }
-
- htlcSets := make(map[HtlcSetKey]htlcSet)
-
- chanArb := NewChannelArbitrator(*arbCfg, htlcSets, log)
-
- return &chanArbTestCtx{
- t: t,
- chanArb: chanArb,
- cleanUp: cleanUp,
- resolvedChan: resolvedChan,
- resolutions: resolutionChan,
- log: log,
- incubationRequests: incubateChan,
- sweeper: mockSweeper,
- }, nil
-}
-
-// TestChannelArbitratorCooperativeClose tests that the ChannelArbitertor
-// correctly marks the channel resolved in case a cooperative close is
-// confirmed.
-func TestChannelArbitratorCooperativeClose(t *testing.T) {
- log := &mockArbitratorLog{
- state: StateDefault,
- newStates: make(chan ArbitratorState, 5),
- }
-
- chanArbCtx, err := createTestChannelArbitrator(t, log)
- if err != nil {
- t.Fatalf("unable to create ChannelArbitrator: %v", err)
- }
-
- if err := chanArbCtx.chanArb.Start(nil); err != nil {
- t.Fatalf("unable to start ChannelArbitrator: %v", err)
- }
- defer func() {
- if err := chanArbCtx.chanArb.Stop(); err != nil {
- t.Fatalf("unable to stop chan arb: %v", err)
- }
- }()
-
- // It should start out in the default state.
- chanArbCtx.AssertState(StateDefault)
-
- // We set up a channel to detect when MarkChannelClosed is called.
- closeInfos := make(chan *channeldb.ChannelCloseSummary)
- chanArbCtx.chanArb.cfg.MarkChannelClosed = func(
- closeInfo *channeldb.ChannelCloseSummary,
- statuses ...channeldb.ChannelStatus) er.R {
-
- closeInfos <- closeInfo
- return nil
- }
-
- // Cooperative close should do trigger a MarkChannelClosed +
- // MarkChannelResolved.
- closeInfo := &CooperativeCloseInfo{
- &channeldb.ChannelCloseSummary{},
- }
- chanArbCtx.chanArb.cfg.ChainEvents.CooperativeClosure <- closeInfo
-
- select {
- case c := <-closeInfos:
- if c.CloseType != channeldb.CooperativeClose {
- t.Fatalf("expected cooperative close, got %v", c.CloseType)
- }
- case <-time.After(defaultTimeout):
- t.Fatalf("timeout waiting for channel close")
- }
-
- // It should mark the channel as resolved.
- select {
- case <-chanArbCtx.resolvedChan:
- // Expected.
- case <-time.After(defaultTimeout):
- t.Fatalf("contract was not resolved")
- }
-}
-
-// TestChannelArbitratorRemoteForceClose checks that the ChannelArbitrator goes
-// through the expected states if a remote force close is observed in the
-// chain.
-func TestChannelArbitratorRemoteForceClose(t *testing.T) {
- log := &mockArbitratorLog{
- state: StateDefault,
- newStates: make(chan ArbitratorState, 5),
- }
-
- chanArbCtx, err := createTestChannelArbitrator(t, log)
- if err != nil {
- t.Fatalf("unable to create ChannelArbitrator: %v", err)
- }
- chanArb := chanArbCtx.chanArb
-
- if err := chanArb.Start(nil); err != nil {
- t.Fatalf("unable to start ChannelArbitrator: %v", err)
- }
- defer chanArb.Stop()
-
- // It should start out in the default state.
- chanArbCtx.AssertState(StateDefault)
-
- // Send a remote force close event.
- commitSpend := &chainntnfs.SpendDetail{
- SpenderTxHash: &chainhash.Hash{},
- }
-
- uniClose := &lnwallet.UnilateralCloseSummary{
- SpendDetail: commitSpend,
- HtlcResolutions: &lnwallet.HtlcResolutions{},
- }
- chanArb.cfg.ChainEvents.RemoteUnilateralClosure <- &RemoteUnilateralCloseInfo{
- UnilateralCloseSummary: uniClose,
- CommitSet: CommitSet{
- ConfCommitKey: &RemoteHtlcSet,
- HtlcSets: make(map[HtlcSetKey][]channeldb.HTLC),
- },
- }
-
- // It should transition StateDefault -> StateContractClosed ->
- // StateFullyResolved.
- chanArbCtx.AssertStateTransitions(
- StateContractClosed, StateFullyResolved,
- )
-
- // It should also mark the channel as resolved.
- select {
- case <-chanArbCtx.resolvedChan:
- // Expected.
- case <-time.After(defaultTimeout):
- t.Fatalf("contract was not resolved")
- }
-}
-
-// TestChannelArbitratorLocalForceClose tests that the ChannelArbitrator goes
-// through the expected states in case we request it to force close the channel,
-// and the local force close event is observed in chain.
-func TestChannelArbitratorLocalForceClose(t *testing.T) {
- log := &mockArbitratorLog{
- state: StateDefault,
- newStates: make(chan ArbitratorState, 5),
- }
-
- chanArbCtx, err := createTestChannelArbitrator(t, log)
- if err != nil {
- t.Fatalf("unable to create ChannelArbitrator: %v", err)
- }
- chanArb := chanArbCtx.chanArb
-
- if err := chanArb.Start(nil); err != nil {
- t.Fatalf("unable to start ChannelArbitrator: %v", err)
- }
- defer chanArb.Stop()
-
- // It should start out in the default state.
- chanArbCtx.AssertState(StateDefault)
-
- // We create a channel we can use to pause the ChannelArbitrator at the
- // point where it broadcasts the close tx, and check its state.
- stateChan := make(chan ArbitratorState)
- chanArb.cfg.PublishTx = func(*wire.MsgTx, string) er.R {
- // When the force close tx is being broadcasted, check that the
- // state is correct at that point.
- select {
- case stateChan <- chanArb.state:
- case <-chanArb.quit:
- return er.Errorf("exiting")
- }
- return nil
- }
-
- errChan := make(chan er.R, 1)
- respChan := make(chan *wire.MsgTx, 1)
-
- // With the channel found, and the request crafted, we'll send over a
- // force close request to the arbitrator that watches this channel.
- chanArb.forceCloseReqs <- &forceCloseReq{
- errResp: errChan,
- closeTx: respChan,
- }
-
- // It should transition to StateBroadcastCommit.
- chanArbCtx.AssertStateTransitions(StateBroadcastCommit)
-
- // When it is broadcasting the force close, its state should be
- // StateBroadcastCommit.
- select {
- case state := <-stateChan:
- if state != StateBroadcastCommit {
- t.Fatalf("state during PublishTx was %v", state)
- }
- case <-time.After(stateTimeout):
- t.Fatalf("did not get state update")
- }
-
- // After broadcasting, transition should be to
- // StateCommitmentBroadcasted.
- chanArbCtx.AssertStateTransitions(StateCommitmentBroadcasted)
-
- select {
- case <-respChan:
- case <-time.After(defaultTimeout):
- t.Fatalf("no response received")
- }
-
- select {
- case err := <-errChan:
- if err != nil {
- t.Fatalf("error force closing channel: %v", err)
- }
- case <-time.After(defaultTimeout):
- t.Fatalf("no response received")
- }
-
- // After broadcasting the close tx, it should be in state
- // StateCommitmentBroadcasted.
- chanArbCtx.AssertState(StateCommitmentBroadcasted)
-
- // Now notify about the local force close getting confirmed.
- chanArb.cfg.ChainEvents.LocalUnilateralClosure <- &LocalUnilateralCloseInfo{
- SpendDetail: &chainntnfs.SpendDetail{},
- LocalForceCloseSummary: &lnwallet.LocalForceCloseSummary{
- CloseTx: &wire.MsgTx{},
- HtlcResolutions: &lnwallet.HtlcResolutions{},
- },
- ChannelCloseSummary: &channeldb.ChannelCloseSummary{},
- }
-
- // It should transition StateContractClosed -> StateFullyResolved.
- chanArbCtx.AssertStateTransitions(StateContractClosed, StateFullyResolved)
-
- // It should also mark the channel as resolved.
- select {
- case <-chanArbCtx.resolvedChan:
- // Expected.
- case <-time.After(defaultTimeout):
- t.Fatalf("contract was not resolved")
- }
-}
-
-// TestChannelArbitratorBreachClose tests that the ChannelArbitrator goes
-// through the expected states in case we notice a breach in the chain, and
-// gracefully exits.
-func TestChannelArbitratorBreachClose(t *testing.T) {
- log := &mockArbitratorLog{
- state: StateDefault,
- newStates: make(chan ArbitratorState, 5),
- }
-
- chanArbCtx, err := createTestChannelArbitrator(t, log)
- if err != nil {
- t.Fatalf("unable to create ChannelArbitrator: %v", err)
- }
- chanArb := chanArbCtx.chanArb
-
- if err := chanArb.Start(nil); err != nil {
- t.Fatalf("unable to start ChannelArbitrator: %v", err)
- }
- defer func() {
- if err := chanArb.Stop(); err != nil {
- t.Fatal(err)
- }
- }()
-
- // It should start out in the default state.
- chanArbCtx.AssertState(StateDefault)
-
- // Send a breach close event.
- chanArb.cfg.ChainEvents.ContractBreach <- &lnwallet.BreachRetribution{}
-
- // It should transition StateDefault -> StateFullyResolved.
- chanArbCtx.AssertStateTransitions(
- StateFullyResolved,
- )
-
- // It should also mark the channel as resolved.
- select {
- case <-chanArbCtx.resolvedChan:
- // Expected.
- case <-time.After(defaultTimeout):
- t.Fatalf("contract was not resolved")
- }
-}
-
-// TestChannelArbitratorLocalForceClosePendingHtlc tests that the
-// ChannelArbitrator goes through the expected states in case we request it to
-// force close a channel that still has an HTLC pending.
-func TestChannelArbitratorLocalForceClosePendingHtlc(t *testing.T) {
- // We create a new test context for this channel arb, notice that we
- // pass in a nil ArbitratorLog which means that a default one backed by
- // a real DB will be created. We need this for our test as we want to
- // test proper restart recovery and resolver population.
- chanArbCtx, err := createTestChannelArbitrator(t, nil)
- if err != nil {
- t.Fatalf("unable to create ChannelArbitrator: %v", err)
- }
- chanArb := chanArbCtx.chanArb
- chanArb.cfg.PreimageDB = newMockWitnessBeacon()
- chanArb.cfg.Registry = &mockRegistry{}
-
- if err := chanArb.Start(nil); err != nil {
- t.Fatalf("unable to start ChannelArbitrator: %v", err)
- }
- defer chanArb.Stop()
-
- // Create htlcUpdates channel.
- htlcUpdates := make(chan *ContractUpdate)
-
- signals := &ContractSignals{
- HtlcUpdates: htlcUpdates,
- ShortChanID: lnwire.ShortChannelID{},
- }
- chanArb.UpdateContractSignals(signals)
-
- // Add HTLC to channel arbitrator.
- htlcAmt := 10000
- htlc := channeldb.HTLC{
- Incoming: false,
- Amt: lnwire.MilliSatoshi(htlcAmt),
- HtlcIndex: 99,
- }
-
- outgoingDustHtlc := channeldb.HTLC{
- Incoming: false,
- Amt: 100,
- HtlcIndex: 100,
- OutputIndex: -1,
- }
-
- incomingDustHtlc := channeldb.HTLC{
- Incoming: true,
- Amt: 105,
- HtlcIndex: 101,
- OutputIndex: -1,
- }
-
- htlcSet := []channeldb.HTLC{
- htlc, outgoingDustHtlc, incomingDustHtlc,
- }
-
- htlcUpdates <- &ContractUpdate{
- HtlcKey: LocalHtlcSet,
- Htlcs: htlcSet,
- }
-
- errChan := make(chan er.R, 1)
- respChan := make(chan *wire.MsgTx, 1)
-
- // With the channel found, and the request crafted, we'll send over a
- // force close request to the arbitrator that watches this channel.
- chanArb.forceCloseReqs <- &forceCloseReq{
- errResp: errChan,
- closeTx: respChan,
- }
-
- // The force close request should trigger broadcast of the commitment
- // transaction.
- chanArbCtx.AssertStateTransitions(
- StateBroadcastCommit,
- StateCommitmentBroadcasted,
- )
- select {
- case <-respChan:
- case <-time.After(defaultTimeout):
- t.Fatalf("no response received")
- }
-
- select {
- case err := <-errChan:
- if err != nil {
- t.Fatalf("error force closing channel: %v", err)
- }
- case <-time.After(defaultTimeout):
- t.Fatalf("no response received")
- }
-
- // Now notify about the local force close getting confirmed.
- closeTx := &wire.MsgTx{
- TxIn: []*wire.TxIn{
- {
- PreviousOutPoint: wire.OutPoint{},
- Witness: [][]byte{
- {0x1},
- {0x2},
- },
- },
- },
- }
-
- htlcOp := wire.OutPoint{
- Hash: closeTx.TxHash(),
- Index: 0,
- }
-
- // Set up the outgoing resolution. Populate SignedTimeoutTx because our
- // commitment transaction got confirmed.
- outgoingRes := lnwallet.OutgoingHtlcResolution{
- Expiry: 10,
- SweepSignDesc: input.SignDescriptor{
- Output: &wire.TxOut{},
- },
- SignedTimeoutTx: &wire.MsgTx{
- TxIn: []*wire.TxIn{
- {
- PreviousOutPoint: htlcOp,
- Witness: [][]byte{{}},
- },
- },
- TxOut: []*wire.TxOut{
- {},
- },
- },
- }
-
- chanArb.cfg.ChainEvents.LocalUnilateralClosure <- &LocalUnilateralCloseInfo{
- SpendDetail: &chainntnfs.SpendDetail{},
- LocalForceCloseSummary: &lnwallet.LocalForceCloseSummary{
- CloseTx: closeTx,
- HtlcResolutions: &lnwallet.HtlcResolutions{
- OutgoingHTLCs: []lnwallet.OutgoingHtlcResolution{
- outgoingRes,
- },
- },
- },
- ChannelCloseSummary: &channeldb.ChannelCloseSummary{},
- CommitSet: CommitSet{
- ConfCommitKey: &LocalHtlcSet,
- HtlcSets: map[HtlcSetKey][]channeldb.HTLC{
- LocalHtlcSet: htlcSet,
- },
- },
- }
-
- chanArbCtx.AssertStateTransitions(
- StateContractClosed,
- StateWaitingFullResolution,
- )
-
- // We expect an immediate resolution message for the outgoing dust htlc.
- // It is not resolvable on-chain.
- select {
- case msgs := <-chanArbCtx.resolutions:
- if len(msgs) != 1 {
- t.Fatalf("expected 1 message, instead got %v", len(msgs))
- }
-
- if msgs[0].HtlcIndex != outgoingDustHtlc.HtlcIndex {
- t.Fatalf("wrong htlc index: expected %v, got %v",
- outgoingDustHtlc.HtlcIndex, msgs[0].HtlcIndex)
- }
- case <-time.After(defaultTimeout):
- t.Fatalf("resolution msgs not sent")
- }
-
- // We'll grab the old notifier here as our resolvers are still holding
- // a reference to this instance, and a new one will be created when we
- // restart the channel arb below.
- oldNotifier := chanArb.cfg.Notifier.(*mock.ChainNotifier)
-
- // At this point, in order to simulate a restart, we'll re-create the
- // channel arbitrator. We do this to ensure that all information
- // required to properly resolve this HTLC are populated.
- if err := chanArb.Stop(); err != nil {
- t.Fatalf("unable to stop chan arb: %v", err)
- }
-
- // We'll no re-create the resolver, notice that we use the existing
- // arbLog so it carries over the same on-disk state.
- chanArbCtxNew, err := chanArbCtx.Restart(nil)
- if err != nil {
- t.Fatalf("unable to create ChannelArbitrator: %v", err)
- }
- chanArb = chanArbCtxNew.chanArb
- defer chanArbCtxNew.CleanUp()
-
- // Post restart, it should be the case that our resolver was properly
- // supplemented, and we only have a single resolver in the final set.
- if len(chanArb.activeResolvers) != 1 {
- t.Fatalf("expected single resolver, instead got: %v",
- len(chanArb.activeResolvers))
- }
-
- // We'll now examine the in-memory state of the active resolvers to
- // ensure t hey were populated properly.
- resolver := chanArb.activeResolvers[0]
- outgoingResolver, ok := resolver.(*htlcOutgoingContestResolver)
- if !ok {
- t.Fatalf("expected outgoing contest resolver, got %vT",
- resolver)
- }
-
- // The resolver should have its htlc amt field populated as it.
- if int64(outgoingResolver.htlc.Amt) != int64(htlcAmt) {
- t.Fatalf("wrong htlc amount: expected %v, got %v,",
- htlcAmt, int64(outgoingResolver.htlc.Amt))
- }
-
- // htlcOutgoingContestResolver is now active and waiting for the HTLC to
- // expire. It should not yet have passed it on for incubation.
- select {
- case <-chanArbCtx.incubationRequests:
- t.Fatalf("contract should not be incubated yet")
- default:
- }
-
- // Send a notification that the expiry height has been reached.
- oldNotifier.EpochChan <- &chainntnfs.BlockEpoch{Height: 10}
-
- // htlcOutgoingContestResolver is now transforming into a
- // htlcTimeoutResolver and should send the contract off for incubation.
- select {
- case <-chanArbCtx.incubationRequests:
- case <-time.After(defaultTimeout):
- t.Fatalf("no response received")
- }
-
- // Notify resolver that the HTLC output of the commitment has been
- // spent.
- oldNotifier.SpendChan <- &chainntnfs.SpendDetail{SpendingTx: closeTx}
-
- // Finally, we should also receive a resolution message instructing the
- // switch to cancel back the HTLC.
- select {
- case msgs := <-chanArbCtx.resolutions:
- if len(msgs) != 1 {
- t.Fatalf("expected 1 message, instead got %v", len(msgs))
- }
-
- if msgs[0].HtlcIndex != htlc.HtlcIndex {
- t.Fatalf("wrong htlc index: expected %v, got %v",
- htlc.HtlcIndex, msgs[0].HtlcIndex)
- }
- case <-time.After(defaultTimeout):
- t.Fatalf("resolution msgs not sent")
- }
-
- // As this is our own commitment transaction, the HTLC will go through
- // to the second level. Channel arbitrator should still not be marked
- // as resolved.
- select {
- case <-chanArbCtxNew.resolvedChan:
- t.Fatalf("channel resolved prematurely")
- default:
- }
-
- // Notify resolver that the second level transaction is spent.
- oldNotifier.SpendChan <- &chainntnfs.SpendDetail{SpendingTx: closeTx}
-
- // At this point channel should be marked as resolved.
- chanArbCtxNew.AssertStateTransitions(StateFullyResolved)
- select {
- case <-chanArbCtxNew.resolvedChan:
- case <-time.After(defaultTimeout):
- t.Fatalf("contract was not resolved")
- }
-}
-
-// TestChannelArbitratorLocalForceCloseRemoteConfiremd tests that the
-// ChannelArbitrator behaves as expected in the case where we request a local
-// force close, but a remote commitment ends up being confirmed in chain.
-func TestChannelArbitratorLocalForceCloseRemoteConfirmed(t *testing.T) {
- log := &mockArbitratorLog{
- state: StateDefault,
- newStates: make(chan ArbitratorState, 5),
- }
-
- chanArbCtx, err := createTestChannelArbitrator(t, log)
- if err != nil {
- t.Fatalf("unable to create ChannelArbitrator: %v", err)
- }
- chanArb := chanArbCtx.chanArb
-
- if err := chanArb.Start(nil); err != nil {
- t.Fatalf("unable to start ChannelArbitrator: %v", err)
- }
- defer chanArb.Stop()
-
- // It should start out in the default state.
- chanArbCtx.AssertState(StateDefault)
-
- // Create a channel we can use to assert the state when it publishes
- // the close tx.
- stateChan := make(chan ArbitratorState)
- chanArb.cfg.PublishTx = func(*wire.MsgTx, string) er.R {
- // When the force close tx is being broadcasted, check that the
- // state is correct at that point.
- select {
- case stateChan <- chanArb.state:
- case <-chanArb.quit:
- return er.Errorf("exiting")
- }
- return nil
- }
-
- errChan := make(chan er.R, 1)
- respChan := make(chan *wire.MsgTx, 1)
-
- // With the channel found, and the request crafted, we'll send over a
- // force close request to the arbitrator that watches this channel.
- chanArb.forceCloseReqs <- &forceCloseReq{
- errResp: errChan,
- closeTx: respChan,
- }
-
- // It should transition to StateBroadcastCommit.
- chanArbCtx.AssertStateTransitions(StateBroadcastCommit)
-
- // We expect it to be in state StateBroadcastCommit when publishing
- // the force close.
- select {
- case state := <-stateChan:
- if state != StateBroadcastCommit {
- t.Fatalf("state during PublishTx was %v", state)
- }
- case <-time.After(stateTimeout):
- t.Fatalf("no state update received")
- }
-
- // After broadcasting, transition should be to
- // StateCommitmentBroadcasted.
- chanArbCtx.AssertStateTransitions(StateCommitmentBroadcasted)
-
- // Wait for a response to the force close.
- select {
- case <-respChan:
- case <-time.After(defaultTimeout):
- t.Fatalf("no response received")
- }
-
- select {
- case err := <-errChan:
- if err != nil {
- t.Fatalf("error force closing channel: %v", err)
- }
- case <-time.After(defaultTimeout):
- t.Fatalf("no response received")
- }
-
- // The state should be StateCommitmentBroadcasted.
- chanArbCtx.AssertState(StateCommitmentBroadcasted)
-
- // Now notify about the _REMOTE_ commitment getting confirmed.
- commitSpend := &chainntnfs.SpendDetail{
- SpenderTxHash: &chainhash.Hash{},
- }
- uniClose := &lnwallet.UnilateralCloseSummary{
- SpendDetail: commitSpend,
- HtlcResolutions: &lnwallet.HtlcResolutions{},
- }
- chanArb.cfg.ChainEvents.RemoteUnilateralClosure <- &RemoteUnilateralCloseInfo{
- UnilateralCloseSummary: uniClose,
- }
-
- // It should transition StateContractClosed -> StateFullyResolved.
- chanArbCtx.AssertStateTransitions(StateContractClosed, StateFullyResolved)
-
- // It should resolve.
- select {
- case <-chanArbCtx.resolvedChan:
- // Expected.
- case <-time.After(stateTimeout):
- t.Fatalf("contract was not resolved")
- }
-}
-
-// TestChannelArbitratorLocalForceCloseDoubleSpend tests that the
-// ChannelArbitrator behaves as expected in the case where we request a local
-// force close, but we fail broadcasting our commitment because a remote
-// commitment has already been published.
-func TestChannelArbitratorLocalForceDoubleSpend(t *testing.T) {
- log := &mockArbitratorLog{
- state: StateDefault,
- newStates: make(chan ArbitratorState, 5),
- }
-
- chanArbCtx, err := createTestChannelArbitrator(t, log)
- if err != nil {
- t.Fatalf("unable to create ChannelArbitrator: %v", err)
- }
- chanArb := chanArbCtx.chanArb
-
- if err := chanArb.Start(nil); err != nil {
- t.Fatalf("unable to start ChannelArbitrator: %v", err)
- }
- defer chanArb.Stop()
-
- // It should start out in the default state.
- chanArbCtx.AssertState(StateDefault)
-
- // Return ErrDoubleSpend when attempting to publish the tx.
- stateChan := make(chan ArbitratorState)
- chanArb.cfg.PublishTx = func(*wire.MsgTx, string) er.R {
- // When the force close tx is being broadcasted, check that the
- // state is correct at that point.
- select {
- case stateChan <- chanArb.state:
- case <-chanArb.quit:
- return er.Errorf("exiting")
- }
- return lnwallet.ErrDoubleSpend.Default()
- }
-
- errChan := make(chan er.R, 1)
- respChan := make(chan *wire.MsgTx, 1)
-
- // With the channel found, and the request crafted, we'll send over a
- // force close request to the arbitrator that watches this channel.
- chanArb.forceCloseReqs <- &forceCloseReq{
- errResp: errChan,
- closeTx: respChan,
- }
-
- // It should transition to StateBroadcastCommit.
- chanArbCtx.AssertStateTransitions(StateBroadcastCommit)
-
- // We expect it to be in state StateBroadcastCommit when publishing
- // the force close.
- select {
- case state := <-stateChan:
- if state != StateBroadcastCommit {
- t.Fatalf("state during PublishTx was %v", state)
- }
- case <-time.After(stateTimeout):
- t.Fatalf("no state update received")
- }
-
- // After broadcasting, transition should be to
- // StateCommitmentBroadcasted.
- chanArbCtx.AssertStateTransitions(StateCommitmentBroadcasted)
-
- // Wait for a response to the force close.
- select {
- case <-respChan:
- case <-time.After(defaultTimeout):
- t.Fatalf("no response received")
- }
-
- select {
- case err := <-errChan:
- if err != nil {
- t.Fatalf("error force closing channel: %v", err)
- }
- case <-time.After(defaultTimeout):
- t.Fatalf("no response received")
- }
-
- // The state should be StateCommitmentBroadcasted.
- chanArbCtx.AssertState(StateCommitmentBroadcasted)
-
- // Now notify about the _REMOTE_ commitment getting confirmed.
- commitSpend := &chainntnfs.SpendDetail{
- SpenderTxHash: &chainhash.Hash{},
- }
- uniClose := &lnwallet.UnilateralCloseSummary{
- SpendDetail: commitSpend,
- HtlcResolutions: &lnwallet.HtlcResolutions{},
- }
- chanArb.cfg.ChainEvents.RemoteUnilateralClosure <- &RemoteUnilateralCloseInfo{
- UnilateralCloseSummary: uniClose,
- }
-
- // It should transition StateContractClosed -> StateFullyResolved.
- chanArbCtx.AssertStateTransitions(StateContractClosed, StateFullyResolved)
-
- // It should resolve.
- select {
- case <-chanArbCtx.resolvedChan:
- // Expected.
- case <-time.After(stateTimeout):
- t.Fatalf("contract was not resolved")
- }
-}
-
-// TestChannelArbitratorPersistence tests that the ChannelArbitrator is able to
-// keep advancing the state machine from various states after restart.
-func TestChannelArbitratorPersistence(t *testing.T) {
- // Start out with a log that will fail writing the set of resolutions.
- log := &mockArbitratorLog{
- state: StateDefault,
- newStates: make(chan ArbitratorState, 5),
- failLog: true,
- }
-
- chanArbCtx, err := createTestChannelArbitrator(t, log)
- if err != nil {
- t.Fatalf("unable to create ChannelArbitrator: %v", err)
- }
-
- chanArb := chanArbCtx.chanArb
- if err := chanArb.Start(nil); err != nil {
- t.Fatalf("unable to start ChannelArbitrator: %v", err)
- }
-
- // It should start in StateDefault.
- chanArbCtx.AssertState(StateDefault)
-
- // Send a remote force close event.
- commitSpend := &chainntnfs.SpendDetail{
- SpenderTxHash: &chainhash.Hash{},
- }
-
- uniClose := &lnwallet.UnilateralCloseSummary{
- SpendDetail: commitSpend,
- HtlcResolutions: &lnwallet.HtlcResolutions{},
- }
- chanArb.cfg.ChainEvents.RemoteUnilateralClosure <- &RemoteUnilateralCloseInfo{
- UnilateralCloseSummary: uniClose,
- }
-
- // Since writing the resolutions fail, the arbitrator should not
- // advance to the next state.
- time.Sleep(100 * time.Millisecond)
- if log.state != StateDefault {
- t.Fatalf("expected to stay in StateDefault")
- }
-
- // Restart the channel arb, this'll use the same long and prior
- // context.
- chanArbCtx, err = chanArbCtx.Restart(nil)
- if err != nil {
- t.Fatalf("unable to restart channel arb: %v", err)
- }
- chanArb = chanArbCtx.chanArb
-
- // Again, it should start up in the default state.
- chanArbCtx.AssertState(StateDefault)
-
- // Now we make the log succeed writing the resolutions, but fail when
- // attempting to close the channel.
- log.failLog = false
- chanArb.cfg.MarkChannelClosed = func(*channeldb.ChannelCloseSummary,
- ...channeldb.ChannelStatus) er.R {
-
- return er.Errorf("intentional close error")
- }
-
- // Send a new remote force close event.
- chanArb.cfg.ChainEvents.RemoteUnilateralClosure <- &RemoteUnilateralCloseInfo{
- UnilateralCloseSummary: uniClose,
- }
-
- // Since closing the channel failed, the arbitrator should stay in the
- // default state.
- time.Sleep(100 * time.Millisecond)
- if log.state != StateDefault {
- t.Fatalf("expected to stay in StateDefault")
- }
-
- // Restart once again to simulate yet another restart.
- chanArbCtx, err = chanArbCtx.Restart(nil)
- if err != nil {
- t.Fatalf("unable to restart channel arb: %v", err)
- }
- chanArb = chanArbCtx.chanArb
-
- // Starts out in StateDefault.
- chanArbCtx.AssertState(StateDefault)
-
- // Now make fetching the resolutions fail.
- log.failFetch = er.GenericErrorType.Code("intentional fetch failure")
- chanArb.cfg.ChainEvents.RemoteUnilateralClosure <- &RemoteUnilateralCloseInfo{
- UnilateralCloseSummary: uniClose,
- }
-
- // Since logging the resolutions and closing the channel now succeeds,
- // it should advance to StateContractClosed.
- chanArbCtx.AssertStateTransitions(StateContractClosed)
-
- // It should not advance further, however, as fetching resolutions
- // failed.
- time.Sleep(100 * time.Millisecond)
- if log.state != StateContractClosed {
- t.Fatalf("expected to stay in StateContractClosed")
- }
- chanArb.Stop()
-
- // Create a new arbitrator, and now make fetching resolutions succeed.
- log.failFetch = nil
- chanArbCtx, err = chanArbCtx.Restart(nil)
- if err != nil {
- t.Fatalf("unable to restart channel arb: %v", err)
- }
- defer chanArbCtx.CleanUp()
-
- // Finally it should advance to StateFullyResolved.
- chanArbCtx.AssertStateTransitions(StateFullyResolved)
-
- // It should also mark the channel as resolved.
- select {
- case <-chanArbCtx.resolvedChan:
- // Expected.
- case <-time.After(defaultTimeout):
- t.Fatalf("contract was not resolved")
- }
-}
-
-// TestChannelArbitratorForceCloseBreachedChannel tests that the channel
-// arbitrator is able to handle a channel in the process of being force closed
-// is breached by the remote node. In these cases we expect the
-// ChannelArbitrator to gracefully exit, as the breach is handled by other
-// subsystems.
-func TestChannelArbitratorForceCloseBreachedChannel(t *testing.T) {
- log := &mockArbitratorLog{
- state: StateDefault,
- newStates: make(chan ArbitratorState, 5),
- }
-
- chanArbCtx, err := createTestChannelArbitrator(t, log)
- if err != nil {
- t.Fatalf("unable to create ChannelArbitrator: %v", err)
- }
-
- chanArb := chanArbCtx.chanArb
- if err := chanArb.Start(nil); err != nil {
- t.Fatalf("unable to start ChannelArbitrator: %v", err)
- }
-
- // It should start in StateDefault.
- chanArbCtx.AssertState(StateDefault)
-
- // We start by attempting a local force close. We'll return an
- // unexpected publication error, causing the state machine to halt.
- expErr := er.GenericErrorType.Code("intentional publication error")
- stateChan := make(chan ArbitratorState)
- chanArb.cfg.PublishTx = func(*wire.MsgTx, string) er.R {
- // When the force close tx is being broadcasted, check that the
- // state is correct at that point.
- select {
- case stateChan <- chanArb.state:
- case <-chanArb.quit:
- return er.Errorf("exiting")
- }
- return expErr.Default()
- }
-
- errChan := make(chan er.R, 1)
- respChan := make(chan *wire.MsgTx, 1)
-
- // With the channel found, and the request crafted, we'll send over a
- // force close request to the arbitrator that watches this channel.
- chanArb.forceCloseReqs <- &forceCloseReq{
- errResp: errChan,
- closeTx: respChan,
- }
-
- // It should transition to StateBroadcastCommit.
- chanArbCtx.AssertStateTransitions(StateBroadcastCommit)
-
- // We expect it to be in state StateBroadcastCommit when attempting
- // the force close.
- select {
- case state := <-stateChan:
- if state != StateBroadcastCommit {
- t.Fatalf("state during PublishTx was %v", state)
- }
- case <-time.After(stateTimeout):
- t.Fatalf("no state update received")
- }
-
- // Make sure we get the expected error.
- select {
- case err := <-errChan:
- if !expErr.Is(err) {
- t.Fatalf("unexpected error force closing channel: %v",
- err)
- }
- case <-time.After(defaultTimeout):
- t.Fatalf("no response received")
- }
-
- // We mimic that the channel is breached while the channel arbitrator
- // is down. This means that on restart it will be started with a
- // pending close channel, of type BreachClose.
- chanArbCtx, err = chanArbCtx.Restart(func(c *chanArbTestCtx) {
- c.chanArb.cfg.IsPendingClose = true
- c.chanArb.cfg.ClosingHeight = 100
- c.chanArb.cfg.CloseType = channeldb.BreachClose
- })
- if err != nil {
- t.Fatalf("unable to create ChannelArbitrator: %v", err)
- }
- defer chanArbCtx.CleanUp()
-
- // Finally it should advance to StateFullyResolved.
- chanArbCtx.AssertStateTransitions(StateFullyResolved)
-
- // It should also mark the channel as resolved.
- select {
- case <-chanArbCtx.resolvedChan:
- // Expected.
- case <-time.After(defaultTimeout):
- t.Fatalf("contract was not resolved")
- }
-}
-
-// TestChannelArbitratorCommitFailure tests that the channel arbitrator is able
-// to recover from a failed CommitState call at restart.
-func TestChannelArbitratorCommitFailure(t *testing.T) {
-
- testCases := []struct {
-
- // closeType is the type of channel close we want ot test.
- closeType channeldb.ClosureType
-
- // sendEvent is a function that will send the event
- // corresponding to this test's closeType to the passed
- // ChannelArbitrator.
- sendEvent func(chanArb *ChannelArbitrator)
-
- // expectedStates is the states we expect the state machine to
- // go through after a restart and successful log commit.
- expectedStates []ArbitratorState
- }{
- {
- closeType: channeldb.CooperativeClose,
- sendEvent: func(chanArb *ChannelArbitrator) {
- closeInfo := &CooperativeCloseInfo{
- &channeldb.ChannelCloseSummary{},
- }
- chanArb.cfg.ChainEvents.CooperativeClosure <- closeInfo
- },
- expectedStates: []ArbitratorState{StateFullyResolved},
- },
- {
- closeType: channeldb.RemoteForceClose,
- sendEvent: func(chanArb *ChannelArbitrator) {
- commitSpend := &chainntnfs.SpendDetail{
- SpenderTxHash: &chainhash.Hash{},
- }
-
- uniClose := &lnwallet.UnilateralCloseSummary{
- SpendDetail: commitSpend,
- HtlcResolutions: &lnwallet.HtlcResolutions{},
- }
- chanArb.cfg.ChainEvents.RemoteUnilateralClosure <- &RemoteUnilateralCloseInfo{
- UnilateralCloseSummary: uniClose,
- }
- },
- expectedStates: []ArbitratorState{StateContractClosed, StateFullyResolved},
- },
- {
- closeType: channeldb.LocalForceClose,
- sendEvent: func(chanArb *ChannelArbitrator) {
- chanArb.cfg.ChainEvents.LocalUnilateralClosure <- &LocalUnilateralCloseInfo{
- SpendDetail: &chainntnfs.SpendDetail{},
- LocalForceCloseSummary: &lnwallet.LocalForceCloseSummary{
- CloseTx: &wire.MsgTx{},
- HtlcResolutions: &lnwallet.HtlcResolutions{},
- },
- ChannelCloseSummary: &channeldb.ChannelCloseSummary{},
- }
- },
- expectedStates: []ArbitratorState{StateContractClosed, StateFullyResolved},
- },
- }
-
- for _, test := range testCases {
- test := test
-
- log := &mockArbitratorLog{
- state: StateDefault,
- newStates: make(chan ArbitratorState, 5),
- failCommit: true,
-
- // Set the log to fail on the first expected state
- // after state machine progress for this test case.
- failCommitState: test.expectedStates[0],
- }
-
- chanArbCtx, err := createTestChannelArbitrator(t, log)
- if err != nil {
- t.Fatalf("unable to create ChannelArbitrator: %v", err)
- }
-
- chanArb := chanArbCtx.chanArb
- if err := chanArb.Start(nil); err != nil {
- t.Fatalf("unable to start ChannelArbitrator: %v", err)
- }
-
- // It should start in StateDefault.
- chanArbCtx.AssertState(StateDefault)
-
- closed := make(chan struct{})
- chanArb.cfg.MarkChannelClosed = func(
- *channeldb.ChannelCloseSummary,
- ...channeldb.ChannelStatus) er.R {
- close(closed)
- return nil
- }
-
- // Send the test event to trigger the state machine.
- test.sendEvent(chanArb)
-
- select {
- case <-closed:
- case <-time.After(defaultTimeout):
- t.Fatalf("channel was not marked closed")
- }
-
- // Since the channel was marked closed in the database, but the
- // commit to the next state failed, the state should still be
- // StateDefault.
- time.Sleep(100 * time.Millisecond)
- if log.state != StateDefault {
- t.Fatalf("expected to stay in StateDefault, instead "+
- "has %v", log.state)
- }
- chanArb.Stop()
-
- // Start the arbitrator again, with IsPendingClose reporting
- // the channel closed in the database.
- log.failCommit = false
- chanArbCtx, err = chanArbCtx.Restart(func(c *chanArbTestCtx) {
- c.chanArb.cfg.IsPendingClose = true
- c.chanArb.cfg.ClosingHeight = 100
- c.chanArb.cfg.CloseType = test.closeType
- })
- if err != nil {
- t.Fatalf("unable to create ChannelArbitrator: %v", err)
- }
-
- // Since the channel is marked closed in the database, it
- // should advance to the expected states.
- chanArbCtx.AssertStateTransitions(test.expectedStates...)
-
- // It should also mark the channel as resolved.
- select {
- case <-chanArbCtx.resolvedChan:
- // Expected.
- case <-time.After(defaultTimeout):
- t.Fatalf("contract was not resolved")
- }
- }
-}
-
-// TestChannelArbitratorEmptyResolutions makes sure that a channel that is
-// pending close in the database, but haven't had any resolutions logged will
-// not be marked resolved. This situation must be handled to avoid closing
-// channels from earlier versions of the ChannelArbitrator, which didn't have a
-// proper handoff from the ChainWatcher, and we could risk ending up in a state
-// where the channel was closed in the DB, but the resolutions weren't properly
-// written.
-func TestChannelArbitratorEmptyResolutions(t *testing.T) {
- // Start out with a log that will fail writing the set of resolutions.
- log := &mockArbitratorLog{
- state: StateDefault,
- newStates: make(chan ArbitratorState, 5),
- failFetch: errNoResolutions,
- }
-
- chanArbCtx, err := createTestChannelArbitrator(t, log)
- if err != nil {
- t.Fatalf("unable to create ChannelArbitrator: %v", err)
- }
-
- chanArb := chanArbCtx.chanArb
- chanArb.cfg.IsPendingClose = true
- chanArb.cfg.ClosingHeight = 100
- chanArb.cfg.CloseType = channeldb.RemoteForceClose
-
- if err := chanArb.Start(nil); err != nil {
- t.Fatalf("unable to start ChannelArbitrator: %v", err)
- }
-
- // It should not advance its state beyond StateContractClosed, since
- // fetching resolutions fails.
- chanArbCtx.AssertStateTransitions(StateContractClosed)
-
- // It should not advance further, however, as fetching resolutions
- // failed.
- time.Sleep(100 * time.Millisecond)
- if log.state != StateContractClosed {
- t.Fatalf("expected to stay in StateContractClosed")
- }
- chanArb.Stop()
-}
-
-// TestChannelArbitratorAlreadyForceClosed ensures that we cannot force close a
-// channel that is already in the process of doing so.
-func TestChannelArbitratorAlreadyForceClosed(t *testing.T) {
- t.Parallel()
-
- // We'll create the arbitrator and its backing log to signal that it's
- // already in the process of being force closed.
- log := &mockArbitratorLog{
- state: StateCommitmentBroadcasted,
- }
- chanArbCtx, err := createTestChannelArbitrator(t, log)
- if err != nil {
- t.Fatalf("unable to create ChannelArbitrator: %v", err)
- }
- chanArb := chanArbCtx.chanArb
- if err := chanArb.Start(nil); err != nil {
- t.Fatalf("unable to start ChannelArbitrator: %v", err)
- }
- defer chanArb.Stop()
-
- // Then, we'll create a request to signal a force close request to the
- // channel arbitrator.
- errChan := make(chan er.R, 1)
- respChan := make(chan *wire.MsgTx, 1)
-
- select {
- case chanArb.forceCloseReqs <- &forceCloseReq{
- closeTx: respChan,
- errResp: errChan,
- }:
- case <-chanArb.quit:
- }
-
- // Finally, we should ensure that we are not able to do so by seeing
- // the expected errAlreadyForceClosed error.
- select {
- case err = <-errChan:
- if !errAlreadyForceClosed.Is(err) {
- t.Fatalf("expected errAlreadyForceClosed, got %v", err)
- }
- case <-time.After(time.Second):
- t.Fatal("expected to receive error response")
- }
-}
-
-// TestChannelArbitratorDanglingCommitForceClose tests that if there're HTLCs
-// on the remote party's commitment, but not ours, and they're about to time
-// out, then we'll go on chain so we can cancel back the HTLCs on the incoming
-// commitment.
-func TestChannelArbitratorDanglingCommitForceClose(t *testing.T) {
- t.Parallel()
-
- type testCase struct {
- htlcExpired bool
- remotePendingHTLC bool
- confCommit HtlcSetKey
- }
- var testCases []testCase
-
- testOptions := []bool{true, false}
- confOptions := []HtlcSetKey{
- LocalHtlcSet, RemoteHtlcSet, RemotePendingHtlcSet,
- }
- for _, htlcExpired := range testOptions {
- for _, remotePendingHTLC := range testOptions {
- for _, commitConf := range confOptions {
- switch {
- // If the HTLC is on the remote commitment, and
- // that one confirms, then there's no special
- // behavior, we should play all the HTLCs on
- // that remote commitment as normal.
- case !remotePendingHTLC && commitConf == RemoteHtlcSet:
- fallthrough
-
- // If the HTLC is on the remote pending, and
- // that confirms, then we don't have any
- // special actions.
- case remotePendingHTLC && commitConf == RemotePendingHtlcSet:
- continue
- }
-
- testCases = append(testCases, testCase{
- htlcExpired: htlcExpired,
- remotePendingHTLC: remotePendingHTLC,
- confCommit: commitConf,
- })
- }
- }
- }
-
- for _, testCase := range testCases {
- testCase := testCase
- testName := fmt.Sprintf("testCase: htlcExpired=%v,"+
- "remotePendingHTLC=%v,remotePendingCommitConf=%v",
- testCase.htlcExpired, testCase.remotePendingHTLC,
- testCase.confCommit)
-
- t.Run(testName, func(t *testing.T) {
- t.Parallel()
-
- arbLog := &mockArbitratorLog{
- state: StateDefault,
- newStates: make(chan ArbitratorState, 5),
- resolvers: make(map[ContractResolver]struct{}),
- }
-
- chanArbCtx, err := createTestChannelArbitrator(
- t, arbLog,
- )
- if err != nil {
- t.Fatalf("unable to create ChannelArbitrator: %v", err)
- }
- chanArb := chanArbCtx.chanArb
- if err := chanArb.Start(nil); err != nil {
- t.Fatalf("unable to start ChannelArbitrator: %v", err)
- }
- defer chanArb.Stop()
-
- // Now that our channel arb has started, we'll set up
- // its contract signals channel so we can send it
- // various HTLC updates for this test.
- htlcUpdates := make(chan *ContractUpdate)
- signals := &ContractSignals{
- HtlcUpdates: htlcUpdates,
- ShortChanID: lnwire.ShortChannelID{},
- }
- chanArb.UpdateContractSignals(signals)
-
- htlcKey := RemoteHtlcSet
- if testCase.remotePendingHTLC {
- htlcKey = RemotePendingHtlcSet
- }
-
- // Next, we'll send it a new HTLC that is set to expire
- // in 10 blocks, this HTLC will only appear on the
- // commitment transaction of the _remote_ party.
- htlcIndex := uint64(99)
- htlcExpiry := uint32(10)
- danglingHTLC := channeldb.HTLC{
- Incoming: false,
- Amt: 10000,
- HtlcIndex: htlcIndex,
- RefundTimeout: htlcExpiry,
- }
- htlcUpdates <- &ContractUpdate{
- HtlcKey: htlcKey,
- Htlcs: []channeldb.HTLC{danglingHTLC},
- }
-
- // At this point, we now have a split commitment state
- // from the PoV of the channel arb. There's now an HTLC
- // that only exists on the commitment transaction of
- // the remote party.
- errChan := make(chan er.R, 1)
- respChan := make(chan *wire.MsgTx, 1)
- switch {
- // If we want an HTLC expiration trigger, then We'll
- // now mine a block (height 5), which is 5 blocks away
- // (our grace delta) from the expiry of that HTLC.
- case testCase.htlcExpired:
- chanArbCtx.chanArb.blocks <- 5
-
- // Otherwise, we'll just trigger a regular force close
- // request.
- case !testCase.htlcExpired:
- chanArb.forceCloseReqs <- &forceCloseReq{
- errResp: errChan,
- closeTx: respChan,
- }
-
- }
-
- // At this point, the resolver should now have
- // determined that it needs to go to chain in order to
- // block off the redemption path so it can cancel the
- // incoming HTLC.
- chanArbCtx.AssertStateTransitions(
- StateBroadcastCommit,
- StateCommitmentBroadcasted,
- )
-
- // Next we'll craft a fake commitment transaction to
- // send to signal that the channel has closed out on
- // chain.
- closeTx := &wire.MsgTx{
- TxIn: []*wire.TxIn{
- {
- PreviousOutPoint: wire.OutPoint{},
- Witness: [][]byte{
- {0x9},
- },
- },
- },
- }
-
- // We'll now signal to the channel arb that the HTLC
- // has fully closed on chain. Our local commit set
- // shows now HTLC on our commitment, but one on the
- // remote commitment. This should result in the HTLC
- // being canalled back. Also note that there're no HTLC
- // resolutions sent since we have none on our
- // commitment transaction.
- uniCloseInfo := &LocalUnilateralCloseInfo{
- SpendDetail: &chainntnfs.SpendDetail{},
- LocalForceCloseSummary: &lnwallet.LocalForceCloseSummary{
- CloseTx: closeTx,
- HtlcResolutions: &lnwallet.HtlcResolutions{},
- },
- ChannelCloseSummary: &channeldb.ChannelCloseSummary{},
- CommitSet: CommitSet{
- ConfCommitKey: &testCase.confCommit,
- HtlcSets: make(map[HtlcSetKey][]channeldb.HTLC),
- },
- }
-
- // If the HTLC was meant to expire, then we'll mark the
- // closing transaction at the proper expiry height
- // since our comparison "need to timeout" comparison is
- // based on the confirmation height.
- if testCase.htlcExpired {
- uniCloseInfo.SpendDetail.SpendingHeight = 5
- }
-
- // Depending on if we're testing the remote pending
- // commitment or not, we'll populate either a fake
- // dangling remote commitment, or a regular locked in
- // one.
- htlcs := []channeldb.HTLC{danglingHTLC}
- if testCase.remotePendingHTLC {
- uniCloseInfo.CommitSet.HtlcSets[RemotePendingHtlcSet] = htlcs
- } else {
- uniCloseInfo.CommitSet.HtlcSets[RemoteHtlcSet] = htlcs
- }
-
- chanArb.cfg.ChainEvents.LocalUnilateralClosure <- uniCloseInfo
-
- // The channel arb should now transition to waiting
- // until the HTLCs have been fully resolved.
- chanArbCtx.AssertStateTransitions(
- StateContractClosed,
- StateWaitingFullResolution,
- )
-
- // Now that we've sent this signal, we should have that
- // HTLC be canceled back immediately.
- select {
- case msgs := <-chanArbCtx.resolutions:
- if len(msgs) != 1 {
- t.Fatalf("expected 1 message, "+
- "instead got %v", len(msgs))
- }
-
- if msgs[0].HtlcIndex != htlcIndex {
- t.Fatalf("wrong htlc index: expected %v, got %v",
- htlcIndex, msgs[0].HtlcIndex)
- }
- case <-time.After(defaultTimeout):
- t.Fatalf("resolution msgs not sent")
- }
-
- // There's no contract to send a fully resolve message,
- // so instead, we'll mine another block which'll cause
- // it to re-examine its state and realize there're no
- // more HTLCs.
- chanArbCtx.chanArb.blocks <- 6
- chanArbCtx.AssertStateTransitions(StateFullyResolved)
- })
- }
-}
-
-// TestChannelArbitratorPendingExpiredHTLC tests that if we have pending htlc
-// that is expired we will only go to chain if we are running at least the
-// time defined in PaymentsExpirationGracePeriod.
-// During this time the remote party is expected to send his updates and cancel
-// The htlc.
-func TestChannelArbitratorPendingExpiredHTLC(t *testing.T) {
- t.Parallel()
-
- // We'll create the arbitrator and its backing log in a default state.
- log := &mockArbitratorLog{
- state: StateDefault,
- newStates: make(chan ArbitratorState, 5),
- resolvers: make(map[ContractResolver]struct{}),
- }
- chanArbCtx, err := createTestChannelArbitrator(t, log)
- if err != nil {
- t.Fatalf("unable to create ChannelArbitrator: %v", err)
- }
- chanArb := chanArbCtx.chanArb
-
- // We'll inject a test clock implementation so we can control the uptime.
- startTime := time.Date(2020, time.February, 3, 13, 0, 0, 0, time.UTC)
- testClock := clock.NewTestClock(startTime)
- chanArb.cfg.Clock = testClock
-
- // We also configure the grace period and the IsForwardedHTLC to identify
- // the htlc as our initiated payment.
- chanArb.cfg.PaymentsExpirationGracePeriod = time.Second * 15
- chanArb.cfg.IsForwardedHTLC = func(chanID lnwire.ShortChannelID,
- htlcIndex uint64) bool {
-
- return false
- }
-
- if err := chanArb.Start(nil); err != nil {
- t.Fatalf("unable to start ChannelArbitrator: %v", err)
- }
- defer func() {
- if err := chanArb.Stop(); err != nil {
- t.Fatalf("unable to stop chan arb: %v", err)
- }
- }()
-
- // Now that our channel arb has started, we'll set up
- // its contract signals channel so we can send it
- // various HTLC updates for this test.
- htlcUpdates := make(chan *ContractUpdate)
- signals := &ContractSignals{
- HtlcUpdates: htlcUpdates,
- ShortChanID: lnwire.ShortChannelID{},
- }
- chanArb.UpdateContractSignals(signals)
-
- // Next, we'll send it a new HTLC that is set to expire
- // in 10 blocks.
- htlcIndex := uint64(99)
- htlcExpiry := uint32(10)
- pendingHTLC := channeldb.HTLC{
- Incoming: false,
- Amt: 10000,
- HtlcIndex: htlcIndex,
- RefundTimeout: htlcExpiry,
- }
- htlcUpdates <- &ContractUpdate{
- HtlcKey: RemoteHtlcSet,
- Htlcs: []channeldb.HTLC{pendingHTLC},
- }
-
- // We will advance the uptime to 10 seconds which should be still within
- // the grace period and should not trigger going to chain.
- testClock.SetTime(startTime.Add(time.Second * 10))
- chanArbCtx.chanArb.blocks <- 5
- chanArbCtx.AssertState(StateDefault)
-
- // We will advance the uptime to 16 seconds which should trigger going
- // to chain.
- testClock.SetTime(startTime.Add(time.Second * 16))
- chanArbCtx.chanArb.blocks <- 6
- chanArbCtx.AssertStateTransitions(
- StateBroadcastCommit,
- StateCommitmentBroadcasted,
- )
-}
-
-// TestRemoteCloseInitiator tests the setting of close initiator statuses
-// for remote force closes and breaches.
-func TestRemoteCloseInitiator(t *testing.T) {
- // getCloseSummary returns a unilateral close summary for the channel
- // provided.
- getCloseSummary := func(channel *channeldb.OpenChannel) *RemoteUnilateralCloseInfo {
- return &RemoteUnilateralCloseInfo{
- UnilateralCloseSummary: &lnwallet.UnilateralCloseSummary{
- SpendDetail: &chainntnfs.SpendDetail{
- SpenderTxHash: &chainhash.Hash{},
- SpendingTx: &wire.MsgTx{
- TxIn: []*wire.TxIn{},
- TxOut: []*wire.TxOut{},
- },
- },
- ChannelCloseSummary: channeldb.ChannelCloseSummary{
- ChanPoint: channel.FundingOutpoint,
- RemotePub: channel.IdentityPub,
- SettledBalance: btcutil.Amount(500),
- TimeLockedBalance: btcutil.Amount(10000),
- IsPending: false,
- },
- HtlcResolutions: &lnwallet.HtlcResolutions{},
- },
- }
- }
-
- tests := []struct {
- name string
-
- // notifyClose sends the appropriate chain event to indicate
- // that the channel has closed. The event subscription channel
- // is expected to be buffered, as is the default for test
- // channel arbitrators.
- notifyClose func(sub *ChainEventSubscription,
- channel *channeldb.OpenChannel)
-
- // expectedStates is the set of states we expect the arbitrator
- // to progress through.
- expectedStates []ArbitratorState
- }{
- {
- name: "force close",
- notifyClose: func(sub *ChainEventSubscription,
- channel *channeldb.OpenChannel) {
-
- s := getCloseSummary(channel)
- sub.RemoteUnilateralClosure <- s
- },
- expectedStates: []ArbitratorState{
- StateContractClosed, StateFullyResolved,
- },
- },
- }
-
- for _, test := range tests {
- test := test
-
- t.Run(test.name, func(t *testing.T) {
- t.Parallel()
-
- // First, create alice's channel.
- alice, _, cleanUp, err := lnwallet.CreateTestChannels(
- channeldb.SingleFunderTweaklessBit,
- )
- if err != nil {
- t.Fatalf("unable to create test channels: %v",
- err)
- }
- defer cleanUp()
-
- // Create a mock log which will not block the test's
- // expected number of transitions transitions, and has
- // no commit resolutions so that the channel will
- // resolve immediately.
- log := &mockArbitratorLog{
- state: StateDefault,
- newStates: make(chan ArbitratorState,
- len(test.expectedStates)),
- resolutions: &ContractResolutions{
- CommitHash: chainhash.Hash{},
- CommitResolution: nil,
- },
- }
-
- // Mock marking the channel as closed, we only care
- // about setting of channel status.
- mockMarkClosed := func(_ *channeldb.ChannelCloseSummary,
- statuses ...channeldb.ChannelStatus) er.R {
- for _, status := range statuses {
- err := alice.State().ApplyChanStatus(status)
- if err != nil {
- return err
- }
- }
- return nil
- }
-
- chanArbCtx, err := createTestChannelArbitrator(
- t, log, withMarkClosed(mockMarkClosed),
- )
- if err != nil {
- t.Fatalf("unable to create "+
- "ChannelArbitrator: %v", err)
- }
- chanArb := chanArbCtx.chanArb
-
- if err := chanArb.Start(nil); err != nil {
- t.Fatalf("unable to start "+
- "ChannelArbitrator: %v", err)
- }
- defer func() {
- if err := chanArb.Stop(); err != nil {
- t.Fatal(err)
- }
- }()
-
- // It should start out in the default state.
- chanArbCtx.AssertState(StateDefault)
-
- // Notify the close event.
- test.notifyClose(chanArb.cfg.ChainEvents, alice.State())
-
- // Check that the channel transitions as expected.
- chanArbCtx.AssertStateTransitions(
- test.expectedStates...,
- )
-
- // It should also mark the channel as resolved.
- select {
- case <-chanArbCtx.resolvedChan:
- // Expected.
- case <-time.After(defaultTimeout):
- t.Fatalf("contract was not resolved")
- }
-
- // Check that alice has the status we expect.
- if !alice.State().HasChanStatus(
- channeldb.ChanStatusRemoteCloseInitiator,
- ) {
- t.Fatalf("expected remote close initiator, "+
- "got: %v", alice.State().ChanStatus())
- }
- })
- }
-}
-
-// TestChannelArbitratorAnchors asserts that the commitment tx anchor is swept.
-func TestChannelArbitratorAnchors(t *testing.T) {
- log := &mockArbitratorLog{
- state: StateDefault,
- newStates: make(chan ArbitratorState, 5),
- }
-
- chanArbCtx, err := createTestChannelArbitrator(t, log)
- if err != nil {
- t.Fatalf("unable to create ChannelArbitrator: %v", err)
- }
-
- // Replace our mocked put report function with one which will push
- // reports into a channel for us to consume. We update this function
- // because our resolver will be created from the existing chanArb cfg.
- reports := make(chan *channeldb.ResolverReport)
- chanArbCtx.chanArb.cfg.PutResolverReport = putResolverReportInChannel(
- reports,
- )
-
- chanArb := chanArbCtx.chanArb
- chanArb.cfg.PreimageDB = newMockWitnessBeacon()
- chanArb.cfg.Registry = &mockRegistry{}
-
- // Setup two pre-confirmation anchor resolutions on the mock channel.
- chanArb.cfg.Channel.(*mockChannel).anchorResolutions =
- []*lnwallet.AnchorResolution{
- {}, {},
- }
-
- if err := chanArb.Start(nil); err != nil {
- t.Fatalf("unable to start ChannelArbitrator: %v", err)
- }
- defer func() {
- if err := chanArb.Stop(); err != nil {
- t.Fatal(err)
- }
- }()
-
- // Create htlcUpdates channel.
- htlcUpdates := make(chan *ContractUpdate)
-
- signals := &ContractSignals{
- HtlcUpdates: htlcUpdates,
- ShortChanID: lnwire.ShortChannelID{},
- }
- chanArb.UpdateContractSignals(signals)
-
- errChan := make(chan er.R, 1)
- respChan := make(chan *wire.MsgTx, 1)
-
- // With the channel found, and the request crafted, we'll send over a
- // force close request to the arbitrator that watches this channel.
- chanArb.forceCloseReqs <- &forceCloseReq{
- errResp: errChan,
- closeTx: respChan,
- }
-
- // The force close request should trigger broadcast of the commitment
- // transaction.
- chanArbCtx.AssertStateTransitions(
- StateBroadcastCommit,
- StateCommitmentBroadcasted,
- )
-
- // With the commitment tx still unconfirmed, we expect sweep attempts
- // for all three versions of the commitment transaction.
- <-chanArbCtx.sweeper.sweptInputs
- <-chanArbCtx.sweeper.sweptInputs
-
- select {
- case <-respChan:
- case <-time.After(5 * time.Second):
- t.Fatalf("no response received")
- }
-
- select {
- case err := <-errChan:
- if err != nil {
- t.Fatalf("error force closing channel: %v", err)
- }
- case <-time.After(5 * time.Second):
- t.Fatalf("no response received")
- }
-
- // Now notify about the local force close getting confirmed.
- closeTx := &wire.MsgTx{
- TxIn: []*wire.TxIn{
- {
- PreviousOutPoint: wire.OutPoint{},
- Witness: [][]byte{
- {0x1},
- {0x2},
- },
- },
- },
- }
-
- anchorResolution := &lnwallet.AnchorResolution{
- AnchorSignDescriptor: input.SignDescriptor{
- Output: &wire.TxOut{
- Value: 1,
- },
- },
- }
-
- chanArb.cfg.ChainEvents.LocalUnilateralClosure <- &LocalUnilateralCloseInfo{
- SpendDetail: &chainntnfs.SpendDetail{},
- LocalForceCloseSummary: &lnwallet.LocalForceCloseSummary{
- CloseTx: closeTx,
- HtlcResolutions: &lnwallet.HtlcResolutions{},
- AnchorResolution: anchorResolution,
- },
- ChannelCloseSummary: &channeldb.ChannelCloseSummary{},
- CommitSet: CommitSet{
- ConfCommitKey: &LocalHtlcSet,
- HtlcSets: map[HtlcSetKey][]channeldb.HTLC{},
- },
- }
-
- chanArbCtx.AssertStateTransitions(
- StateContractClosed,
- StateWaitingFullResolution,
- )
-
- // We expect to only have the anchor resolver active.
- if len(chanArb.activeResolvers) != 1 {
- t.Fatalf("expected single resolver, instead got: %v",
- len(chanArb.activeResolvers))
- }
-
- resolver := chanArb.activeResolvers[0]
- _, ok := resolver.(*anchorResolver)
- if !ok {
- t.Fatalf("expected anchor resolver, got %T", resolver)
- }
-
- // The anchor resolver is expected to re-offer the anchor input to the
- // sweeper.
- <-chanArbCtx.sweeper.sweptInputs
-
- // The mock sweeper immediately signals success for that input. This
- // should transition the channel to the resolved state.
- chanArbCtx.AssertStateTransitions(StateFullyResolved)
- select {
- case <-chanArbCtx.resolvedChan:
- case <-time.After(5 * time.Second):
- t.Fatalf("contract was not resolved")
- }
-
- anchorAmt := btcutil.Amount(
- anchorResolution.AnchorSignDescriptor.Output.Value,
- )
- spendTx := chanArbCtx.sweeper.sweepTx.TxHash()
- expectedReport := &channeldb.ResolverReport{
- OutPoint: anchorResolution.CommitAnchor,
- Amount: anchorAmt,
- ResolverType: channeldb.ResolverTypeAnchor,
- ResolverOutcome: channeldb.ResolverOutcomeClaimed,
- SpendTxID: &spendTx,
- }
-
- assertResolverReport(t, reports, expectedReport)
-}
-
-// putResolverReportInChannel returns a put report function which will pipe
-// reports into the channel provided.
-func putResolverReportInChannel(reports chan *channeldb.ResolverReport) func(
- _ kvdb.RwTx, report *channeldb.ResolverReport) er.R {
-
- return func(_ kvdb.RwTx, report *channeldb.ResolverReport) er.R {
- reports <- report
- return nil
- }
-}
-
-// assertResolverReport checks that a set of reports only contains a single
-// report, and that it is equal to the expected report passed in.
-func assertResolverReport(t *testing.T, reports chan *channeldb.ResolverReport,
- expected *channeldb.ResolverReport) {
-
- select {
- case report := <-reports:
- if !reflect.DeepEqual(report, expected) {
- t.Fatalf("expected: %v, got: %v", expected, report)
- }
-
- case <-time.After(defaultTimeout):
- t.Fatalf("no reports present")
- }
-}
-
-type mockChannel struct {
- anchorResolutions []*lnwallet.AnchorResolution
-}
-
-func (m *mockChannel) NewAnchorResolutions() ([]*lnwallet.AnchorResolution,
- er.R) {
-
- return m.anchorResolutions, nil
-}
-
-func (m *mockChannel) ForceCloseChan() (*lnwallet.LocalForceCloseSummary, er.R) {
- summary := &lnwallet.LocalForceCloseSummary{
- CloseTx: &wire.MsgTx{},
- HtlcResolutions: &lnwallet.HtlcResolutions{},
- }
- return summary, nil
-}
-
-func TestMain(m *testing.M) {
- globalcfg.SelectConfig(globalcfg.BitcoinDefaults())
- os.Exit(m.Run())
-}
diff --git a/lnd/contractcourt/commit_sweep_resolver.go b/lnd/contractcourt/commit_sweep_resolver.go
deleted file mode 100644
index fe408819..00000000
--- a/lnd/contractcourt/commit_sweep_resolver.go
+++ /dev/null
@@ -1,412 +0,0 @@
-package contractcourt
-
-import (
- "io"
- "sync"
-
- "github.com/pkt-cash/pktd/btcutil"
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/btcutil/util"
- "github.com/pkt-cash/pktd/chaincfg/chainhash"
- "github.com/pkt-cash/pktd/lnd/channeldb"
- "github.com/pkt-cash/pktd/lnd/input"
- "github.com/pkt-cash/pktd/lnd/lnwallet"
- "github.com/pkt-cash/pktd/lnd/sweep"
- "github.com/pkt-cash/pktd/pktlog/log"
- "github.com/pkt-cash/pktd/txscript/opcode"
- "github.com/pkt-cash/pktd/wire"
-)
-
-const (
- // commitOutputConfTarget is the default confirmation target we'll use
- // for sweeps of commit outputs that belong to us.
- commitOutputConfTarget = 6
-)
-
-// commitSweepResolver is a resolver that will attempt to sweep the commitment
-// output paying to us, in the case that the remote party broadcasts their
-// version of the commitment transaction. We can sweep this output immediately,
-// as it doesn't have a time-lock delay.
-type commitSweepResolver struct {
- // commitResolution contains all data required to successfully sweep
- // this HTLC on-chain.
- commitResolution lnwallet.CommitOutputResolution
-
- // resolved reflects if the contract has been fully resolved or not.
- resolved bool
-
- // broadcastHeight is the height that the original contract was
- // broadcast to the main-chain at. We'll use this value to bound any
- // historical queries to the chain for spends/confirmations.
- broadcastHeight uint32
-
- // chanPoint is the channel point of the original contract.
- chanPoint wire.OutPoint
-
- // currentReport stores the current state of the resolver for reporting
- // over the rpc interface.
- currentReport ContractReport
-
- // reportLock prevents concurrent access to the resolver report.
- reportLock sync.Mutex
-
- contractResolverKit
-}
-
-// newCommitSweepResolver instantiates a new direct commit output resolver.
-func newCommitSweepResolver(res lnwallet.CommitOutputResolution,
- broadcastHeight uint32,
- chanPoint wire.OutPoint, resCfg ResolverConfig) *commitSweepResolver {
-
- r := &commitSweepResolver{
- contractResolverKit: *newContractResolverKit(resCfg),
- commitResolution: res,
- broadcastHeight: broadcastHeight,
- chanPoint: chanPoint,
- }
-
- r.initReport()
-
- return r
-}
-
-// ResolverKey returns an identifier which should be globally unique for this
-// particular resolver within the chain the original contract resides within.
-func (c *commitSweepResolver) ResolverKey() []byte {
- key := newResolverID(c.commitResolution.SelfOutPoint)
- return key[:]
-}
-
-// waitForHeight registers for block notifications and waits for the provided
-// block height to be reached.
-func (c *commitSweepResolver) waitForHeight(waitHeight uint32) er.R {
- // Register for block epochs. After registration, the current height
- // will be sent on the channel immediately.
- blockEpochs, err := c.Notifier.RegisterBlockEpochNtfn(nil)
- if err != nil {
- return err
- }
- defer blockEpochs.Cancel()
-
- for {
- select {
- case newBlock, ok := <-blockEpochs.Epochs:
- if !ok {
- return errResolverShuttingDown.Default()
- }
- height := newBlock.Height
- if height >= int32(waitHeight) {
- return nil
- }
-
- case <-c.quit:
- return errResolverShuttingDown.Default()
- }
- }
-}
-
-// getCommitTxConfHeight waits for confirmation of the commitment tx and returns
-// the confirmation height.
-func (c *commitSweepResolver) getCommitTxConfHeight() (uint32, er.R) {
- txID := c.commitResolution.SelfOutPoint.Hash
- signDesc := c.commitResolution.SelfOutputSignDesc
- pkScript := signDesc.Output.PkScript
- const confDepth = 1
- confChan, err := c.Notifier.RegisterConfirmationsNtfn(
- &txID, pkScript, confDepth, c.broadcastHeight,
- )
- if err != nil {
- return 0, err
- }
- defer confChan.Cancel()
-
- select {
- case txConfirmation, ok := <-confChan.Confirmed:
- if !ok {
- return 0, er.Errorf("cannot get confirmation "+
- "for commit tx %v", txID)
- }
-
- return txConfirmation.BlockHeight, nil
-
- case <-c.quit:
- return 0, errResolverShuttingDown.Default()
- }
-}
-
-// Resolve instructs the contract resolver to resolve the output on-chain. Once
-// the output has been *fully* resolved, the function should return immediately
-// with a nil ContractResolver value for the first return value. In the case
-// that the contract requires further resolution, then another resolve is
-// returned.
-//
-// NOTE: This function MUST be run as a goroutine.
-func (c *commitSweepResolver) Resolve() (ContractResolver, er.R) {
- // If we're already resolved, then we can exit early.
- if c.resolved {
- return nil, nil
- }
-
- confHeight, err := c.getCommitTxConfHeight()
- if err != nil {
- return nil, err
- }
-
- unlockHeight := confHeight + c.commitResolution.MaturityDelay
-
- log.Debugf("commit conf_height=%v, unlock_height=%v",
- confHeight, unlockHeight)
-
- // Update report now that we learned the confirmation height.
- c.reportLock.Lock()
- c.currentReport.MaturityHeight = unlockHeight
- c.reportLock.Unlock()
-
- // If there is a csv delay, we'll wait for that.
- if c.commitResolution.MaturityDelay > 0 {
- log.Debugf("waiting for csv lock to expire at height %v",
- unlockHeight)
-
- // We only need to wait for the block before the block that
- // unlocks the spend path.
- err := c.waitForHeight(unlockHeight - 1)
- if err != nil {
- return nil, err
- }
- }
-
- // The output is on our local commitment if the script starts with
- // OP_IF for the revocation clause. On the remote commitment it will
- // either be a regular P2WKH or a simple sig spend with a CSV delay.
- isLocalCommitTx := c.commitResolution.SelfOutputSignDesc.WitnessScript[0] == opcode.OP_IF
- isDelayedOutput := c.commitResolution.MaturityDelay != 0
-
- log.Debugf("isDelayedOutput=%v, isLocalCommitTx=%v", isDelayedOutput,
- isLocalCommitTx)
-
- // There're three types of commitments, those that have tweaks
- // for the remote key (us in this case), those that don't, and a third
- // where there is no tweak and the output is delayed. On the local
- // commitment our output will always be delayed. We'll rely on the
- // presence of the commitment tweak to to discern which type of
- // commitment this is.
- var witnessType input.WitnessType
- switch {
-
- // Delayed output to us on our local commitment.
- case isLocalCommitTx:
- witnessType = input.CommitmentTimeLock
-
- // A confirmed output to us on the remote commitment.
- case isDelayedOutput:
- witnessType = input.CommitmentToRemoteConfirmed
-
- // A non-delayed output on the remote commitment where the key is
- // tweakless.
- case c.commitResolution.SelfOutputSignDesc.SingleTweak == nil:
- witnessType = input.CommitSpendNoDelayTweakless
-
- // A non-delayed output on the remote commitment where the key is
- // tweaked.
- default:
- witnessType = input.CommitmentNoDelay
- }
-
- log.Infof("Sweeping with witness type: %v", witnessType)
-
- // We'll craft an input with all the information required for
- // the sweeper to create a fully valid sweeping transaction to
- // recover these coins.
- inp := input.NewCsvInput(
- &c.commitResolution.SelfOutPoint,
- witnessType,
- &c.commitResolution.SelfOutputSignDesc,
- c.broadcastHeight,
- c.commitResolution.MaturityDelay,
- )
-
- // With our input constructed, we'll now offer it to the
- // sweeper.
- log.Infof("sweeping commit output")
-
- feePref := sweep.FeePreference{ConfTarget: commitOutputConfTarget}
- resultChan, err := c.Sweeper.SweepInput(inp, sweep.Params{Fee: feePref})
- if err != nil {
- log.Errorf("unable to sweep input: %v", err)
-
- return nil, err
- }
-
- var sweepTxID chainhash.Hash
-
- // Sweeper is going to join this input with other inputs if
- // possible and publish the sweep tx. When the sweep tx
- // confirms, it signals us through the result channel with the
- // outcome. Wait for this to happen.
- outcome := channeldb.ResolverOutcomeClaimed
- select {
- case sweepResult := <-resultChan:
- switch {
- case sweep.ErrRemoteSpend.Is(sweepResult.Err):
- // If the remote party was able to sweep this output
- // it's likely what we sent was actually a revoked
- // commitment. Report the error and continue to wrap up
- // the contract.
- log.Warnf("local commitment output was swept by "+
- "remote party via %v", sweepResult.Tx.TxHash())
- outcome = channeldb.ResolverOutcomeUnclaimed
- case sweepResult.Err == nil:
- // No errors, therefore continue processing.
- log.Infof("local commitment output fully resolved by "+
- "sweep tx: %v", sweepResult.Tx.TxHash())
- default:
- // Unknown errors.
- log.Errorf("unable to sweep input: %v",
- sweepResult.Err)
-
- return nil, sweepResult.Err
- }
-
- sweepTxID = sweepResult.Tx.TxHash()
-
- case <-c.quit:
- return nil, errResolverShuttingDown.Default()
- }
-
- // Funds have been swept and balance is no longer in limbo.
- c.reportLock.Lock()
- if outcome == channeldb.ResolverOutcomeClaimed {
- // We only record the balance as recovered if it actually came
- // back to us.
- c.currentReport.RecoveredBalance = c.currentReport.LimboBalance
- }
- c.currentReport.LimboBalance = 0
- c.reportLock.Unlock()
- report := c.currentReport.resolverReport(
- &sweepTxID, channeldb.ResolverTypeCommit, outcome,
- )
- c.resolved = true
-
- // Checkpoint the resolver with a closure that will write the outcome
- // of the resolver and its sweep transaction to disk.
- return nil, c.Checkpoint(c, report)
-}
-
-// Stop signals the resolver to cancel any current resolution processes, and
-// suspend.
-//
-// NOTE: Part of the ContractResolver interface.
-func (c *commitSweepResolver) Stop() {
- close(c.quit)
-}
-
-// IsResolved returns true if the stored state in the resolve is fully
-// resolved. In this case the target output can be forgotten.
-//
-// NOTE: Part of the ContractResolver interface.
-func (c *commitSweepResolver) IsResolved() bool {
- return c.resolved
-}
-
-// Encode writes an encoded version of the ContractResolver into the passed
-// Writer.
-//
-// NOTE: Part of the ContractResolver interface.
-func (c *commitSweepResolver) Encode(w io.Writer) er.R {
- if err := encodeCommitResolution(w, &c.commitResolution); err != nil {
- return err
- }
-
- if err := util.WriteBin(w, endian, c.resolved); err != nil {
- return err
- }
- if err := util.WriteBin(w, endian, c.broadcastHeight); err != nil {
- return err
- }
- if _, err := util.Write(w, c.chanPoint.Hash[:]); err != nil {
- return err
- }
- err := util.WriteBin(w, endian, c.chanPoint.Index)
- if err != nil {
- return err
- }
-
- // Previously a sweep tx was serialized at this point. Refactoring
- // removed this, but keep in mind that this data may still be present in
- // the database.
-
- return nil
-}
-
-// newCommitSweepResolverFromReader attempts to decode an encoded
-// ContractResolver from the passed Reader instance, returning an active
-// ContractResolver instance.
-func newCommitSweepResolverFromReader(r io.Reader, resCfg ResolverConfig) (
- *commitSweepResolver, er.R) {
-
- c := &commitSweepResolver{
- contractResolverKit: *newContractResolverKit(resCfg),
- }
-
- if err := decodeCommitResolution(r, &c.commitResolution); err != nil {
- return nil, err
- }
-
- if err := util.ReadBin(r, endian, &c.resolved); err != nil {
- return nil, err
- }
- if err := util.ReadBin(r, endian, &c.broadcastHeight); err != nil {
- return nil, err
- }
- _, err := util.ReadFull(r, c.chanPoint.Hash[:])
- if err != nil {
- return nil, err
- }
- err = util.ReadBin(r, endian, &c.chanPoint.Index)
- if err != nil {
- return nil, err
- }
-
- // Previously a sweep tx was deserialized at this point. Refactoring
- // removed this, but keep in mind that this data may still be present in
- // the database.
-
- c.initReport()
-
- return c, nil
-}
-
-// report returns a report on the resolution state of the contract.
-func (c *commitSweepResolver) report() *ContractReport {
- c.reportLock.Lock()
- defer c.reportLock.Unlock()
-
- copy := c.currentReport
- return ©
-}
-
-// initReport initializes the pending channels report for this resolver.
-func (c *commitSweepResolver) initReport() {
- amt := btcutil.Amount(
- c.commitResolution.SelfOutputSignDesc.Output.Value,
- )
-
- // Set the initial report. All fields are filled in, except for the
- // maturity height which remains 0 until Resolve() is executed.
- //
- // TODO(joostjager): Resolvers only activate after the commit tx
- // confirms. With more refactoring in channel arbitrator, it would be
- // possible to make the confirmation height part of ResolverConfig and
- // populate MaturityHeight here.
- c.currentReport = ContractReport{
- Outpoint: c.commitResolution.SelfOutPoint,
- Type: ReportOutputUnencumbered,
- Amount: amt,
- LimboBalance: amt,
- RecoveredBalance: 0,
- }
-}
-
-// A compile time assertion to ensure commitSweepResolver meets the
-// ContractResolver interface.
-var _ reportingContractResolver = (*commitSweepResolver)(nil)
diff --git a/lnd/contractcourt/commit_sweep_resolver_test.go b/lnd/contractcourt/commit_sweep_resolver_test.go
deleted file mode 100644
index bb4a7071..00000000
--- a/lnd/contractcourt/commit_sweep_resolver_test.go
+++ /dev/null
@@ -1,372 +0,0 @@
-package contractcourt
-
-import (
- "testing"
- "time"
-
- "github.com/pkt-cash/pktd/btcutil"
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/lnd/chainntnfs"
- "github.com/pkt-cash/pktd/lnd/channeldb"
- "github.com/pkt-cash/pktd/lnd/channeldb/kvdb"
- "github.com/pkt-cash/pktd/lnd/input"
- "github.com/pkt-cash/pktd/lnd/lntest/mock"
- "github.com/pkt-cash/pktd/lnd/lnwallet"
- "github.com/pkt-cash/pktd/lnd/lnwallet/chainfee"
- "github.com/pkt-cash/pktd/lnd/sweep"
- "github.com/pkt-cash/pktd/wire"
-)
-
-type commitSweepResolverTestContext struct {
- resolver *commitSweepResolver
- notifier *mock.ChainNotifier
- sweeper *mockSweeper
- resolverResultChan chan resolveResult
- t *testing.T
-}
-
-func newCommitSweepResolverTestContext(t *testing.T,
- resolution *lnwallet.CommitOutputResolution) *commitSweepResolverTestContext {
-
- notifier := &mock.ChainNotifier{
- EpochChan: make(chan *chainntnfs.BlockEpoch),
- SpendChan: make(chan *chainntnfs.SpendDetail),
- ConfChan: make(chan *chainntnfs.TxConfirmation),
- }
-
- sweeper := newMockSweeper()
-
- checkPointChan := make(chan struct{}, 1)
-
- chainCfg := ChannelArbitratorConfig{
- ChainArbitratorConfig: ChainArbitratorConfig{
- Notifier: notifier,
- Sweeper: sweeper,
- },
- PutResolverReport: func(_ kvdb.RwTx,
- _ *channeldb.ResolverReport) er.R {
-
- return nil
- },
- }
-
- cfg := ResolverConfig{
- ChannelArbitratorConfig: chainCfg,
- Checkpoint: func(_ ContractResolver,
- _ ...*channeldb.ResolverReport) er.R {
-
- checkPointChan <- struct{}{}
- return nil
- },
- }
-
- resolver := newCommitSweepResolver(
- *resolution, 0, wire.OutPoint{}, cfg,
- )
-
- return &commitSweepResolverTestContext{
- resolver: resolver,
- notifier: notifier,
- sweeper: sweeper,
- t: t,
- }
-}
-
-func (i *commitSweepResolverTestContext) resolve() {
- // Start resolver.
- i.resolverResultChan = make(chan resolveResult, 1)
- go func() {
- nextResolver, err := i.resolver.Resolve()
- i.resolverResultChan <- resolveResult{
- nextResolver: nextResolver,
- err: err,
- }
- }()
-}
-
-func (i *commitSweepResolverTestContext) notifyEpoch(height int32) {
- i.notifier.EpochChan <- &chainntnfs.BlockEpoch{
- Height: height,
- }
-}
-
-func (i *commitSweepResolverTestContext) waitForResult() {
- i.t.Helper()
-
- result := <-i.resolverResultChan
- if result.err != nil {
- i.t.Fatal(result.err)
- }
-
- if result.nextResolver != nil {
- i.t.Fatal("expected no next resolver")
- }
-}
-
-type mockSweeper struct {
- sweptInputs chan input.Input
- updatedInputs chan wire.OutPoint
- sweepTx *wire.MsgTx
- sweepErr *er.ErrorCode
-}
-
-func newMockSweeper() *mockSweeper {
- return &mockSweeper{
- sweptInputs: make(chan input.Input),
- updatedInputs: make(chan wire.OutPoint),
- sweepTx: &wire.MsgTx{},
- }
-}
-
-func (s *mockSweeper) SweepInput(input input.Input, params sweep.Params) (
- chan sweep.Result, er.R) {
-
- s.sweptInputs <- input
-
- var e er.R
- if s.sweepErr != nil {
- e = s.sweepErr.Default()
- }
-
- result := make(chan sweep.Result, 1)
- result <- sweep.Result{
- Tx: s.sweepTx,
- Err: e,
- }
- return result, nil
-}
-
-func (s *mockSweeper) CreateSweepTx(inputs []input.Input, feePref sweep.FeePreference,
- currentBlockHeight uint32) (*wire.MsgTx, er.R) {
-
- return nil, nil
-}
-
-func (s *mockSweeper) RelayFeePerKW() chainfee.SatPerKWeight {
- return 253
-}
-
-func (s *mockSweeper) UpdateParams(input wire.OutPoint,
- params sweep.ParamsUpdate) (chan sweep.Result, er.R) {
-
- s.updatedInputs <- input
-
- result := make(chan sweep.Result, 1)
- result <- sweep.Result{
- Tx: s.sweepTx,
- }
- return result, nil
-}
-
-var _ UtxoSweeper = &mockSweeper{}
-
-// TestCommitSweepResolverNoDelay tests resolution of a direct commitment output
-// unencumbered by a time lock.
-func TestCommitSweepResolverNoDelay(t *testing.T) {
- t.Parallel()
- defer timeout(t)()
-
- res := lnwallet.CommitOutputResolution{
- SelfOutputSignDesc: input.SignDescriptor{
- Output: &wire.TxOut{
- Value: 100,
- },
- WitnessScript: []byte{0},
- },
- }
-
- ctx := newCommitSweepResolverTestContext(t, &res)
-
- // Replace our checkpoint with one which will push reports into a
- // channel for us to consume. We replace this function on the resolver
- // itself because it is created by the test context.
- reportChan := make(chan *channeldb.ResolverReport)
- ctx.resolver.Checkpoint = func(_ ContractResolver,
- reports ...*channeldb.ResolverReport) er.R {
-
- // Send all of our reports into the channel.
- for _, report := range reports {
- reportChan <- report
- }
-
- return nil
- }
-
- ctx.resolve()
-
- spendTx := &wire.MsgTx{}
- spendHash := spendTx.TxHash()
- ctx.notifier.ConfChan <- &chainntnfs.TxConfirmation{
- Tx: spendTx,
- }
-
- // No csv delay, so the input should be swept immediately.
- <-ctx.sweeper.sweptInputs
-
- amt := btcutil.Amount(res.SelfOutputSignDesc.Output.Value)
- expectedReport := &channeldb.ResolverReport{
- OutPoint: wire.OutPoint{},
- Amount: amt,
- ResolverType: channeldb.ResolverTypeCommit,
- ResolverOutcome: channeldb.ResolverOutcomeClaimed,
- SpendTxID: &spendHash,
- }
-
- assertResolverReport(t, reportChan, expectedReport)
-
- ctx.waitForResult()
-}
-
-// testCommitSweepResolverDelay tests resolution of a direct commitment output
-// that is encumbered by a time lock. sweepErr indicates whether the local node
-// fails to sweep the output.
-func testCommitSweepResolverDelay(t *testing.T, sweepErr *er.ErrorCode) {
- defer timeout(t)()
-
- const sweepProcessInterval = 100 * time.Millisecond
- amt := int64(100)
- outpoint := wire.OutPoint{
- Index: 5,
- }
- res := lnwallet.CommitOutputResolution{
- SelfOutputSignDesc: input.SignDescriptor{
- Output: &wire.TxOut{
- Value: amt,
- },
- WitnessScript: []byte{0},
- },
- MaturityDelay: 3,
- SelfOutPoint: outpoint,
- }
-
- ctx := newCommitSweepResolverTestContext(t, &res)
-
- // Replace our checkpoint with one which will push reports into a
- // channel for us to consume. We replace this function on the resolver
- // itself because it is created by the test context.
- reportChan := make(chan *channeldb.ResolverReport)
- ctx.resolver.Checkpoint = func(_ ContractResolver,
- reports ...*channeldb.ResolverReport) er.R {
-
- // Send all of our reports into the channel.
- for _, report := range reports {
- reportChan <- report
- }
-
- return nil
- }
-
- // Setup whether we expect the sweeper to receive a sweep error in this
- // test case.
- ctx.sweeper.sweepErr = sweepErr
-
- report := ctx.resolver.report()
- expectedReport := ContractReport{
- Outpoint: outpoint,
- Type: ReportOutputUnencumbered,
- Amount: btcutil.Amount(amt),
- LimboBalance: btcutil.Amount(amt),
- }
- if *report != expectedReport {
- t.Fatalf("unexpected resolver report. want=%v got=%v",
- expectedReport, report)
- }
-
- ctx.resolve()
-
- ctx.notifier.ConfChan <- &chainntnfs.TxConfirmation{
- BlockHeight: testInitialBlockHeight - 1,
- }
-
- // Allow resolver to process confirmation.
- time.Sleep(sweepProcessInterval)
-
- // Expect report to be updated.
- report = ctx.resolver.report()
- if report.MaturityHeight != testInitialBlockHeight+2 {
- t.Fatal("report maturity height incorrect")
- }
-
- // Notify initial block height. The csv lock is still in effect, so we
- // don't expect any sweep to happen yet.
- ctx.notifyEpoch(testInitialBlockHeight)
-
- select {
- case <-ctx.sweeper.sweptInputs:
- t.Fatal("no sweep expected")
- case <-time.After(sweepProcessInterval):
- }
-
- // A new block arrives. The commit tx confirmed at height -1 and the csv
- // is 3, so a spend will be valid in the first block after height +1.
- ctx.notifyEpoch(testInitialBlockHeight + 1)
-
- <-ctx.sweeper.sweptInputs
-
- // Set the resolution report outcome based on whether our sweep
- // succeeded.
- outcome := channeldb.ResolverOutcomeClaimed
- if sweepErr != nil {
- outcome = channeldb.ResolverOutcomeUnclaimed
- }
- sweepTx := ctx.sweeper.sweepTx.TxHash()
-
- assertResolverReport(t, reportChan, &channeldb.ResolverReport{
- OutPoint: outpoint,
- ResolverType: channeldb.ResolverTypeCommit,
- ResolverOutcome: outcome,
- Amount: btcutil.Amount(amt),
- SpendTxID: &sweepTx,
- })
-
- ctx.waitForResult()
-
- // If this test case generates a sweep error, we don't expect to be
- // able to recover anything. This might happen if the local commitment
- // output was swept by a justice transaction by the remote party.
- expectedRecoveredBalance := btcutil.Amount(amt)
- if sweepErr != nil {
- expectedRecoveredBalance = 0
- }
-
- report = ctx.resolver.report()
- expectedReport = ContractReport{
- Outpoint: outpoint,
- Type: ReportOutputUnencumbered,
- Amount: btcutil.Amount(amt),
- MaturityHeight: testInitialBlockHeight + 2,
- RecoveredBalance: expectedRecoveredBalance,
- }
- if *report != expectedReport {
- t.Fatalf("unexpected resolver report. want=%v got=%v",
- expectedReport, report)
- }
-
-}
-
-// TestCommitSweepResolverDelay tests resolution of a direct commitment output
-// that is encumbered by a time lock.
-func TestCommitSweepResolverDelay(t *testing.T) {
- t.Parallel()
-
- testCases := []struct {
- name string
- sweepErr *er.ErrorCode
- }{{
- name: "success",
- sweepErr: nil,
- }, {
- name: "remote spend",
- sweepErr: sweep.ErrRemoteSpend,
- }}
-
- for _, tc := range testCases {
- tc := tc
- ok := t.Run(tc.name, func(t *testing.T) {
- testCommitSweepResolverDelay(t, tc.sweepErr)
- })
- if !ok {
- break
- }
- }
-}
diff --git a/lnd/contractcourt/contract_resolvers.go b/lnd/contractcourt/contract_resolvers.go
deleted file mode 100644
index 98ba2dae..00000000
--- a/lnd/contractcourt/contract_resolvers.go
+++ /dev/null
@@ -1,114 +0,0 @@
-package contractcourt
-
-import (
- "encoding/binary"
- "io"
-
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/lnd/channeldb"
- "github.com/pkt-cash/pktd/wire"
-)
-
-var (
- endian = binary.BigEndian
-)
-
-const (
- // sweepConfTarget is the default number of blocks that we'll use as a
- // confirmation target when sweeping.
- sweepConfTarget = 6
-)
-
-// ContractResolver is an interface which packages a state machine which is
-// able to carry out the necessary steps required to fully resolve a Bitcoin
-// contract on-chain. Resolvers are fully encodable to ensure callers are able
-// to persist them properly. A resolver may produce another resolver in the
-// case that claiming an HTLC is a multi-stage process. In this case, we may
-// partially resolve the contract, then persist, and set up for an additional
-// resolution.
-type ContractResolver interface {
- // ResolverKey returns an identifier which should be globally unique
- // for this particular resolver within the chain the original contract
- // resides within.
- ResolverKey() []byte
-
- // Resolve instructs the contract resolver to resolve the output
- // on-chain. Once the output has been *fully* resolved, the function
- // should return immediately with a nil ContractResolver value for the
- // first return value. In the case that the contract requires further
- // resolution, then another resolve is returned.
- //
- // NOTE: This function MUST be run as a goroutine.
- Resolve() (ContractResolver, er.R)
-
- // IsResolved returns true if the stored state in the resolve is fully
- // resolved. In this case the target output can be forgotten.
- IsResolved() bool
-
- // Encode writes an encoded version of the ContractResolver into the
- // passed Writer.
- Encode(w io.Writer) er.R
-
- // Stop signals the resolver to cancel any current resolution
- // processes, and suspend.
- Stop()
-}
-
-// htlcContractResolver is the required interface for htlc resolvers.
-type htlcContractResolver interface {
- ContractResolver
-
- // HtlcPoint returns the htlc's outpoint on the commitment tx.
- HtlcPoint() wire.OutPoint
-
- // Supplement adds additional information to the resolver that is
- // required before Resolve() is called.
- Supplement(htlc channeldb.HTLC)
-}
-
-// reportingContractResolver is a ContractResolver that also exposes a report on
-// the resolution state of the contract.
-type reportingContractResolver interface {
- ContractResolver
-
- report() *ContractReport
-}
-
-// ResolverConfig contains the externally supplied configuration items that are
-// required by a ContractResolver implementation.
-type ResolverConfig struct {
- // ChannelArbitratorConfig contains all the interfaces and closures
- // required for the resolver to interact with outside sub-systems.
- ChannelArbitratorConfig
-
- // Checkpoint allows a resolver to check point its state. This function
- // should write the state of the resolver to persistent storage, and
- // return a non-nil error upon success. It takes a resolver report,
- // which contains information about the outcome and should be written
- // to disk if non-nil.
- Checkpoint func(ContractResolver, ...*channeldb.ResolverReport) er.R
-}
-
-// contractResolverKit is meant to be used as a mix-in struct to be embedded within a
-// given ContractResolver implementation. It contains all the common items that
-// a resolver requires to carry out its duties.
-type contractResolverKit struct {
- ResolverConfig
-
- quit chan struct{}
-}
-
-// newContractResolverKit instantiates the mix-in struct.
-func newContractResolverKit(cfg ResolverConfig) *contractResolverKit {
- return &contractResolverKit{
- ResolverConfig: cfg,
- quit: make(chan struct{}),
- }
-}
-
-var (
- // errResolverShuttingDown is returned when the resolver stops
- // progressing because it received the quit signal.
- errResolverShuttingDown = er.GenericErrorType.CodeWithDetail("errResolverShuttingDown",
- "resolver shutting down")
-)
diff --git a/lnd/contractcourt/htlc_incoming_contest_resolver.go b/lnd/contractcourt/htlc_incoming_contest_resolver.go
deleted file mode 100644
index c7ac105b..00000000
--- a/lnd/contractcourt/htlc_incoming_contest_resolver.go
+++ /dev/null
@@ -1,452 +0,0 @@
-package contractcourt
-
-import (
- "bytes"
- "io"
-
- "github.com/pkt-cash/pktd/btcutil"
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/btcutil/util"
- "github.com/pkt-cash/pktd/lnd/channeldb"
- "github.com/pkt-cash/pktd/lnd/htlcswitch/hop"
- "github.com/pkt-cash/pktd/lnd/invoices"
- "github.com/pkt-cash/pktd/lnd/lntypes"
- "github.com/pkt-cash/pktd/lnd/lnwallet"
- "github.com/pkt-cash/pktd/pktlog/log"
-)
-
-// htlcIncomingContestResolver is a ContractResolver that's able to resolve an
-// incoming HTLC that is still contested. An HTLC is still contested, if at the
-// time of commitment broadcast, we don't know of the preimage for it yet, and
-// it hasn't expired. In this case, we can resolve the HTLC if we learn of the
-// preimage, otherwise the remote party will sweep it after it expires.
-//
-// TODO(roasbeef): just embed the other resolver?
-type htlcIncomingContestResolver struct {
- // htlcExpiry is the absolute expiry of this incoming HTLC. We use this
- // value to determine if we can exit early as if the HTLC times out,
- // before we learn of the preimage then we can't claim it on chain
- // successfully.
- htlcExpiry uint32
-
- // htlcSuccessResolver is the inner resolver that may be utilized if we
- // learn of the preimage.
- htlcSuccessResolver
-}
-
-// newIncomingContestResolver instantiates a new incoming htlc contest resolver.
-func newIncomingContestResolver(
- res lnwallet.IncomingHtlcResolution, broadcastHeight uint32,
- htlc channeldb.HTLC, resCfg ResolverConfig) *htlcIncomingContestResolver {
-
- success := newSuccessResolver(
- res, broadcastHeight, htlc, resCfg,
- )
-
- return &htlcIncomingContestResolver{
- htlcExpiry: htlc.RefundTimeout,
- htlcSuccessResolver: *success,
- }
-}
-
-// Resolve attempts to resolve this contract. As we don't yet know of the
-// preimage for the contract, we'll wait for one of two things to happen:
-//
-// 1. We learn of the preimage! In this case, we can sweep the HTLC incoming
-// and ensure that if this was a multi-hop HTLC we are made whole. In this
-// case, an additional ContractResolver will be returned to finish the
-// job.
-//
-// 2. The HTLC expires. If this happens, then the contract is fully resolved
-// as we have no remaining actions left at our disposal.
-//
-// NOTE: Part of the ContractResolver interface.
-func (h *htlcIncomingContestResolver) Resolve() (ContractResolver, er.R) {
- // If we're already full resolved, then we don't have anything further
- // to do.
- if h.resolved {
- return nil, nil
- }
-
- // First try to parse the payload. If that fails, we can stop resolution
- // now.
- payload, err := h.decodePayload()
- if err != nil {
- log.Debugf("ChannelArbitrator(%v): cannot decode payload of "+
- "htlc %v", h.ChanPoint, h.HtlcPoint())
-
- // If we've locked in an htlc with an invalid payload on our
- // commitment tx, we don't need to resolve it. The other party
- // will time it out and get their funds back. This situation can
- // present itself when we crash before processRemoteAdds in the
- // link has ran.
- h.resolved = true
-
- // We write a report to disk that indicates we could not decode
- // the htlc.
- resReport := h.report().resolverReport(
- nil, channeldb.ResolverTypeIncomingHtlc,
- channeldb.ResolverOutcomeAbandoned,
- )
- return nil, h.PutResolverReport(nil, resReport)
- }
-
- // Register for block epochs. After registration, the current height
- // will be sent on the channel immediately.
- blockEpochs, err := h.Notifier.RegisterBlockEpochNtfn(nil)
- if err != nil {
- return nil, err
- }
- defer blockEpochs.Cancel()
-
- var currentHeight int32
- select {
- case newBlock, ok := <-blockEpochs.Epochs:
- if !ok {
- return nil, errResolverShuttingDown.Default()
- }
- currentHeight = newBlock.Height
- case <-h.quit:
- return nil, errResolverShuttingDown.Default()
- }
-
- // We'll first check if this HTLC has been timed out, if so, we can
- // return now and mark ourselves as resolved. If we're past the point of
- // expiry of the HTLC, then at this point the sender can sweep it, so
- // we'll end our lifetime. Here we deliberately forego the chance that
- // the sender doesn't sweep and we already have or will learn the
- // preimage. Otherwise the resolver could potentially stay active
- // indefinitely and the channel will never close properly.
- if uint32(currentHeight) >= h.htlcExpiry {
- // TODO(roasbeef): should also somehow check if outgoing is
- // resolved or not
- // * may need to hook into the circuit map
- // * can't timeout before the outgoing has been
-
- log.Infof("%T(%v): HTLC has timed out (expiry=%v, height=%v), "+
- "abandoning", h, h.htlcResolution.ClaimOutpoint,
- h.htlcExpiry, currentHeight)
- h.resolved = true
-
- // Finally, get our report and checkpoint our resolver with a
- // timeout outcome report.
- report := h.report().resolverReport(
- nil, channeldb.ResolverTypeIncomingHtlc,
- channeldb.ResolverOutcomeTimeout,
- )
- return nil, h.Checkpoint(h, report)
- }
-
- // applyPreimage is a helper function that will populate our internal
- // resolver with the preimage we learn of. This should be called once
- // the preimage is revealed so the inner resolver can properly complete
- // its duties. The error return value indicates whether the preimage
- // was properly applied.
- applyPreimage := func(preimage lntypes.Preimage) er.R {
- // Sanity check to see if this preimage matches our htlc. At
- // this point it should never happen that it does not match.
- if !preimage.Matches(h.htlc.RHash) {
- return er.New("preimage does not match hash")
- }
-
- // Update htlcResolution with the matching preimage.
- h.htlcResolution.Preimage = preimage
-
- log.Infof("%T(%v): extracted preimage=%v from beacon!", h,
- h.htlcResolution.ClaimOutpoint, preimage)
-
- // If this is our commitment transaction, then we'll need to
- // populate the witness for the second-level HTLC transaction.
- if h.htlcResolution.SignedSuccessTx != nil {
- // Within the witness for the success transaction, the
- // preimage is the 4th element as it looks like:
- //
- // *
- //
- // We'll populate it within the witness, as since this
- // was a "contest" resolver, we didn't yet know of the
- // preimage.
- h.htlcResolution.SignedSuccessTx.TxIn[0].Witness[3] = preimage[:]
- }
-
- return nil
- }
-
- // Define a closure to process htlc resolutions either directly or
- // triggered by future notifications.
- processHtlcResolution := func(e invoices.HtlcResolution) (
- ContractResolver, er.R) {
-
- // Take action based on the type of resolution we have
- // received.
- switch resolution := e.(type) {
-
- // If the htlc resolution was a settle, apply the
- // preimage and return a success resolver.
- case *invoices.HtlcSettleResolution:
- err := applyPreimage(resolution.Preimage)
- if err != nil {
- return nil, err
- }
-
- return &h.htlcSuccessResolver, nil
-
- // If the htlc was failed, mark the htlc as
- // resolved.
- case *invoices.HtlcFailResolution:
- log.Infof("%T(%v): Exit hop HTLC canceled "+
- "(expiry=%v, height=%v), abandoning", h,
- h.htlcResolution.ClaimOutpoint,
- h.htlcExpiry, currentHeight)
-
- h.resolved = true
-
- // Checkpoint our resolver with an abandoned outcome
- // because we take no further action on this htlc.
- report := h.report().resolverReport(
- nil, channeldb.ResolverTypeIncomingHtlc,
- channeldb.ResolverOutcomeAbandoned,
- )
- return nil, h.Checkpoint(h, report)
-
- // Error if the resolution type is unknown, we are only
- // expecting settles and fails.
- default:
- return nil, er.Errorf("unknown resolution"+
- " type: %v", e)
- }
- }
-
- var (
- hodlChan chan interface{}
- witnessUpdates <-chan lntypes.Preimage
- )
- if payload.FwdInfo.NextHop == hop.Exit {
- // Create a buffered hodl chan to prevent deadlock.
- hodlChan = make(chan interface{}, 1)
-
- // Notify registry that we are potentially resolving as an exit
- // hop on-chain. If this HTLC indeed pays to an existing
- // invoice, the invoice registry will tell us what to do with
- // the HTLC. This is identical to HTLC resolution in the link.
- circuitKey := channeldb.CircuitKey{
- ChanID: h.ShortChanID,
- HtlcID: h.htlc.HtlcIndex,
- }
-
- resolution, err := h.Registry.NotifyExitHopHtlc(
- h.htlc.RHash, h.htlc.Amt, h.htlcExpiry, currentHeight,
- circuitKey, hodlChan, payload,
- )
- if err != nil {
- return nil, err
- }
-
- defer h.Registry.HodlUnsubscribeAll(hodlChan)
-
- // Take action based on the resolution we received. If the htlc
- // was settled, or a htlc for a known invoice failed we can
- // resolve it directly. If the resolution is nil, the htlc was
- // neither accepted nor failed, so we cannot take action yet.
- switch res := resolution.(type) {
- case *invoices.HtlcFailResolution:
- // In the case where the htlc failed, but the invoice
- // was known to the registry, we can directly resolve
- // the htlc.
- if res.Outcome != invoices.ResultInvoiceNotFound {
- return processHtlcResolution(resolution)
- }
-
- // If we settled the htlc, we can resolve it.
- case *invoices.HtlcSettleResolution:
- return processHtlcResolution(resolution)
-
- // If the resolution is nil, the htlc was neither settled nor
- // failed so we cannot take action at present.
- case nil:
-
- default:
- return nil, er.Errorf("unknown htlc resolution type: %T",
- resolution)
- }
- } else {
- // If the HTLC hasn't expired yet, then we may still be able to
- // claim it if we learn of the pre-image, so we'll subscribe to
- // the preimage database to see if it turns up, or the HTLC
- // times out.
- //
- // NOTE: This is done BEFORE opportunistically querying the db,
- // to ensure the preimage can't be delivered between querying
- // and registering for the preimage subscription.
- preimageSubscription := h.PreimageDB.SubscribeUpdates()
- defer preimageSubscription.CancelSubscription()
-
- // With the epochs and preimage subscriptions initialized, we'll
- // query to see if we already know the preimage.
- preimage, ok := h.PreimageDB.LookupPreimage(h.htlc.RHash)
- if ok {
- // If we do, then this means we can claim the HTLC!
- // However, we don't know how to ourselves, so we'll
- // return our inner resolver which has the knowledge to
- // do so.
- if err := applyPreimage(preimage); err != nil {
- return nil, err
- }
-
- return &h.htlcSuccessResolver, nil
- }
-
- witnessUpdates = preimageSubscription.WitnessUpdates
- }
-
- for {
- select {
- case preimage := <-witnessUpdates:
- // We received a new preimage, but we need to ignore
- // all except the preimage we are waiting for.
- if !preimage.Matches(h.htlc.RHash) {
- continue
- }
-
- if err := applyPreimage(preimage); err != nil {
- return nil, err
- }
-
- // We've learned of the preimage and this information
- // has been added to our inner resolver. We return it so
- // it can continue contract resolution.
- return &h.htlcSuccessResolver, nil
-
- case hodlItem := <-hodlChan:
- htlcResolution := hodlItem.(invoices.HtlcResolution)
- return processHtlcResolution(htlcResolution)
-
- case newBlock, ok := <-blockEpochs.Epochs:
- if !ok {
- return nil, errResolverShuttingDown.Default()
- }
-
- // If this new height expires the HTLC, then this means
- // we never found out the preimage, so we can mark
- // resolved and exit.
- newHeight := uint32(newBlock.Height)
- if newHeight >= h.htlcExpiry {
- log.Infof("%T(%v): HTLC has timed out "+
- "(expiry=%v, height=%v), abandoning", h,
- h.htlcResolution.ClaimOutpoint,
- h.htlcExpiry, currentHeight)
- h.resolved = true
-
- report := h.report().resolverReport(
- nil,
- channeldb.ResolverTypeIncomingHtlc,
- channeldb.ResolverOutcomeTimeout,
- )
- return nil, h.Checkpoint(h, report)
- }
-
- case <-h.quit:
- return nil, errResolverShuttingDown.Default()
- }
- }
-}
-
-// report returns a report on the resolution state of the contract.
-func (h *htlcIncomingContestResolver) report() *ContractReport {
- // No locking needed as these values are read-only.
-
- finalAmt := h.htlc.Amt.ToSatoshis()
- if h.htlcResolution.SignedSuccessTx != nil {
- finalAmt = btcutil.Amount(
- h.htlcResolution.SignedSuccessTx.TxOut[0].Value,
- )
- }
-
- return &ContractReport{
- Outpoint: h.htlcResolution.ClaimOutpoint,
- Type: ReportOutputIncomingHtlc,
- Amount: finalAmt,
- MaturityHeight: h.htlcExpiry,
- LimboBalance: finalAmt,
- Stage: 1,
- }
-}
-
-// Stop signals the resolver to cancel any current resolution processes, and
-// suspend.
-//
-// NOTE: Part of the ContractResolver interface.
-func (h *htlcIncomingContestResolver) Stop() {
- close(h.quit)
-}
-
-// IsResolved returns true if the stored state in the resolve is fully
-// resolved. In this case the target output can be forgotten.
-//
-// NOTE: Part of the ContractResolver interface.
-func (h *htlcIncomingContestResolver) IsResolved() bool {
- return h.resolved
-}
-
-// Encode writes an encoded version of the ContractResolver into the passed
-// Writer.
-//
-// NOTE: Part of the ContractResolver interface.
-func (h *htlcIncomingContestResolver) Encode(w io.Writer) er.R {
- // We'll first write out the one field unique to this resolver.
- if err := util.WriteBin(w, endian, h.htlcExpiry); err != nil {
- return err
- }
-
- // Then we'll write out our internal resolver.
- return h.htlcSuccessResolver.Encode(w)
-}
-
-// newIncomingContestResolverFromReader attempts to decode an encoded ContractResolver
-// from the passed Reader instance, returning an active ContractResolver
-// instance.
-func newIncomingContestResolverFromReader(r io.Reader, resCfg ResolverConfig) (
- *htlcIncomingContestResolver, er.R) {
-
- h := &htlcIncomingContestResolver{}
-
- // We'll first read the one field unique to this resolver.
- if err := util.ReadBin(r, endian, &h.htlcExpiry); err != nil {
- return nil, err
- }
-
- // Then we'll decode our internal resolver.
- successResolver, err := newSuccessResolverFromReader(r, resCfg)
- if err != nil {
- return nil, err
- }
- h.htlcSuccessResolver = *successResolver
-
- return h, nil
-}
-
-// Supplement adds additional information to the resolver that is required
-// before Resolve() is called.
-//
-// NOTE: Part of the htlcContractResolver interface.
-func (h *htlcIncomingContestResolver) Supplement(htlc channeldb.HTLC) {
- h.htlc = htlc
-}
-
-// decodePayload (re)decodes the hop payload of a received htlc.
-func (h *htlcIncomingContestResolver) decodePayload() (*hop.Payload, er.R) {
-
- onionReader := bytes.NewReader(h.htlc.OnionBlob)
- iterator, err := h.OnionProcessor.ReconstructHopIterator(
- onionReader, h.htlc.RHash[:],
- )
- if err != nil {
- return nil, err
- }
-
- return iterator.HopPayload()
-}
-
-// A compile time assertion to ensure htlcIncomingContestResolver meets the
-// ContractResolver interface.
-var _ htlcContractResolver = (*htlcIncomingContestResolver)(nil)
diff --git a/lnd/contractcourt/htlc_incoming_resolver_test.go b/lnd/contractcourt/htlc_incoming_resolver_test.go
deleted file mode 100644
index d3ce6ef6..00000000
--- a/lnd/contractcourt/htlc_incoming_resolver_test.go
+++ /dev/null
@@ -1,417 +0,0 @@
-package contractcourt
-
-import (
- "bytes"
- "io"
- "io/ioutil"
- "testing"
-
- "github.com/pkt-cash/pktd/btcutil/er"
- sphinx "github.com/pkt-cash/pktd/lightning-onion"
- "github.com/pkt-cash/pktd/lnd/chainntnfs"
- "github.com/pkt-cash/pktd/lnd/channeldb"
- "github.com/pkt-cash/pktd/lnd/channeldb/kvdb"
- "github.com/pkt-cash/pktd/lnd/htlcswitch/hop"
- "github.com/pkt-cash/pktd/lnd/invoices"
- "github.com/pkt-cash/pktd/lnd/lntest/mock"
- "github.com/pkt-cash/pktd/lnd/lntypes"
- "github.com/pkt-cash/pktd/lnd/lnwallet"
- "github.com/pkt-cash/pktd/lnd/lnwire"
-)
-
-const (
- testInitialBlockHeight = 100
- testHtlcExpiry = 150
-)
-
-var (
- testResPreimage = lntypes.Preimage{1, 2, 3}
- testResHash = testResPreimage.Hash()
- testResCircuitKey = channeldb.CircuitKey{}
- testOnionBlob = []byte{4, 5, 6}
- testAcceptHeight int32 = 1234
- testHtlcAmount = 2300
-)
-
-// TestHtlcIncomingResolverFwdPreimageKnown tests resolution of a forwarded htlc
-// for which the preimage is already known initially.
-func TestHtlcIncomingResolverFwdPreimageKnown(t *testing.T) {
- t.Parallel()
- defer timeout(t)()
-
- ctx := newIncomingResolverTestContext(t, false)
- ctx.witnessBeacon.lookupPreimage[testResHash] = testResPreimage
- ctx.resolve()
- ctx.waitForResult(true)
-}
-
-// TestHtlcIncomingResolverFwdContestedSuccess tests resolution of a forwarded
-// htlc for which the preimage becomes known after the resolver has been
-// started.
-func TestHtlcIncomingResolverFwdContestedSuccess(t *testing.T) {
- t.Parallel()
- defer timeout(t)()
-
- ctx := newIncomingResolverTestContext(t, false)
- ctx.resolve()
-
- // Simulate a new block coming in. HTLC is not yet expired.
- ctx.notifyEpoch(testInitialBlockHeight + 1)
-
- ctx.witnessBeacon.preImageUpdates <- testResPreimage
- ctx.waitForResult(true)
-}
-
-// TestHtlcIncomingResolverFwdContestedTimeout tests resolution of a forwarded
-// htlc that times out after the resolver has been started.
-func TestHtlcIncomingResolverFwdContestedTimeout(t *testing.T) {
- t.Parallel()
- defer timeout(t)()
-
- ctx := newIncomingResolverTestContext(t, false)
-
- // Replace our checkpoint with one which will push reports into a
- // channel for us to consume. We replace this function on the resolver
- // itself because it is created by the test context.
- reportChan := make(chan *channeldb.ResolverReport)
- ctx.resolver.Checkpoint = func(_ ContractResolver,
- reports ...*channeldb.ResolverReport) er.R {
-
- // Send all of our reports into the channel.
- for _, report := range reports {
- reportChan <- report
- }
-
- return nil
- }
-
- ctx.resolve()
-
- // Simulate a new block coming in. HTLC expires.
- ctx.notifyEpoch(testHtlcExpiry)
-
- // Assert that we have a failure resolution because our invoice was
- // cancelled.
- assertResolverReport(t, reportChan, &channeldb.ResolverReport{
- Amount: lnwire.MilliSatoshi(testHtlcAmount).ToSatoshis(),
- ResolverType: channeldb.ResolverTypeIncomingHtlc,
- ResolverOutcome: channeldb.ResolverOutcomeTimeout,
- })
-
- ctx.waitForResult(false)
-}
-
-// TestHtlcIncomingResolverFwdTimeout tests resolution of a forwarded htlc that
-// has already expired when the resolver starts.
-func TestHtlcIncomingResolverFwdTimeout(t *testing.T) {
- t.Parallel()
- defer timeout(t)()
-
- ctx := newIncomingResolverTestContext(t, true)
- ctx.witnessBeacon.lookupPreimage[testResHash] = testResPreimage
- ctx.resolver.htlcExpiry = 90
- ctx.resolve()
- ctx.waitForResult(false)
-}
-
-// TestHtlcIncomingResolverExitSettle tests resolution of an exit hop htlc for
-// which the invoice has already been settled when the resolver starts.
-func TestHtlcIncomingResolverExitSettle(t *testing.T) {
- t.Parallel()
- defer timeout(t)()
-
- ctx := newIncomingResolverTestContext(t, true)
- ctx.registry.notifyResolution = invoices.NewSettleResolution(
- testResPreimage, testResCircuitKey, testAcceptHeight,
- invoices.ResultReplayToSettled,
- )
-
- ctx.resolve()
-
- data := <-ctx.registry.notifyChan
- if data.expiry != testHtlcExpiry {
- t.Fatal("incorrect expiry")
- }
- if data.currentHeight != testInitialBlockHeight {
- t.Fatal("incorrect block height")
- }
-
- ctx.waitForResult(true)
-
- if !bytes.Equal(
- ctx.onionProcessor.offeredOnionBlob, testOnionBlob,
- ) {
- t.Fatal("unexpected onion blob")
- }
-}
-
-// TestHtlcIncomingResolverExitCancel tests resolution of an exit hop htlc for
-// an invoice that is already canceled when the resolver starts.
-func TestHtlcIncomingResolverExitCancel(t *testing.T) {
- t.Parallel()
- defer timeout(t)()
-
- ctx := newIncomingResolverTestContext(t, true)
- ctx.registry.notifyResolution = invoices.NewFailResolution(
- testResCircuitKey, testAcceptHeight,
- invoices.ResultInvoiceAlreadyCanceled,
- )
-
- ctx.resolve()
- ctx.waitForResult(false)
-}
-
-// TestHtlcIncomingResolverExitSettleHodl tests resolution of an exit hop htlc
-// for a hodl invoice that is settled after the resolver has started.
-func TestHtlcIncomingResolverExitSettleHodl(t *testing.T) {
- t.Parallel()
- defer timeout(t)()
-
- ctx := newIncomingResolverTestContext(t, true)
- ctx.resolve()
-
- notifyData := <-ctx.registry.notifyChan
- notifyData.hodlChan <- invoices.NewSettleResolution(
- testResPreimage, testResCircuitKey, testAcceptHeight,
- invoices.ResultSettled,
- )
-
- ctx.waitForResult(true)
-}
-
-// TestHtlcIncomingResolverExitTimeoutHodl tests resolution of an exit hop htlc
-// for a hodl invoice that times out.
-func TestHtlcIncomingResolverExitTimeoutHodl(t *testing.T) {
- t.Parallel()
- defer timeout(t)()
-
- ctx := newIncomingResolverTestContext(t, true)
-
- // Replace our checkpoint with one which will push reports into a
- // channel for us to consume. We replace this function on the resolver
- // itself because it is created by the test context.
- reportChan := make(chan *channeldb.ResolverReport)
- ctx.resolver.Checkpoint = func(_ ContractResolver,
- reports ...*channeldb.ResolverReport) er.R {
-
- // Send all of our reports into the channel.
- for _, report := range reports {
- reportChan <- report
- }
-
- return nil
- }
-
- ctx.resolve()
- ctx.notifyEpoch(testHtlcExpiry)
-
- // Assert that we have a failure resolution because our invoice was
- // cancelled.
- assertResolverReport(t, reportChan, &channeldb.ResolverReport{
- Amount: lnwire.MilliSatoshi(testHtlcAmount).ToSatoshis(),
- ResolverType: channeldb.ResolverTypeIncomingHtlc,
- ResolverOutcome: channeldb.ResolverOutcomeTimeout,
- })
-
- ctx.waitForResult(false)
-}
-
-// TestHtlcIncomingResolverExitCancelHodl tests resolution of an exit hop htlc
-// for a hodl invoice that is canceled after the resolver has started.
-func TestHtlcIncomingResolverExitCancelHodl(t *testing.T) {
- t.Parallel()
- defer timeout(t)()
-
- ctx := newIncomingResolverTestContext(t, true)
-
- // Replace our checkpoint with one which will push reports into a
- // channel for us to consume. We replace this function on the resolver
- // itself because it is created by the test context.
- reportChan := make(chan *channeldb.ResolverReport)
- ctx.resolver.Checkpoint = func(_ ContractResolver,
- reports ...*channeldb.ResolverReport) er.R {
-
- // Send all of our reports into the channel.
- for _, report := range reports {
- reportChan <- report
- }
-
- return nil
- }
-
- ctx.resolve()
- notifyData := <-ctx.registry.notifyChan
- notifyData.hodlChan <- invoices.NewFailResolution(
- testResCircuitKey, testAcceptHeight, invoices.ResultCanceled,
- )
-
- // Assert that we have a failure resolution because our invoice was
- // cancelled.
- assertResolverReport(t, reportChan, &channeldb.ResolverReport{
- Amount: lnwire.MilliSatoshi(testHtlcAmount).ToSatoshis(),
- ResolverType: channeldb.ResolverTypeIncomingHtlc,
- ResolverOutcome: channeldb.ResolverOutcomeAbandoned,
- })
-
- ctx.waitForResult(false)
-}
-
-type mockHopIterator struct {
- isExit bool
- hop.Iterator
-}
-
-func (h *mockHopIterator) HopPayload() (*hop.Payload, er.R) {
- var nextAddress [8]byte
- if !h.isExit {
- nextAddress = [8]byte{0x01}
- }
-
- return hop.NewLegacyPayload(&sphinx.HopData{
- Realm: [1]byte{},
- NextAddress: nextAddress,
- ForwardAmount: 100,
- OutgoingCltv: 40,
- ExtraBytes: [12]byte{},
- }), nil
-}
-
-type mockOnionProcessor struct {
- isExit bool
- offeredOnionBlob []byte
-}
-
-func (o *mockOnionProcessor) ReconstructHopIterator(r io.Reader, rHash []byte) (
- hop.Iterator, er.R) {
-
- data, err := ioutil.ReadAll(r)
- if err != nil {
- return nil, er.E(err)
- }
- o.offeredOnionBlob = data
-
- return &mockHopIterator{isExit: o.isExit}, nil
-}
-
-type incomingResolverTestContext struct {
- registry *mockRegistry
- witnessBeacon *mockWitnessBeacon
- resolver *htlcIncomingContestResolver
- notifier *mock.ChainNotifier
- onionProcessor *mockOnionProcessor
- resolveErr chan er.R
- nextResolver ContractResolver
- t *testing.T
-}
-
-func newIncomingResolverTestContext(t *testing.T, isExit bool) *incomingResolverTestContext {
- notifier := &mock.ChainNotifier{
- EpochChan: make(chan *chainntnfs.BlockEpoch),
- SpendChan: make(chan *chainntnfs.SpendDetail),
- ConfChan: make(chan *chainntnfs.TxConfirmation),
- }
- witnessBeacon := newMockWitnessBeacon()
- registry := &mockRegistry{
- notifyChan: make(chan notifyExitHopData, 1),
- }
-
- onionProcessor := &mockOnionProcessor{isExit: isExit}
-
- checkPointChan := make(chan struct{}, 1)
-
- chainCfg := ChannelArbitratorConfig{
- ChainArbitratorConfig: ChainArbitratorConfig{
- Notifier: notifier,
- PreimageDB: witnessBeacon,
- Registry: registry,
- OnionProcessor: onionProcessor,
- },
- PutResolverReport: func(_ kvdb.RwTx,
- _ *channeldb.ResolverReport) er.R {
-
- return nil
- },
- }
-
- cfg := ResolverConfig{
- ChannelArbitratorConfig: chainCfg,
- Checkpoint: func(_ ContractResolver,
- _ ...*channeldb.ResolverReport) er.R {
-
- checkPointChan <- struct{}{}
- return nil
- },
- }
- resolver := &htlcIncomingContestResolver{
- htlcSuccessResolver: htlcSuccessResolver{
- contractResolverKit: *newContractResolverKit(cfg),
- htlcResolution: lnwallet.IncomingHtlcResolution{},
- htlc: channeldb.HTLC{
- Amt: lnwire.MilliSatoshi(testHtlcAmount),
- RHash: testResHash,
- OnionBlob: testOnionBlob,
- },
- },
- htlcExpiry: testHtlcExpiry,
- }
-
- return &incomingResolverTestContext{
- registry: registry,
- witnessBeacon: witnessBeacon,
- resolver: resolver,
- notifier: notifier,
- onionProcessor: onionProcessor,
- t: t,
- }
-}
-
-func (i *incomingResolverTestContext) resolve() {
- // Start resolver.
- i.resolveErr = make(chan er.R, 1)
- go func() {
- var err er.R
- i.nextResolver, err = i.resolver.Resolve()
- i.resolveErr <- err
- }()
-
- // Notify initial block height.
- i.notifyEpoch(testInitialBlockHeight)
-}
-
-func (i *incomingResolverTestContext) notifyEpoch(height int32) {
- i.notifier.EpochChan <- &chainntnfs.BlockEpoch{
- Height: height,
- }
-}
-
-func (i *incomingResolverTestContext) waitForResult(expectSuccessRes bool) {
- i.t.Helper()
-
- err := <-i.resolveErr
- if err != nil {
- i.t.Fatal(err)
- }
-
- if !expectSuccessRes {
- if i.nextResolver != nil {
- i.t.Fatal("expected no next resolver")
- }
- return
- }
-
- successResolver, ok := i.nextResolver.(*htlcSuccessResolver)
- if !ok {
- i.t.Fatal("expected htlcSuccessResolver")
- }
-
- if successResolver.htlcResolution.Preimage != testResPreimage {
- i.t.Fatal("invalid preimage")
- }
-
- successTx := successResolver.htlcResolution.SignedSuccessTx
- if successTx != nil &&
- !bytes.Equal(successTx.TxIn[0].Witness[3], testResPreimage[:]) {
-
- i.t.Fatal("invalid preimage")
- }
-}
diff --git a/lnd/contractcourt/htlc_outgoing_contest_resolver.go b/lnd/contractcourt/htlc_outgoing_contest_resolver.go
deleted file mode 100644
index 9db46f09..00000000
--- a/lnd/contractcourt/htlc_outgoing_contest_resolver.go
+++ /dev/null
@@ -1,219 +0,0 @@
-package contractcourt
-
-import (
- "io"
-
- "github.com/pkt-cash/pktd/btcutil"
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/lnd/channeldb"
- "github.com/pkt-cash/pktd/lnd/lnwallet"
- "github.com/pkt-cash/pktd/pktlog/log"
-)
-
-// htlcOutgoingContestResolver is a ContractResolver that's able to resolve an
-// outgoing HTLC that is still contested. An HTLC is still contested, if at the
-// time that we broadcast the commitment transaction, it isn't able to be fully
-// resolved. In this case, we'll either wait for the HTLC to timeout, or for
-// us to learn of the preimage.
-type htlcOutgoingContestResolver struct {
- // htlcTimeoutResolver is the inner solver that this resolver may turn
- // into. This only happens if the HTLC expires on-chain.
- htlcTimeoutResolver
-}
-
-// newOutgoingContestResolver instantiates a new outgoing contested htlc
-// resolver.
-func newOutgoingContestResolver(res lnwallet.OutgoingHtlcResolution,
- broadcastHeight uint32, htlc channeldb.HTLC,
- resCfg ResolverConfig) *htlcOutgoingContestResolver {
-
- timeout := newTimeoutResolver(
- res, broadcastHeight, htlc, resCfg,
- )
-
- return &htlcOutgoingContestResolver{
- htlcTimeoutResolver: *timeout,
- }
-}
-
-// Resolve commences the resolution of this contract. As this contract hasn't
-// yet timed out, we'll wait for one of two things to happen
-//
-// 1. The HTLC expires. In this case, we'll sweep the funds and send a clean
-// up cancel message to outside sub-systems.
-//
-// 2. The remote party sweeps this HTLC on-chain, in which case we'll add the
-// pre-image to our global cache, then send a clean up settle message
-// backwards.
-//
-// When either of these two things happens, we'll create a new resolver which
-// is able to handle the final resolution of the contract. We're only the pivot
-// point.
-func (h *htlcOutgoingContestResolver) Resolve() (ContractResolver, er.R) {
- // If we're already full resolved, then we don't have anything further
- // to do.
- if h.resolved {
- return nil, nil
- }
-
- // Otherwise, we'll watch for two external signals to decide if we'll
- // morph into another resolver, or fully resolve the contract.
- //
- // The output we'll be watching for is the *direct* spend from the HTLC
- // output. If this isn't our commitment transaction, it'll be right on
- // the resolution. Otherwise, we fetch this pointer from the input of
- // the time out transaction.
- outPointToWatch, scriptToWatch, err := h.chainDetailsToWatch()
- if err != nil {
- return nil, err
- }
-
- // First, we'll register for a spend notification for this output. If
- // the remote party sweeps with the pre-image, we'll be notified.
- spendNtfn, err := h.Notifier.RegisterSpendNtfn(
- outPointToWatch, scriptToWatch, h.broadcastHeight,
- )
- if err != nil {
- return nil, err
- }
-
- // We'll quickly check to see if the output has already been spent.
- select {
- // If the output has already been spent, then we can stop early and
- // sweep the pre-image from the output.
- case commitSpend, ok := <-spendNtfn.Spend:
- if !ok {
- return nil, errResolverShuttingDown.Default()
- }
-
- // TODO(roasbeef): Checkpoint?
- return h.claimCleanUp(commitSpend)
-
- // If it hasn't, then we'll watch for both the expiration, and the
- // sweeping out this output.
- default:
- }
-
- // If we reach this point, then we can't fully act yet, so we'll await
- // either of our signals triggering: the HTLC expires, or we learn of
- // the preimage.
- blockEpochs, err := h.Notifier.RegisterBlockEpochNtfn(nil)
- if err != nil {
- return nil, err
- }
- defer blockEpochs.Cancel()
-
- for {
- select {
-
- // A new block has arrived, we'll check to see if this leads to
- // HTLC expiration.
- case newBlock, ok := <-blockEpochs.Epochs:
- if !ok {
- return nil, errResolverShuttingDown.Default()
- }
-
- // If the current height is >= expiry-1, then a timeout
- // path spend will be valid to be included in the next
- // block, and we can immediately return the resolver.
- //
- // TODO(joostjager): Statement above may not be valid.
- // For CLTV locks, the expiry value is the last
- // _invalid_ block. The likely reason that this does not
- // create a problem, is that utxonursery is checking the
- // expiry again (in the proper way).
- //
- // Source:
- // https://github.com/btcsuite/btcd/blob/991d32e72fe84d5fbf9c47cd604d793a0cd3a072/blockchain/validate.go#L154
- newHeight := uint32(newBlock.Height)
- if newHeight >= h.htlcResolution.Expiry-1 {
- log.Infof("%T(%v): HTLC has expired "+
- "(height=%v, expiry=%v), transforming "+
- "into timeout resolver", h,
- h.htlcResolution.ClaimOutpoint,
- newHeight, h.htlcResolution.Expiry)
- return &h.htlcTimeoutResolver, nil
- }
-
- // The output has been spent! This means the preimage has been
- // revealed on-chain.
- case commitSpend, ok := <-spendNtfn.Spend:
- if !ok {
- return nil, errResolverShuttingDown.Default()
- }
-
- // The only way this output can be spent by the remote
- // party is by revealing the preimage. So we'll perform
- // our duties to clean up the contract once it has been
- // claimed.
- return h.claimCleanUp(commitSpend)
-
- case <-h.quit:
- return nil, er.Errorf("resolver canceled")
- }
- }
-}
-
-// report returns a report on the resolution state of the contract.
-func (h *htlcOutgoingContestResolver) report() *ContractReport {
- // No locking needed as these values are read-only.
-
- finalAmt := h.htlc.Amt.ToSatoshis()
- if h.htlcResolution.SignedTimeoutTx != nil {
- finalAmt = btcutil.Amount(
- h.htlcResolution.SignedTimeoutTx.TxOut[0].Value,
- )
- }
-
- return &ContractReport{
- Outpoint: h.htlcResolution.ClaimOutpoint,
- Type: ReportOutputOutgoingHtlc,
- Amount: finalAmt,
- MaturityHeight: h.htlcResolution.Expiry,
- LimboBalance: finalAmt,
- Stage: 1,
- }
-}
-
-// Stop signals the resolver to cancel any current resolution processes, and
-// suspend.
-//
-// NOTE: Part of the ContractResolver interface.
-func (h *htlcOutgoingContestResolver) Stop() {
- close(h.quit)
-}
-
-// IsResolved returns true if the stored state in the resolve is fully
-// resolved. In this case the target output can be forgotten.
-//
-// NOTE: Part of the ContractResolver interface.
-func (h *htlcOutgoingContestResolver) IsResolved() bool {
- return h.resolved
-}
-
-// Encode writes an encoded version of the ContractResolver into the passed
-// Writer.
-//
-// NOTE: Part of the ContractResolver interface.
-func (h *htlcOutgoingContestResolver) Encode(w io.Writer) er.R {
- return h.htlcTimeoutResolver.Encode(w)
-}
-
-// newOutgoingContestResolverFromReader attempts to decode an encoded ContractResolver
-// from the passed Reader instance, returning an active ContractResolver
-// instance.
-func newOutgoingContestResolverFromReader(r io.Reader, resCfg ResolverConfig) (
- *htlcOutgoingContestResolver, er.R) {
-
- h := &htlcOutgoingContestResolver{}
- timeoutResolver, err := newTimeoutResolverFromReader(r, resCfg)
- if err != nil {
- return nil, err
- }
- h.htlcTimeoutResolver = *timeoutResolver
- return h, nil
-}
-
-// A compile time assertion to ensure htlcOutgoingContestResolver meets the
-// ContractResolver interface.
-var _ htlcContractResolver = (*htlcOutgoingContestResolver)(nil)
diff --git a/lnd/contractcourt/htlc_outgoing_contest_resolver_test.go b/lnd/contractcourt/htlc_outgoing_contest_resolver_test.go
deleted file mode 100644
index 4f1d9f3a..00000000
--- a/lnd/contractcourt/htlc_outgoing_contest_resolver_test.go
+++ /dev/null
@@ -1,240 +0,0 @@
-package contractcourt
-
-import (
- "testing"
-
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/lnd/chainntnfs"
- "github.com/pkt-cash/pktd/lnd/channeldb"
- "github.com/pkt-cash/pktd/lnd/channeldb/kvdb"
- "github.com/pkt-cash/pktd/lnd/input"
- "github.com/pkt-cash/pktd/lnd/lntest/mock"
- "github.com/pkt-cash/pktd/lnd/lntypes"
- "github.com/pkt-cash/pktd/lnd/lnwallet"
- "github.com/pkt-cash/pktd/lnd/lnwire"
- "github.com/pkt-cash/pktd/wire"
-)
-
-const (
- outgoingContestHtlcExpiry = 110
-)
-
-// TestHtlcOutgoingResolverTimeout tests resolution of an offered htlc that
-// timed out.
-func TestHtlcOutgoingResolverTimeout(t *testing.T) {
- t.Parallel()
- defer timeout(t)()
-
- // Setup the resolver with our test resolution.
- ctx := newOutgoingResolverTestContext(t)
-
- // Start the resolution process in a goroutine.
- ctx.resolve()
-
- // Notify arrival of the block after which the timeout path of the htlc
- // unlocks.
- ctx.notifyEpoch(outgoingContestHtlcExpiry - 1)
-
- // Assert that the resolver finishes without error and transforms in a
- // timeout resolver.
- ctx.waitForResult(true)
-}
-
-// TestHtlcOutgoingResolverRemoteClaim tests resolution of an offered htlc that
-// is claimed by the remote party.
-func TestHtlcOutgoingResolverRemoteClaim(t *testing.T) {
- t.Parallel()
- defer timeout(t)()
-
- // Setup the resolver with our test resolution and start the resolution
- // process.
- ctx := newOutgoingResolverTestContext(t)
-
- // Replace our mocked checkpoint function with one which will push
- // reports into a channel for us to consume. We do so on the resolver
- // level because our test context has already created the resolver.
- reportChan := make(chan *channeldb.ResolverReport)
- ctx.resolver.Checkpoint = func(_ ContractResolver,
- reports ...*channeldb.ResolverReport) er.R {
-
- // Send all of our reports into the channel.
- for _, report := range reports {
- reportChan <- report
- }
-
- return nil
- }
-
- ctx.resolve()
-
- // The remote party sweeps the htlc. Notify our resolver of this event.
- preimage := lntypes.Preimage{}
- spendTx := &wire.MsgTx{
- TxIn: []*wire.TxIn{
- {
- Witness: [][]byte{
- {0}, {1}, {2}, preimage[:],
- },
- },
- },
- }
-
- spendHash := spendTx.TxHash()
-
- ctx.notifier.SpendChan <- &chainntnfs.SpendDetail{
- SpendingTx: spendTx,
- SpenderTxHash: &spendHash,
- }
-
- // We expect the extracted preimage to be added to the witness beacon.
- <-ctx.preimageDB.newPreimages
-
- // We also expect a resolution message to the incoming side of the
- // circuit.
- <-ctx.resolutionChan
-
- // Finally, check that we have a report as expected.
- expectedReport := &channeldb.ResolverReport{
- OutPoint: wire.OutPoint{},
- Amount: 0,
- ResolverType: channeldb.ResolverTypeOutgoingHtlc,
- ResolverOutcome: channeldb.ResolverOutcomeClaimed,
- SpendTxID: &spendHash,
- }
-
- assertResolverReport(t, reportChan, expectedReport)
-
- // Assert that the resolver finishes without error.
- ctx.waitForResult(false)
-}
-
-type resolveResult struct {
- err er.R
- nextResolver ContractResolver
-}
-
-type outgoingResolverTestContext struct {
- resolver *htlcOutgoingContestResolver
- notifier *mock.ChainNotifier
- preimageDB *mockWitnessBeacon
- resolverResultChan chan resolveResult
- resolutionChan chan ResolutionMsg
- t *testing.T
-}
-
-func newOutgoingResolverTestContext(t *testing.T) *outgoingResolverTestContext {
- notifier := &mock.ChainNotifier{
- EpochChan: make(chan *chainntnfs.BlockEpoch),
- SpendChan: make(chan *chainntnfs.SpendDetail),
- ConfChan: make(chan *chainntnfs.TxConfirmation),
- }
-
- checkPointChan := make(chan struct{}, 1)
- resolutionChan := make(chan ResolutionMsg, 1)
-
- preimageDB := newMockWitnessBeacon()
-
- onionProcessor := &mockOnionProcessor{}
-
- chainCfg := ChannelArbitratorConfig{
- ChainArbitratorConfig: ChainArbitratorConfig{
- Notifier: notifier,
- PreimageDB: preimageDB,
- DeliverResolutionMsg: func(msgs ...ResolutionMsg) er.R {
- if len(msgs) != 1 {
- return er.Errorf("expected 1 "+
- "resolution msg, instead got %v",
- len(msgs))
- }
-
- resolutionChan <- msgs[0]
- return nil
- },
- OnionProcessor: onionProcessor,
- },
- PutResolverReport: func(_ kvdb.RwTx,
- _ *channeldb.ResolverReport) er.R {
-
- return nil
- },
- }
-
- outgoingRes := lnwallet.OutgoingHtlcResolution{
- Expiry: outgoingContestHtlcExpiry,
- SweepSignDesc: input.SignDescriptor{
- Output: &wire.TxOut{},
- },
- }
-
- cfg := ResolverConfig{
- ChannelArbitratorConfig: chainCfg,
- Checkpoint: func(_ ContractResolver,
- _ ...*channeldb.ResolverReport) er.R {
-
- checkPointChan <- struct{}{}
- return nil
- },
- }
-
- resolver := &htlcOutgoingContestResolver{
- htlcTimeoutResolver: htlcTimeoutResolver{
- contractResolverKit: *newContractResolverKit(cfg),
- htlcResolution: outgoingRes,
- htlc: channeldb.HTLC{
- Amt: lnwire.MilliSatoshi(testHtlcAmount),
- RHash: testResHash,
- OnionBlob: testOnionBlob,
- },
- },
- }
-
- return &outgoingResolverTestContext{
- resolver: resolver,
- notifier: notifier,
- preimageDB: preimageDB,
- resolutionChan: resolutionChan,
- t: t,
- }
-}
-
-func (i *outgoingResolverTestContext) resolve() {
- // Start resolver.
- i.resolverResultChan = make(chan resolveResult, 1)
- go func() {
- nextResolver, err := i.resolver.Resolve()
- i.resolverResultChan <- resolveResult{
- nextResolver: nextResolver,
- err: err,
- }
- }()
-
- // Notify initial block height.
- i.notifyEpoch(testInitialBlockHeight)
-}
-
-func (i *outgoingResolverTestContext) notifyEpoch(height int32) {
- i.notifier.EpochChan <- &chainntnfs.BlockEpoch{
- Height: height,
- }
-}
-
-func (i *outgoingResolverTestContext) waitForResult(expectTimeoutRes bool) {
- i.t.Helper()
-
- result := <-i.resolverResultChan
- if result.err != nil {
- i.t.Fatal(result.err)
- }
-
- if !expectTimeoutRes {
- if result.nextResolver != nil {
- i.t.Fatal("expected no next resolver")
- }
- return
- }
-
- _, ok := result.nextResolver.(*htlcTimeoutResolver)
- if !ok {
- i.t.Fatal("expected htlcTimeoutResolver")
- }
-}
diff --git a/lnd/contractcourt/htlc_success_resolver.go b/lnd/contractcourt/htlc_success_resolver.go
deleted file mode 100644
index 0ead21c3..00000000
--- a/lnd/contractcourt/htlc_success_resolver.go
+++ /dev/null
@@ -1,413 +0,0 @@
-package contractcourt
-
-import (
- "io"
-
- "github.com/davecgh/go-spew/spew"
- "github.com/pkt-cash/pktd/btcutil"
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/btcutil/util"
- "github.com/pkt-cash/pktd/chaincfg/chainhash"
- "github.com/pkt-cash/pktd/lnd/channeldb"
- "github.com/pkt-cash/pktd/lnd/input"
- "github.com/pkt-cash/pktd/lnd/labels"
- "github.com/pkt-cash/pktd/lnd/lnwallet"
- "github.com/pkt-cash/pktd/lnd/sweep"
- "github.com/pkt-cash/pktd/pktlog/log"
- "github.com/pkt-cash/pktd/wire"
-)
-
-// htlcSuccessResolver is a resolver that's capable of sweeping an incoming
-// HTLC output on-chain. If this is the remote party's commitment, we'll sweep
-// it directly from the commitment output *immediately*. If this is our
-// commitment, we'll first broadcast the success transaction, then send it to
-// the incubator for sweeping. That's it, no need to send any clean up
-// messages.
-//
-// TODO(roasbeef): don't need to broadcast?
-type htlcSuccessResolver struct {
- // htlcResolution is the incoming HTLC resolution for this HTLC. It
- // contains everything we need to properly resolve this HTLC.
- htlcResolution lnwallet.IncomingHtlcResolution
-
- // outputIncubating returns true if we've sent the output to the output
- // incubator (utxo nursery).
- outputIncubating bool
-
- // resolved reflects if the contract has been fully resolved or not.
- resolved bool
-
- // broadcastHeight is the height that the original contract was
- // broadcast to the main-chain at. We'll use this value to bound any
- // historical queries to the chain for spends/confirmations.
- broadcastHeight uint32
-
- // sweepTx will be non-nil if we've already crafted a transaction to
- // sweep a direct HTLC output. This is only a concern if we're sweeping
- // from the commitment transaction of the remote party.
- //
- // TODO(roasbeef): send off to utxobundler
- sweepTx *wire.MsgTx
-
- // htlc contains information on the htlc that we are resolving on-chain.
- htlc channeldb.HTLC
-
- contractResolverKit
-}
-
-// newSuccessResolver instanties a new htlc success resolver.
-func newSuccessResolver(res lnwallet.IncomingHtlcResolution,
- broadcastHeight uint32, htlc channeldb.HTLC,
- resCfg ResolverConfig) *htlcSuccessResolver {
-
- return &htlcSuccessResolver{
- contractResolverKit: *newContractResolverKit(resCfg),
- htlcResolution: res,
- broadcastHeight: broadcastHeight,
- htlc: htlc,
- }
-}
-
-// ResolverKey returns an identifier which should be globally unique for this
-// particular resolver within the chain the original contract resides within.
-//
-// NOTE: Part of the ContractResolver interface.
-func (h *htlcSuccessResolver) ResolverKey() []byte {
- // The primary key for this resolver will be the outpoint of the HTLC
- // on the commitment transaction itself. If this is our commitment,
- // then the output can be found within the signed success tx,
- // otherwise, it's just the ClaimOutpoint.
- var op wire.OutPoint
- if h.htlcResolution.SignedSuccessTx != nil {
- op = h.htlcResolution.SignedSuccessTx.TxIn[0].PreviousOutPoint
- } else {
- op = h.htlcResolution.ClaimOutpoint
- }
-
- key := newResolverID(op)
- return key[:]
-}
-
-// Resolve attempts to resolve an unresolved incoming HTLC that we know the
-// preimage to. If the HTLC is on the commitment of the remote party, then we'll
-// simply sweep it directly. Otherwise, we'll hand this off to the utxo nursery
-// to do its duty. There is no need to make a call to the invoice registry
-// anymore. Every HTLC has already passed through the incoming contest resolver
-// and in there the invoice was already marked as settled.
-//
-// TODO(roasbeef): create multi to batch
-//
-// NOTE: Part of the ContractResolver interface.
-func (h *htlcSuccessResolver) Resolve() (ContractResolver, er.R) {
- // If we're already resolved, then we can exit early.
- if h.resolved {
- return nil, nil
- }
-
- // If we don't have a success transaction, then this means that this is
- // an output on the remote party's commitment transaction.
- if h.htlcResolution.SignedSuccessTx == nil {
- // If we don't already have the sweep transaction constructed,
- // we'll do so and broadcast it.
- if h.sweepTx == nil {
- log.Infof("%T(%x): crafting sweep tx for "+
- "incoming+remote htlc confirmed", h,
- h.htlc.RHash[:])
-
- // Before we can craft out sweeping transaction, we
- // need to create an input which contains all the items
- // required to add this input to a sweeping transaction,
- // and generate a witness.
- inp := input.MakeHtlcSucceedInput(
- &h.htlcResolution.ClaimOutpoint,
- &h.htlcResolution.SweepSignDesc,
- h.htlcResolution.Preimage[:],
- h.broadcastHeight,
- h.htlcResolution.CsvDelay,
- )
-
- // With the input created, we can now generate the full
- // sweep transaction, that we'll use to move these
- // coins back into the backing wallet.
- //
- // TODO: Set tx lock time to current block height
- // instead of zero. Will be taken care of once sweeper
- // implementation is complete.
- //
- // TODO: Use time-based sweeper and result chan.
- var err er.R
- h.sweepTx, err = h.Sweeper.CreateSweepTx(
- []input.Input{&inp},
- sweep.FeePreference{
- ConfTarget: sweepConfTarget,
- }, 0,
- )
- if err != nil {
- return nil, err
- }
-
- log.Infof("%T(%x): crafted sweep tx=%v", h,
- h.htlc.RHash[:], spew.Sdump(h.sweepTx))
-
- // With the sweep transaction signed, we'll now
- // Checkpoint our state.
- if err := h.Checkpoint(h); err != nil {
- log.Errorf("unable to Checkpoint: %v", err)
- return nil, err
- }
- }
-
- // Regardless of whether an existing transaction was found or newly
- // constructed, we'll broadcast the sweep transaction to the
- // network.
- label := labels.MakeLabel(
- labels.LabelTypeChannelClose, &h.ShortChanID,
- )
- err := h.PublishTx(h.sweepTx, label)
- if err != nil {
- log.Infof("%T(%x): unable to publish tx: %v",
- h, h.htlc.RHash[:], err)
- return nil, err
- }
-
- // With the sweep transaction broadcast, we'll wait for its
- // confirmation.
- sweepTXID := h.sweepTx.TxHash()
- sweepScript := h.sweepTx.TxOut[0].PkScript
- confNtfn, err := h.Notifier.RegisterConfirmationsNtfn(
- &sweepTXID, sweepScript, 1, h.broadcastHeight,
- )
- if err != nil {
- return nil, err
- }
-
- log.Infof("%T(%x): waiting for sweep tx (txid=%v) to be "+
- "confirmed", h, h.htlc.RHash[:], sweepTXID)
-
- select {
- case _, ok := <-confNtfn.Confirmed:
- if !ok {
- return nil, errResolverShuttingDown.Default()
- }
-
- case <-h.quit:
- return nil, errResolverShuttingDown.Default()
- }
-
- // Once the transaction has received a sufficient number of
- // confirmations, we'll mark ourselves as fully resolved and exit.
- h.resolved = true
-
- // Checkpoint the resolver, and write the outcome to disk.
- return nil, h.checkpointClaim(
- &sweepTXID,
- channeldb.ResolverOutcomeClaimed,
- )
- }
-
- log.Infof("%T(%x): broadcasting second-layer transition tx: %v",
- h, h.htlc.RHash[:], spew.Sdump(h.htlcResolution.SignedSuccessTx))
-
- // We'll now broadcast the second layer transaction so we can kick off
- // the claiming process.
- //
- // TODO(roasbeef): after changing sighashes send to tx bundler
- label := labels.MakeLabel(
- labels.LabelTypeChannelClose, &h.ShortChanID,
- )
- err := h.PublishTx(h.htlcResolution.SignedSuccessTx, label)
- if err != nil {
- return nil, err
- }
-
- // Otherwise, this is an output on our commitment transaction. In this
- // case, we'll send it to the incubator, but only if we haven't already
- // done so.
- if !h.outputIncubating {
- log.Infof("%T(%x): incubating incoming htlc output",
- h, h.htlc.RHash[:])
-
- err := h.IncubateOutputs(
- h.ChanPoint, nil, &h.htlcResolution,
- h.broadcastHeight,
- )
- if err != nil {
- return nil, err
- }
-
- h.outputIncubating = true
-
- if err := h.Checkpoint(h); err != nil {
- log.Errorf("unable to Checkpoint: %v", err)
- return nil, err
- }
- }
-
- // To wrap this up, we'll wait until the second-level transaction has
- // been spent, then fully resolve the contract.
- spendNtfn, err := h.Notifier.RegisterSpendNtfn(
- &h.htlcResolution.ClaimOutpoint,
- h.htlcResolution.SweepSignDesc.Output.PkScript,
- h.broadcastHeight,
- )
- if err != nil {
- return nil, err
- }
-
- log.Infof("%T(%x): waiting for second-level HTLC output to be spent "+
- "after csv_delay=%v", h, h.htlc.RHash[:], h.htlcResolution.CsvDelay)
-
- var spendTxid *chainhash.Hash
- select {
- case spend, ok := <-spendNtfn.Spend:
- if !ok {
- return nil, errResolverShuttingDown.Default()
- }
- spendTxid = spend.SpenderTxHash
-
- case <-h.quit:
- return nil, errResolverShuttingDown.Default()
- }
-
- h.resolved = true
- return nil, h.checkpointClaim(
- spendTxid, channeldb.ResolverOutcomeClaimed,
- )
-}
-
-// checkpointClaim checkpoints the success resolver with the reports it needs.
-// If this htlc was claimed two stages, it will write reports for both stages,
-// otherwise it will just write for the single htlc claim.
-func (h *htlcSuccessResolver) checkpointClaim(spendTx *chainhash.Hash,
- outcome channeldb.ResolverOutcome) er.R {
-
- // Create a resolver report for claiming of the htlc itself.
- amt := btcutil.Amount(h.htlcResolution.SweepSignDesc.Output.Value)
- reports := []*channeldb.ResolverReport{
- {
- OutPoint: h.htlcResolution.ClaimOutpoint,
- Amount: amt,
- ResolverType: channeldb.ResolverTypeIncomingHtlc,
- ResolverOutcome: outcome,
- SpendTxID: spendTx,
- },
- }
-
- // If we have a success tx, we append a report to represent our first
- // stage claim.
- if h.htlcResolution.SignedSuccessTx != nil {
- // If the SignedSuccessTx is not nil, we are claiming the htlc
- // in two stages, so we need to create a report for the first
- // stage transaction as well.
- spendTx := h.htlcResolution.SignedSuccessTx
- spendTxID := spendTx.TxHash()
-
- report := &channeldb.ResolverReport{
- OutPoint: spendTx.TxIn[0].PreviousOutPoint,
- Amount: h.htlc.Amt.ToSatoshis(),
- ResolverType: channeldb.ResolverTypeIncomingHtlc,
- ResolverOutcome: channeldb.ResolverOutcomeFirstStage,
- SpendTxID: &spendTxID,
- }
- reports = append(reports, report)
- }
-
- // Finally, we checkpoint the resolver with our report(s).
- return h.Checkpoint(h, reports...)
-}
-
-// Stop signals the resolver to cancel any current resolution processes, and
-// suspend.
-//
-// NOTE: Part of the ContractResolver interface.
-func (h *htlcSuccessResolver) Stop() {
- close(h.quit)
-}
-
-// IsResolved returns true if the stored state in the resolve is fully
-// resolved. In this case the target output can be forgotten.
-//
-// NOTE: Part of the ContractResolver interface.
-func (h *htlcSuccessResolver) IsResolved() bool {
- return h.resolved
-}
-
-// Encode writes an encoded version of the ContractResolver into the passed
-// Writer.
-//
-// NOTE: Part of the ContractResolver interface.
-func (h *htlcSuccessResolver) Encode(w io.Writer) er.R {
- // First we'll encode our inner HTLC resolution.
- if err := encodeIncomingResolution(w, &h.htlcResolution); err != nil {
- return err
- }
-
- // Next, we'll write out the fields that are specified to the contract
- // resolver.
- if err := util.WriteBin(w, endian, h.outputIncubating); err != nil {
- return err
- }
- if err := util.WriteBin(w, endian, h.resolved); err != nil {
- return err
- }
- if err := util.WriteBin(w, endian, h.broadcastHeight); err != nil {
- return err
- }
- if _, err := util.Write(w, h.htlc.RHash[:]); err != nil {
- return err
- }
-
- return nil
-}
-
-// newSuccessResolverFromReader attempts to decode an encoded ContractResolver
-// from the passed Reader instance, returning an active ContractResolver
-// instance.
-func newSuccessResolverFromReader(r io.Reader, resCfg ResolverConfig) (
- *htlcSuccessResolver, er.R) {
-
- h := &htlcSuccessResolver{
- contractResolverKit: *newContractResolverKit(resCfg),
- }
-
- // First we'll decode our inner HTLC resolution.
- if err := decodeIncomingResolution(r, &h.htlcResolution); err != nil {
- return nil, err
- }
-
- // Next, we'll read all the fields that are specified to the contract
- // resolver.
- if err := util.ReadBin(r, endian, &h.outputIncubating); err != nil {
- return nil, err
- }
- if err := util.ReadBin(r, endian, &h.resolved); err != nil {
- return nil, err
- }
- if err := util.ReadBin(r, endian, &h.broadcastHeight); err != nil {
- return nil, err
- }
- if _, err := util.ReadFull(r, h.htlc.RHash[:]); err != nil {
- return nil, err
- }
-
- return h, nil
-}
-
-// Supplement adds additional information to the resolver that is required
-// before Resolve() is called.
-//
-// NOTE: Part of the htlcContractResolver interface.
-func (h *htlcSuccessResolver) Supplement(htlc channeldb.HTLC) {
- h.htlc = htlc
-}
-
-// HtlcPoint returns the htlc's outpoint on the commitment tx.
-//
-// NOTE: Part of the htlcContractResolver interface.
-func (h *htlcSuccessResolver) HtlcPoint() wire.OutPoint {
- return h.htlcResolution.HtlcPoint()
-}
-
-// A compile time assertion to ensure htlcSuccessResolver meets the
-// ContractResolver interface.
-var _ htlcContractResolver = (*htlcSuccessResolver)(nil)
diff --git a/lnd/contractcourt/htlc_success_resolver_test.go b/lnd/contractcourt/htlc_success_resolver_test.go
deleted file mode 100644
index 3d13e163..00000000
--- a/lnd/contractcourt/htlc_success_resolver_test.go
+++ /dev/null
@@ -1,243 +0,0 @@
-package contractcourt
-
-import (
- "testing"
-
- "github.com/pkt-cash/pktd/btcutil"
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/lnd/chainntnfs"
- "github.com/pkt-cash/pktd/lnd/channeldb"
- "github.com/pkt-cash/pktd/lnd/channeldb/kvdb"
- "github.com/pkt-cash/pktd/lnd/lntest/mock"
- "github.com/pkt-cash/pktd/lnd/lnwallet"
- "github.com/pkt-cash/pktd/lnd/lnwire"
- "github.com/pkt-cash/pktd/wire"
-)
-
-var testHtlcAmt = lnwire.MilliSatoshi(200000)
-
-type htlcSuccessResolverTestContext struct {
- resolver *htlcSuccessResolver
- notifier *mock.ChainNotifier
- resolverResultChan chan resolveResult
- t *testing.T
-}
-
-func newHtlcSuccessResolverTextContext(t *testing.T) *htlcSuccessResolverTestContext {
- notifier := &mock.ChainNotifier{
- EpochChan: make(chan *chainntnfs.BlockEpoch),
- SpendChan: make(chan *chainntnfs.SpendDetail),
- ConfChan: make(chan *chainntnfs.TxConfirmation),
- }
-
- checkPointChan := make(chan struct{}, 1)
-
- testCtx := &htlcSuccessResolverTestContext{
- notifier: notifier,
- t: t,
- }
-
- chainCfg := ChannelArbitratorConfig{
- ChainArbitratorConfig: ChainArbitratorConfig{
- Notifier: notifier,
- PublishTx: func(_ *wire.MsgTx, _ string) er.R {
- return nil
- },
- },
- PutResolverReport: func(_ kvdb.RwTx,
- report *channeldb.ResolverReport) er.R {
-
- return nil
- },
- }
-
- cfg := ResolverConfig{
- ChannelArbitratorConfig: chainCfg,
- Checkpoint: func(_ ContractResolver,
- _ ...*channeldb.ResolverReport) er.R {
-
- checkPointChan <- struct{}{}
- return nil
- },
- }
-
- testCtx.resolver = &htlcSuccessResolver{
- contractResolverKit: *newContractResolverKit(cfg),
- htlcResolution: lnwallet.IncomingHtlcResolution{},
- htlc: channeldb.HTLC{
- RHash: testResHash,
- OnionBlob: testOnionBlob,
- Amt: testHtlcAmt,
- },
- }
-
- return testCtx
-}
-
-func (i *htlcSuccessResolverTestContext) resolve() {
- // Start resolver.
- i.resolverResultChan = make(chan resolveResult, 1)
- go func() {
- nextResolver, err := i.resolver.Resolve()
- i.resolverResultChan <- resolveResult{
- nextResolver: nextResolver,
- err: err,
- }
- }()
-}
-
-func (i *htlcSuccessResolverTestContext) waitForResult() {
- i.t.Helper()
-
- result := <-i.resolverResultChan
- if result.err != nil {
- i.t.Fatal(result.err)
- }
-
- if result.nextResolver != nil {
- i.t.Fatal("expected no next resolver")
- }
-}
-
-// TestSingleStageSuccess tests successful sweep of a single stage htlc claim.
-func TestSingleStageSuccess(t *testing.T) {
- htlcOutpoint := wire.OutPoint{Index: 3}
-
- sweepTx := &wire.MsgTx{
- TxIn: []*wire.TxIn{{}},
- TxOut: []*wire.TxOut{{}},
- }
-
- // singleStageResolution is a resolution for a htlc on the remote
- // party's commitment.
- singleStageResolution := lnwallet.IncomingHtlcResolution{
- SweepSignDesc: testSignDesc,
- ClaimOutpoint: htlcOutpoint,
- }
-
- // We send a confirmation for our sweep tx to indicate that our sweep
- // succeeded.
- resolve := func(ctx *htlcSuccessResolverTestContext) {
- ctx.notifier.ConfChan <- &chainntnfs.TxConfirmation{
- Tx: ctx.resolver.sweepTx,
- BlockHeight: testInitialBlockHeight - 1,
- }
- }
-
- sweepTxid := sweepTx.TxHash()
- claim := &channeldb.ResolverReport{
- OutPoint: htlcOutpoint,
- Amount: btcutil.Amount(testSignDesc.Output.Value),
- ResolverType: channeldb.ResolverTypeIncomingHtlc,
- ResolverOutcome: channeldb.ResolverOutcomeClaimed,
- SpendTxID: &sweepTxid,
- }
- testHtlcSuccess(
- t, singleStageResolution, resolve, sweepTx, claim,
- )
-}
-
-// TestSecondStageResolution tests successful sweep of a second stage htlc
-// claim.
-func TestSecondStageResolution(t *testing.T) {
- commitOutpoint := wire.OutPoint{Index: 2}
- htlcOutpoint := wire.OutPoint{Index: 3}
-
- sweepTx := &wire.MsgTx{
- TxIn: []*wire.TxIn{{}},
- TxOut: []*wire.TxOut{{}},
- }
- sweepHash := sweepTx.TxHash()
-
- // twoStageResolution is a resolution for htlc on our own commitment
- // which is spent from the signed success tx.
- twoStageResolution := lnwallet.IncomingHtlcResolution{
- Preimage: [32]byte{},
- SignedSuccessTx: &wire.MsgTx{
- TxIn: []*wire.TxIn{
- {
- PreviousOutPoint: commitOutpoint,
- },
- },
- TxOut: []*wire.TxOut{},
- },
- ClaimOutpoint: htlcOutpoint,
- SweepSignDesc: testSignDesc,
- }
-
- // We send a spend notification for our output to resolve our htlc.
- resolve := func(ctx *htlcSuccessResolverTestContext) {
- ctx.notifier.SpendChan <- &chainntnfs.SpendDetail{
- SpendingTx: sweepTx,
- SpenderTxHash: &sweepHash,
- }
- }
-
- successTx := twoStageResolution.SignedSuccessTx.TxHash()
- firstStage := &channeldb.ResolverReport{
- OutPoint: commitOutpoint,
- Amount: testHtlcAmt.ToSatoshis(),
- ResolverType: channeldb.ResolverTypeIncomingHtlc,
- ResolverOutcome: channeldb.ResolverOutcomeFirstStage,
- SpendTxID: &successTx,
- }
-
- secondStage := &channeldb.ResolverReport{
- OutPoint: htlcOutpoint,
- Amount: btcutil.Amount(testSignDesc.Output.Value),
- ResolverType: channeldb.ResolverTypeIncomingHtlc,
- ResolverOutcome: channeldb.ResolverOutcomeClaimed,
- SpendTxID: &sweepHash,
- }
-
- testHtlcSuccess(
- t, twoStageResolution, resolve, sweepTx, secondStage, firstStage,
- )
-}
-
-// testHtlcSuccess tests resolution of a success resolver. It takes a resolve
-// function which triggers resolution and the sweeptxid that will resolve it.
-func testHtlcSuccess(t *testing.T, resolution lnwallet.IncomingHtlcResolution,
- resolve func(*htlcSuccessResolverTestContext),
- sweepTx *wire.MsgTx, reports ...*channeldb.ResolverReport) {
-
- defer timeout(t)()
-
- ctx := newHtlcSuccessResolverTextContext(t)
-
- // Replace our checkpoint with one which will push reports into a
- // channel for us to consume. We replace this function on the resolver
- // itself because it is created by the test context.
- reportChan := make(chan *channeldb.ResolverReport)
- ctx.resolver.Checkpoint = func(_ ContractResolver,
- reports ...*channeldb.ResolverReport) er.R {
-
- // Send all of our reports into the channel.
- for _, report := range reports {
- reportChan <- report
- }
-
- return nil
- }
-
- ctx.resolver.htlcResolution = resolution
-
- // We set the sweepTx to be non-nil and mark the output as already
- // incubating so that we do not need to set test values for crafting
- // our own sweep transaction.
- ctx.resolver.sweepTx = sweepTx
- ctx.resolver.outputIncubating = true
-
- // Start the htlc success resolver.
- ctx.resolve()
-
- // Trigger and event that will resolve our test context.
- resolve(ctx)
-
- for _, report := range reports {
- assertResolverReport(t, reportChan, report)
- }
-
- // Wait for the resolver to fully complete.
- ctx.waitForResult()
-}
diff --git a/lnd/contractcourt/htlc_timeout_resolver.go b/lnd/contractcourt/htlc_timeout_resolver.go
deleted file mode 100644
index 1de12fd3..00000000
--- a/lnd/contractcourt/htlc_timeout_resolver.go
+++ /dev/null
@@ -1,514 +0,0 @@
-package contractcourt
-
-import (
- "io"
-
- "github.com/davecgh/go-spew/spew"
- "github.com/pkt-cash/pktd/btcutil"
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/btcutil/util"
- "github.com/pkt-cash/pktd/chaincfg/chainhash"
- "github.com/pkt-cash/pktd/lnd/chainntnfs"
- "github.com/pkt-cash/pktd/lnd/channeldb"
- "github.com/pkt-cash/pktd/lnd/input"
- "github.com/pkt-cash/pktd/lnd/lntypes"
- "github.com/pkt-cash/pktd/lnd/lnwallet"
- "github.com/pkt-cash/pktd/lnd/lnwire"
- "github.com/pkt-cash/pktd/pktlog/log"
- "github.com/pkt-cash/pktd/wire"
-)
-
-// htlcTimeoutResolver is a ContractResolver that's capable of resolving an
-// outgoing HTLC. The HTLC may be on our commitment transaction, or on the
-// commitment transaction of the remote party. An output on our commitment
-// transaction is considered fully resolved once the second-level transaction
-// has been confirmed (and reached a sufficient depth). An output on the
-// commitment transaction of the remote party is resolved once we detect a
-// spend of the direct HTLC output using the timeout clause.
-type htlcTimeoutResolver struct {
- // htlcResolution contains all the information required to properly
- // resolve this outgoing HTLC.
- htlcResolution lnwallet.OutgoingHtlcResolution
-
- // outputIncubating returns true if we've sent the output to the output
- // incubator (utxo nursery).
- outputIncubating bool
-
- // resolved reflects if the contract has been fully resolved or not.
- resolved bool
-
- // broadcastHeight is the height that the original contract was
- // broadcast to the main-chain at. We'll use this value to bound any
- // historical queries to the chain for spends/confirmations.
- //
- // TODO(roasbeef): wrap above into definite resolution embedding?
- broadcastHeight uint32
-
- // htlc contains information on the htlc that we are resolving on-chain.
- htlc channeldb.HTLC
-
- contractResolverKit
-}
-
-// newTimeoutResolver instantiates a new timeout htlc resolver.
-func newTimeoutResolver(res lnwallet.OutgoingHtlcResolution,
- broadcastHeight uint32, htlc channeldb.HTLC,
- resCfg ResolverConfig) *htlcTimeoutResolver {
-
- return &htlcTimeoutResolver{
- contractResolverKit: *newContractResolverKit(resCfg),
- htlcResolution: res,
- broadcastHeight: broadcastHeight,
- htlc: htlc,
- }
-}
-
-// ResolverKey returns an identifier which should be globally unique for this
-// particular resolver within the chain the original contract resides within.
-//
-// NOTE: Part of the ContractResolver interface.
-func (h *htlcTimeoutResolver) ResolverKey() []byte {
- // The primary key for this resolver will be the outpoint of the HTLC
- // on the commitment transaction itself. If this is our commitment,
- // then the output can be found within the signed timeout tx,
- // otherwise, it's just the ClaimOutpoint.
- var op wire.OutPoint
- if h.htlcResolution.SignedTimeoutTx != nil {
- op = h.htlcResolution.SignedTimeoutTx.TxIn[0].PreviousOutPoint
- } else {
- op = h.htlcResolution.ClaimOutpoint
- }
-
- key := newResolverID(op)
- return key[:]
-}
-
-const (
- // expectedRemoteWitnessSuccessSize is the expected size of the witness
- // on the remote commitment transaction for an outgoing HTLC that is
- // swept on-chain by them with pre-image.
- expectedRemoteWitnessSuccessSize = 5
-
- // remotePreimageIndex index within the witness on the remote
- // commitment transaction that will hold they pre-image if they go to
- // sweep it on chain.
- remotePreimageIndex = 3
-
- // localPreimageIndex is the index within the witness on the local
- // commitment transaction for an outgoing HTLC that will hold the
- // pre-image if the remote party sweeps it.
- localPreimageIndex = 1
-)
-
-// claimCleanUp is a helper method that's called once the HTLC output is spent
-// by the remote party. It'll extract the preimage, add it to the global cache,
-// and finally send the appropriate clean up message.
-func (h *htlcTimeoutResolver) claimCleanUp(
- commitSpend *chainntnfs.SpendDetail) (ContractResolver, er.R) {
-
- // Depending on if this is our commitment or not, then we'll be looking
- // for a different witness pattern.
- spenderIndex := commitSpend.SpenderInputIndex
- spendingInput := commitSpend.SpendingTx.TxIn[spenderIndex]
-
- log.Infof("%T(%v): extracting preimage! remote party spent "+
- "HTLC with tx=%v", h, h.htlcResolution.ClaimOutpoint,
- spew.Sdump(commitSpend.SpendingTx))
-
- // If this is the remote party's commitment, then we'll be looking for
- // them to spend using the second-level success transaction.
- var preimageBytes []byte
- if h.htlcResolution.SignedTimeoutTx == nil {
- // The witness stack when the remote party sweeps the output to
- // them looks like:
- //
- // * <0>
- preimageBytes = spendingInput.Witness[remotePreimageIndex]
- } else {
- // Otherwise, they'll be spending directly from our commitment
- // output. In which case the witness stack looks like:
- //
- // *
- preimageBytes = spendingInput.Witness[localPreimageIndex]
- }
-
- preimage, err := lntypes.MakePreimage(preimageBytes)
- if err != nil {
- return nil, er.Errorf("unable to create pre-image from "+
- "witness: %v", err)
- }
-
- log.Infof("%T(%v): extracting preimage=%v from on-chain "+
- "spend!", h, h.htlcResolution.ClaimOutpoint, preimage)
-
- // With the preimage obtained, we can now add it to the global cache.
- if err := h.PreimageDB.AddPreimages(preimage); err != nil {
- log.Errorf("%T(%v): unable to add witness to cache",
- h, h.htlcResolution.ClaimOutpoint)
- }
-
- var pre [32]byte
- copy(pre[:], preimage[:])
-
- // Finally, we'll send the clean up message, mark ourselves as
- // resolved, then exit.
- if err := h.DeliverResolutionMsg(ResolutionMsg{
- SourceChan: h.ShortChanID,
- HtlcIndex: h.htlc.HtlcIndex,
- PreImage: &pre,
- }); err != nil {
- return nil, err
- }
- h.resolved = true
-
- // Checkpoint our resolver with a report which reflects the preimage
- // claim by the remote party.
- amt := btcutil.Amount(h.htlcResolution.SweepSignDesc.Output.Value)
- report := &channeldb.ResolverReport{
- OutPoint: h.htlcResolution.ClaimOutpoint,
- Amount: amt,
- ResolverType: channeldb.ResolverTypeOutgoingHtlc,
- ResolverOutcome: channeldb.ResolverOutcomeClaimed,
- SpendTxID: commitSpend.SpenderTxHash,
- }
-
- return nil, h.Checkpoint(h, report)
-}
-
-// chainDetailsToWatch returns the output and script which we use to watch for
-// spends from the direct HTLC output on the commitment transaction.
-//
-// TODO(joostjager): output already set properly in
-// lnwallet.newOutgoingHtlcResolution? And script too?
-func (h *htlcTimeoutResolver) chainDetailsToWatch() (*wire.OutPoint, []byte, er.R) {
- // If there's no timeout transaction, then the claim output is the
- // output directly on the commitment transaction, so we'll just use
- // that.
- if h.htlcResolution.SignedTimeoutTx == nil {
- outPointToWatch := h.htlcResolution.ClaimOutpoint
- scriptToWatch := h.htlcResolution.SweepSignDesc.Output.PkScript
-
- return &outPointToWatch, scriptToWatch, nil
- }
-
- // If this is the remote party's commitment, then we'll need to grab
- // watch the output that our timeout transaction points to. We can
- // directly grab the outpoint, then also extract the witness script
- // (the last element of the witness stack) to re-construct the pkScript
- // we need to watch.
- outPointToWatch := h.htlcResolution.SignedTimeoutTx.TxIn[0].PreviousOutPoint
- witness := h.htlcResolution.SignedTimeoutTx.TxIn[0].Witness
- scriptToWatch, err := input.WitnessScriptHash(witness[len(witness)-1])
- if err != nil {
- return nil, nil, err
- }
-
- return &outPointToWatch, scriptToWatch, nil
-}
-
-// isSuccessSpend returns true if the passed spend on the specified commitment
-// is a success spend that reveals the pre-image or not.
-func isSuccessSpend(spend *chainntnfs.SpendDetail, localCommit bool) bool {
- // Based on the spending input index and transaction, obtain the
- // witness that tells us what type of spend this is.
- spenderIndex := spend.SpenderInputIndex
- spendingInput := spend.SpendingTx.TxIn[spenderIndex]
- spendingWitness := spendingInput.Witness
-
- // If this is the remote commitment then the only possible spends for
- // outgoing HTLCs are:
- //
- // RECVR: <0> (2nd level success spend)
- // REVOK:
- // SENDR: 0
- //
- // In this case, if 5 witness elements are present (factoring the
- // witness script), and the 3rd element is the size of the pre-image,
- // then this is a remote spend. If not, then we swept it ourselves, or
- // revoked their output.
- if !localCommit {
- return len(spendingWitness) == expectedRemoteWitnessSuccessSize &&
- len(spendingWitness[remotePreimageIndex]) == lntypes.HashSize
- }
-
- // Otherwise, for our commitment, the only possible spends for an
- // outgoing HTLC are:
- //
- // SENDR: <0> <0> (2nd level timeout)
- // RECVR:
- // REVOK:
- //
- // So the only success case has the pre-image as the 2nd (index 1)
- // element in the witness.
- return len(spendingWitness[localPreimageIndex]) == lntypes.HashSize
-}
-
-// Resolve kicks off full resolution of an outgoing HTLC output. If it's our
-// commitment, it isn't resolved until we see the second level HTLC txn
-// confirmed. If it's the remote party's commitment, we don't resolve until we
-// see a direct sweep via the timeout clause.
-//
-// NOTE: Part of the ContractResolver interface.
-func (h *htlcTimeoutResolver) Resolve() (ContractResolver, er.R) {
- // If we're already resolved, then we can exit early.
- if h.resolved {
- return nil, nil
- }
-
- // If we haven't already sent the output to the utxo nursery, then
- // we'll do so now.
- if !h.outputIncubating {
- log.Tracef("%T(%v): incubating htlc output", h,
- h.htlcResolution.ClaimOutpoint)
-
- err := h.IncubateOutputs(
- h.ChanPoint, &h.htlcResolution, nil,
- h.broadcastHeight,
- )
- if err != nil {
- return nil, err
- }
-
- h.outputIncubating = true
-
- if err := h.Checkpoint(h); err != nil {
- log.Errorf("unable to Checkpoint: %v", err)
- return nil, err
- }
- }
-
- var spendTxID *chainhash.Hash
-
- // waitForOutputResolution waits for the HTLC output to be fully
- // resolved. The output is considered fully resolved once it has been
- // spent, and the spending transaction has been fully confirmed.
- waitForOutputResolution := func() er.R {
- // We first need to register to see when the HTLC output itself
- // has been spent by a confirmed transaction.
- spendNtfn, err := h.Notifier.RegisterSpendNtfn(
- &h.htlcResolution.ClaimOutpoint,
- h.htlcResolution.SweepSignDesc.Output.PkScript,
- h.broadcastHeight,
- )
- if err != nil {
- return err
- }
-
- select {
- case spendDetail, ok := <-spendNtfn.Spend:
- if !ok {
- return errResolverShuttingDown.Default()
- }
- spendTxID = spendDetail.SpenderTxHash
-
- case <-h.quit:
- return errResolverShuttingDown.Default()
- }
-
- return nil
- }
-
- // Now that we've handed off the HTLC to the nursery, we'll watch for a
- // spend of the output, and make our next move off of that. Depending
- // on if this is our commitment, or the remote party's commitment,
- // we'll be watching a different outpoint and script.
- outpointToWatch, scriptToWatch, err := h.chainDetailsToWatch()
- if err != nil {
- return nil, err
- }
- spendNtfn, err := h.Notifier.RegisterSpendNtfn(
- outpointToWatch, scriptToWatch, h.broadcastHeight,
- )
- if err != nil {
- return nil, err
- }
-
- log.Infof("%T(%v): waiting for HTLC output %v to be spent"+
- "fully confirmed", h, h.htlcResolution.ClaimOutpoint,
- outpointToWatch)
-
- // We'll block here until either we exit, or the HTLC output on the
- // commitment transaction has been spent.
- var (
- spend *chainntnfs.SpendDetail
- ok bool
- )
- select {
- case spend, ok = <-spendNtfn.Spend:
- if !ok {
- return nil, errResolverShuttingDown.Default()
- }
- spendTxID = spend.SpenderTxHash
-
- case <-h.quit:
- return nil, errResolverShuttingDown.Default()
- }
-
- // If the spend reveals the pre-image, then we'll enter the clean up
- // workflow to pass the pre-image back to the incoming link, add it to
- // the witness cache, and exit.
- if isSuccessSpend(spend, h.htlcResolution.SignedTimeoutTx != nil) {
- log.Infof("%T(%v): HTLC has been swept with pre-image by "+
- "remote party during timeout flow! Adding pre-image to "+
- "witness cache", h.htlcResolution.ClaimOutpoint)
-
- return h.claimCleanUp(spend)
- }
-
- log.Infof("%T(%v): resolving htlc with incoming fail msg, fully "+
- "confirmed", h, h.htlcResolution.ClaimOutpoint)
-
- // At this point, the second-level transaction is sufficiently
- // confirmed, or a transaction directly spending the output is.
- // Therefore, we can now send back our clean up message, failing the
- // HTLC on the incoming link.
- failureMsg := &lnwire.FailPermanentChannelFailure{}
- if err := h.DeliverResolutionMsg(ResolutionMsg{
- SourceChan: h.ShortChanID,
- HtlcIndex: h.htlc.HtlcIndex,
- Failure: failureMsg,
- }); err != nil {
- return nil, err
- }
-
- var reports []*channeldb.ResolverReport
-
- // Finally, if this was an output on our commitment transaction, we'll
- // wait for the second-level HTLC output to be spent, and for that
- // transaction itself to confirm.
- if h.htlcResolution.SignedTimeoutTx != nil {
- log.Infof("%T(%v): waiting for nursery to spend CSV delayed "+
- "output", h, h.htlcResolution.ClaimOutpoint)
- if err := waitForOutputResolution(); err != nil {
- return nil, err
- }
-
- // Once our timeout tx has confirmed, we add a resolution for
- // our timeoutTx tx first stage transaction.
- timeoutTx := h.htlcResolution.SignedTimeoutTx
- spendHash := timeoutTx.TxHash()
-
- reports = append(reports, &channeldb.ResolverReport{
- OutPoint: timeoutTx.TxIn[0].PreviousOutPoint,
- Amount: h.htlc.Amt.ToSatoshis(),
- ResolverType: channeldb.ResolverTypeOutgoingHtlc,
- ResolverOutcome: channeldb.ResolverOutcomeFirstStage,
- SpendTxID: &spendHash,
- })
- }
-
- // With the clean up message sent, we'll now mark the contract
- // resolved, record the timeout and the sweep txid on disk, and wait.
- h.resolved = true
-
- amt := btcutil.Amount(h.htlcResolution.SweepSignDesc.Output.Value)
- reports = append(reports, &channeldb.ResolverReport{
- OutPoint: h.htlcResolution.ClaimOutpoint,
- Amount: amt,
- ResolverType: channeldb.ResolverTypeOutgoingHtlc,
- ResolverOutcome: channeldb.ResolverOutcomeTimeout,
- SpendTxID: spendTxID,
- })
-
- return nil, h.Checkpoint(h, reports...)
-}
-
-// Stop signals the resolver to cancel any current resolution processes, and
-// suspend.
-//
-// NOTE: Part of the ContractResolver interface.
-func (h *htlcTimeoutResolver) Stop() {
- close(h.quit)
-}
-
-// IsResolved returns true if the stored state in the resolve is fully
-// resolved. In this case the target output can be forgotten.
-//
-// NOTE: Part of the ContractResolver interface.
-func (h *htlcTimeoutResolver) IsResolved() bool {
- return h.resolved
-}
-
-// Encode writes an encoded version of the ContractResolver into the passed
-// Writer.
-//
-// NOTE: Part of the ContractResolver interface.
-func (h *htlcTimeoutResolver) Encode(w io.Writer) er.R {
- // First, we'll write out the relevant fields of the
- // OutgoingHtlcResolution to the writer.
- if err := encodeOutgoingResolution(w, &h.htlcResolution); err != nil {
- return err
- }
-
- // With that portion written, we can now write out the fields specific
- // to the resolver itself.
- if err := util.WriteBin(w, endian, h.outputIncubating); err != nil {
- return err
- }
- if err := util.WriteBin(w, endian, h.resolved); err != nil {
- return err
- }
- if err := util.WriteBin(w, endian, h.broadcastHeight); err != nil {
- return err
- }
-
- if err := util.WriteBin(w, endian, h.htlc.HtlcIndex); err != nil {
- return err
- }
-
- return nil
-}
-
-// newTimeoutResolverFromReader attempts to decode an encoded ContractResolver
-// from the passed Reader instance, returning an active ContractResolver
-// instance.
-func newTimeoutResolverFromReader(r io.Reader, resCfg ResolverConfig) (
- *htlcTimeoutResolver, er.R) {
-
- h := &htlcTimeoutResolver{
- contractResolverKit: *newContractResolverKit(resCfg),
- }
-
- // First, we'll read out all the mandatory fields of the
- // OutgoingHtlcResolution that we store.
- if err := decodeOutgoingResolution(r, &h.htlcResolution); err != nil {
- return nil, err
- }
-
- // With those fields read, we can now read back the fields that are
- // specific to the resolver itself.
- if err := util.ReadBin(r, endian, &h.outputIncubating); err != nil {
- return nil, err
- }
- if err := util.ReadBin(r, endian, &h.resolved); err != nil {
- return nil, err
- }
- if err := util.ReadBin(r, endian, &h.broadcastHeight); err != nil {
- return nil, err
- }
-
- if err := util.ReadBin(r, endian, &h.htlc.HtlcIndex); err != nil {
- return nil, err
- }
-
- return h, nil
-}
-
-// Supplement adds additional information to the resolver that is required
-// before Resolve() is called.
-//
-// NOTE: Part of the htlcContractResolver interface.
-func (h *htlcTimeoutResolver) Supplement(htlc channeldb.HTLC) {
- h.htlc = htlc
-}
-
-// HtlcPoint returns the htlc's outpoint on the commitment tx.
-//
-// NOTE: Part of the htlcContractResolver interface.
-func (h *htlcTimeoutResolver) HtlcPoint() wire.OutPoint {
- return h.htlcResolution.HtlcPoint()
-}
-
-// A compile time assertion to ensure htlcTimeoutResolver meets the
-// ContractResolver interface.
-var _ htlcContractResolver = (*htlcTimeoutResolver)(nil)
diff --git a/lnd/contractcourt/htlc_timeout_resolver_test.go b/lnd/contractcourt/htlc_timeout_resolver_test.go
deleted file mode 100644
index 523e65e1..00000000
--- a/lnd/contractcourt/htlc_timeout_resolver_test.go
+++ /dev/null
@@ -1,435 +0,0 @@
-package contractcourt
-
-import (
- "bytes"
- "sync"
- "testing"
- "time"
-
- "github.com/pkt-cash/pktd/btcutil"
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/lnd/chainntnfs"
- "github.com/pkt-cash/pktd/lnd/channeldb"
- "github.com/pkt-cash/pktd/lnd/channeldb/kvdb"
- "github.com/pkt-cash/pktd/lnd/input"
- "github.com/pkt-cash/pktd/lnd/lntest/mock"
- "github.com/pkt-cash/pktd/lnd/lntypes"
- "github.com/pkt-cash/pktd/lnd/lnwallet"
- "github.com/pkt-cash/pktd/txscript/params"
- "github.com/pkt-cash/pktd/wire"
-)
-
-type mockWitnessBeacon struct {
- preImageUpdates chan lntypes.Preimage
- newPreimages chan []lntypes.Preimage
- lookupPreimage map[lntypes.Hash]lntypes.Preimage
-}
-
-func newMockWitnessBeacon() *mockWitnessBeacon {
- return &mockWitnessBeacon{
- preImageUpdates: make(chan lntypes.Preimage, 1),
- newPreimages: make(chan []lntypes.Preimage),
- lookupPreimage: make(map[lntypes.Hash]lntypes.Preimage),
- }
-}
-
-func (m *mockWitnessBeacon) SubscribeUpdates() *WitnessSubscription {
- return &WitnessSubscription{
- WitnessUpdates: m.preImageUpdates,
- CancelSubscription: func() {},
- }
-}
-
-func (m *mockWitnessBeacon) LookupPreimage(payhash lntypes.Hash) (lntypes.Preimage, bool) {
- preimage, ok := m.lookupPreimage[payhash]
- if !ok {
- return lntypes.Preimage{}, false
- }
- return preimage, true
-}
-
-func (m *mockWitnessBeacon) AddPreimages(preimages ...lntypes.Preimage) er.R {
- m.newPreimages <- preimages
- return nil
-}
-
-// TestHtlcTimeoutResolver tests that the timeout resolver properly handles all
-// variations of possible local+remote spends.
-func TestHtlcTimeoutResolver(t *testing.T) {
- t.Parallel()
-
- fakePreimageBytes := bytes.Repeat([]byte{1}, lntypes.HashSize)
-
- var (
- htlcOutpoint wire.OutPoint
- fakePreimage lntypes.Preimage
- )
- fakeSignDesc := &input.SignDescriptor{
- Output: &wire.TxOut{},
- }
-
- copy(fakePreimage[:], fakePreimageBytes)
-
- signer := &mock.DummySigner{}
- sweepTx := &wire.MsgTx{
- TxIn: []*wire.TxIn{
- {
- PreviousOutPoint: htlcOutpoint,
- Witness: [][]byte{{0x01}},
- },
- },
- }
- fakeTimeout := int32(5)
-
- templateTx := &wire.MsgTx{
- TxIn: []*wire.TxIn{
- {
- PreviousOutPoint: htlcOutpoint,
- },
- },
- }
-
- testCases := []struct {
- // name is a human readable description of the test case.
- name string
-
- // remoteCommit denotes if the commitment broadcast was the
- // remote commitment or not.
- remoteCommit bool
-
- // timeout denotes if the HTLC should be let timeout, or if the
- // "remote" party should sweep it on-chain. This also affects
- // what type of resolution message we expect.
- timeout bool
-
- // txToBroadcast is a function closure that should generate the
- // transaction that should spend the HTLC output. Test authors
- // can use this to customize the witness used when spending to
- // trigger various redemption cases.
- txToBroadcast func() (*wire.MsgTx, er.R)
-
- // outcome is the resolver outcome that we expect to be reported
- // once the contract is fully resolved.
- outcome channeldb.ResolverOutcome
- }{
- // Remote commitment is broadcast, we time out the HTLC on
- // chain, and should expect a fail HTLC resolution.
- {
- name: "timeout remote tx",
- remoteCommit: true,
- timeout: true,
- txToBroadcast: func() (*wire.MsgTx, er.R) {
- witness, err := input.ReceiverHtlcSpendTimeout(
- signer, fakeSignDesc, sweepTx,
- fakeTimeout,
- )
- if err != nil {
- return nil, err
- }
-
- templateTx.TxIn[0].Witness = witness
- return templateTx, nil
- },
- outcome: channeldb.ResolverOutcomeTimeout,
- },
-
- // Our local commitment is broadcast, we timeout the HTLC and
- // still expect an HTLC fail resolution.
- {
- name: "timeout local tx",
- remoteCommit: false,
- timeout: true,
- txToBroadcast: func() (*wire.MsgTx, er.R) {
- witness, err := input.SenderHtlcSpendTimeout(
- &mock.DummySignature{}, params.SigHashAll,
- signer, fakeSignDesc, sweepTx,
- )
- if err != nil {
- return nil, err
- }
-
- templateTx.TxIn[0].Witness = witness
-
- // Set the outpoint to be on our commitment, since
- // we need to claim in two stages.
- templateTx.TxIn[0].PreviousOutPoint = testChanPoint1
- return templateTx, nil
- },
- outcome: channeldb.ResolverOutcomeTimeout,
- },
-
- // The remote commitment is broadcast, they sweep with the
- // pre-image, we should get a settle HTLC resolution.
- {
- name: "success remote tx",
- remoteCommit: true,
- timeout: false,
- txToBroadcast: func() (*wire.MsgTx, er.R) {
- witness, err := input.ReceiverHtlcSpendRedeem(
- &mock.DummySignature{}, params.SigHashAll,
- fakePreimageBytes, signer, fakeSignDesc,
- sweepTx,
- )
- if err != nil {
- return nil, err
- }
-
- templateTx.TxIn[0].Witness = witness
- return templateTx, nil
- },
- outcome: channeldb.ResolverOutcomeClaimed,
- },
-
- // The local commitment is broadcast, they sweep it with a
- // timeout from the output, and we should still get the HTLC
- // settle resolution back.
- {
- name: "success local tx",
- remoteCommit: false,
- timeout: false,
- txToBroadcast: func() (*wire.MsgTx, er.R) {
- witness, err := input.SenderHtlcSpendRedeem(
- signer, fakeSignDesc, sweepTx,
- fakePreimageBytes,
- )
- if err != nil {
- return nil, err
- }
-
- templateTx.TxIn[0].Witness = witness
- return templateTx, nil
- },
- outcome: channeldb.ResolverOutcomeClaimed,
- },
- }
-
- notifier := &mock.ChainNotifier{
- EpochChan: make(chan *chainntnfs.BlockEpoch),
- SpendChan: make(chan *chainntnfs.SpendDetail),
- ConfChan: make(chan *chainntnfs.TxConfirmation),
- }
- witnessBeacon := newMockWitnessBeacon()
-
- for _, testCase := range testCases {
- t.Logf("Running test case: %v", testCase.name)
-
- checkPointChan := make(chan struct{}, 1)
- incubateChan := make(chan struct{}, 1)
- resolutionChan := make(chan ResolutionMsg, 1)
- reportChan := make(chan *channeldb.ResolverReport)
-
- chainCfg := ChannelArbitratorConfig{
- ChainArbitratorConfig: ChainArbitratorConfig{
- Notifier: notifier,
- PreimageDB: witnessBeacon,
- IncubateOutputs: func(wire.OutPoint,
- *lnwallet.OutgoingHtlcResolution,
- *lnwallet.IncomingHtlcResolution,
- uint32) er.R {
-
- incubateChan <- struct{}{}
- return nil
- },
- DeliverResolutionMsg: func(msgs ...ResolutionMsg) er.R {
- if len(msgs) != 1 {
- return er.Errorf("expected 1 "+
- "resolution msg, instead got %v",
- len(msgs))
- }
-
- resolutionChan <- msgs[0]
- return nil
- },
- },
- PutResolverReport: func(_ kvdb.RwTx,
- _ *channeldb.ResolverReport) er.R {
-
- return nil
- },
- }
-
- cfg := ResolverConfig{
- ChannelArbitratorConfig: chainCfg,
- Checkpoint: func(_ ContractResolver,
- reports ...*channeldb.ResolverReport) er.R {
-
- checkPointChan <- struct{}{}
-
- // Send all of our reports into the channel.
- for _, report := range reports {
- reportChan <- report
- }
-
- return nil
- },
- }
- resolver := &htlcTimeoutResolver{
- htlcResolution: lnwallet.OutgoingHtlcResolution{
- ClaimOutpoint: testChanPoint2,
- SweepSignDesc: *fakeSignDesc,
- },
- contractResolverKit: *newContractResolverKit(
- cfg,
- ),
- htlc: channeldb.HTLC{
- Amt: testHtlcAmt,
- },
- }
-
- var reports []*channeldb.ResolverReport
-
- // If the test case needs the remote commitment to be
- // broadcast, then we'll set the timeout commit to a fake
- // transaction to force the code path.
- if !testCase.remoteCommit {
- resolver.htlcResolution.SignedTimeoutTx = sweepTx
-
- if testCase.timeout {
- success := sweepTx.TxHash()
- reports = append(reports, &channeldb.ResolverReport{
- OutPoint: sweepTx.TxIn[0].PreviousOutPoint,
- Amount: testHtlcAmt.ToSatoshis(),
- ResolverType: channeldb.ResolverTypeOutgoingHtlc,
- ResolverOutcome: channeldb.ResolverOutcomeFirstStage,
- SpendTxID: &success,
- })
- }
- }
-
- // With all the setup above complete, we can initiate the
- // resolution process, and the bulk of our test.
- var wg sync.WaitGroup
- resolveErr := make(chan er.R, 1)
- wg.Add(1)
- go func() {
- defer wg.Done()
-
- _, err := resolver.Resolve()
- if err != nil {
- resolveErr <- err
- }
- }()
-
- // At the output isn't yet in the nursery, we expect that we
- // should receive an incubation request.
- select {
- case <-incubateChan:
- case err := <-resolveErr:
- t.Fatalf("unable to resolve HTLC: %v", err)
- case <-time.After(time.Second * 5):
- t.Fatalf("failed to receive incubation request")
- }
-
- // Next, the resolver should request a spend notification for
- // the direct HTLC output. We'll use the txToBroadcast closure
- // for the test case to generate the transaction that we'll
- // send to the resolver.
- spendingTx, err := testCase.txToBroadcast()
- if err != nil {
- t.Fatalf("unable to generate tx: %v", err)
- }
- spendTxHash := spendingTx.TxHash()
-
- select {
- case notifier.SpendChan <- &chainntnfs.SpendDetail{
- SpendingTx: spendingTx,
- SpenderTxHash: &spendTxHash,
- }:
- case <-time.After(time.Second * 5):
- t.Fatalf("failed to request spend ntfn")
- }
-
- if !testCase.timeout {
- // If the resolver should settle now, then we'll
- // extract the pre-image to be extracted and the
- // resolution message sent.
- select {
- case newPreimage := <-witnessBeacon.newPreimages:
- if newPreimage[0] != fakePreimage {
- t.Fatalf("wrong pre-image: "+
- "expected %v, got %v",
- fakePreimage, newPreimage)
- }
-
- case <-time.After(time.Second * 5):
- t.Fatalf("pre-image not added")
- }
-
- // Finally, we should get a resolution message with the
- // pre-image set within the message.
- select {
- case resolutionMsg := <-resolutionChan:
- // Once again, the pre-images should match up.
- if *resolutionMsg.PreImage != fakePreimage {
- t.Fatalf("wrong pre-image: "+
- "expected %v, got %v",
- fakePreimage, resolutionMsg.PreImage)
- }
- case <-time.After(time.Second * 5):
- t.Fatalf("resolution not sent")
- }
- } else {
-
- // Otherwise, the HTLC should now timeout. First, we
- // should get a resolution message with a populated
- // failure message.
- select {
- case resolutionMsg := <-resolutionChan:
- if resolutionMsg.Failure == nil {
- t.Fatalf("expected failure resolution msg")
- }
- case <-time.After(time.Second * 5):
- t.Fatalf("resolution not sent")
- }
-
- // We should also get another request for the spend
- // notification of the second-level transaction to
- // indicate that it's been swept by the nursery, but
- // only if this is a local commitment transaction.
- if !testCase.remoteCommit {
- select {
- case notifier.SpendChan <- &chainntnfs.SpendDetail{
- SpendingTx: spendingTx,
- SpenderTxHash: &spendTxHash,
- }:
- case <-time.After(time.Second * 5):
- t.Fatalf("failed to request spend ntfn")
- }
- }
- }
-
- // In any case, before the resolver exits, it should checkpoint
- // its final state.
- select {
- case <-checkPointChan:
- case err := <-resolveErr:
- t.Fatalf("unable to resolve HTLC: %v", err)
- case <-time.After(time.Second * 5):
- t.Fatalf("check point not received")
- }
-
- // Add a report to our set of expected reports with the outcome
- // that the test specifies (either success or timeout).
- spendTxID := spendingTx.TxHash()
- amt := btcutil.Amount(fakeSignDesc.Output.Value)
-
- reports = append(reports, &channeldb.ResolverReport{
- OutPoint: testChanPoint2,
- Amount: amt,
- ResolverType: channeldb.ResolverTypeOutgoingHtlc,
- ResolverOutcome: testCase.outcome,
- SpendTxID: &spendTxID,
- })
-
- for _, report := range reports {
- assertResolverReport(t, reportChan, report)
- }
-
- wg.Wait()
-
- // Finally, the resolver should be marked as resolved.
- if !resolver.resolved {
- t.Fatalf("resolver should be marked as resolved")
- }
- }
-}
diff --git a/lnd/contractcourt/interfaces.go b/lnd/contractcourt/interfaces.go
deleted file mode 100644
index b79de79e..00000000
--- a/lnd/contractcourt/interfaces.go
+++ /dev/null
@@ -1,68 +0,0 @@
-package contractcourt
-
-import (
- "io"
-
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/lnd/channeldb"
- "github.com/pkt-cash/pktd/lnd/htlcswitch/hop"
- "github.com/pkt-cash/pktd/lnd/input"
- "github.com/pkt-cash/pktd/lnd/invoices"
- "github.com/pkt-cash/pktd/lnd/lntypes"
- "github.com/pkt-cash/pktd/lnd/lnwallet/chainfee"
- "github.com/pkt-cash/pktd/lnd/lnwire"
- "github.com/pkt-cash/pktd/lnd/sweep"
- "github.com/pkt-cash/pktd/wire"
-)
-
-// Registry is an interface which represents the invoice registry.
-type Registry interface {
- // LookupInvoice attempts to look up an invoice according to its 32
- // byte payment hash.
- LookupInvoice(lntypes.Hash) (channeldb.Invoice, er.R)
-
- // NotifyExitHopHtlc attempts to mark an invoice as settled. If the
- // invoice is a debug invoice, then this method is a noop as debug
- // invoices are never fully settled. The return value describes how the
- // htlc should be resolved. If the htlc cannot be resolved immediately,
- // the resolution is sent on the passed in hodlChan later.
- NotifyExitHopHtlc(payHash lntypes.Hash, paidAmount lnwire.MilliSatoshi,
- expiry uint32, currentHeight int32,
- circuitKey channeldb.CircuitKey, hodlChan chan<- interface{},
- payload invoices.Payload) (invoices.HtlcResolution, er.R)
-
- // HodlUnsubscribeAll unsubscribes from all htlc resolutions.
- HodlUnsubscribeAll(subscriber chan<- interface{})
-}
-
-// OnionProcessor is an interface used to decode onion blobs.
-type OnionProcessor interface {
- // ReconstructHopIterator attempts to decode a valid sphinx packet from
- // the passed io.Reader instance.
- ReconstructHopIterator(r io.Reader, rHash []byte) (hop.Iterator, er.R)
-}
-
-// UtxoSweeper defines the sweep functions that contract court requires.
-type UtxoSweeper interface {
- // SweepInput sweeps inputs back into the wallet.
- SweepInput(input input.Input, params sweep.Params) (chan sweep.Result,
- er.R)
-
- // CreateSweepTx accepts a list of inputs and signs and generates a txn
- // that spends from them. This method also makes an accurate fee
- // estimate before generating the required witnesses.
- CreateSweepTx(inputs []input.Input, feePref sweep.FeePreference,
- currentBlockHeight uint32) (*wire.MsgTx, er.R)
-
- // RelayFeePerKW returns the minimum fee rate required for transactions
- // to be relayed.
- RelayFeePerKW() chainfee.SatPerKWeight
-
- // UpdateParams allows updating the sweep parameters of a pending input
- // in the UtxoSweeper. This function can be used to provide an updated
- // fee preference that will be used for a new sweep transaction of the
- // input that will act as a replacement transaction (RBF) of the
- // original sweeping transaction, if any.
- UpdateParams(input wire.OutPoint, params sweep.ParamsUpdate) (
- chan sweep.Result, er.R)
-}
diff --git a/lnd/contractcourt/mock_registry_test.go b/lnd/contractcourt/mock_registry_test.go
deleted file mode 100644
index bd187b8e..00000000
--- a/lnd/contractcourt/mock_registry_test.go
+++ /dev/null
@@ -1,52 +0,0 @@
-package contractcourt
-
-import (
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/lnd/channeldb"
- "github.com/pkt-cash/pktd/lnd/invoices"
- "github.com/pkt-cash/pktd/lnd/lntypes"
- "github.com/pkt-cash/pktd/lnd/lnwire"
-)
-
-type notifyExitHopData struct {
- payHash lntypes.Hash
- paidAmount lnwire.MilliSatoshi
- hodlChan chan<- interface{}
- expiry uint32
- currentHeight int32
-}
-
-type mockRegistry struct {
- notifyChan chan notifyExitHopData
- notifyErr *er.ErrorCode
- notifyResolution invoices.HtlcResolution
-}
-
-func (r *mockRegistry) NotifyExitHopHtlc(payHash lntypes.Hash,
- paidAmount lnwire.MilliSatoshi, expiry uint32, currentHeight int32,
- circuitKey channeldb.CircuitKey, hodlChan chan<- interface{},
- payload invoices.Payload) (invoices.HtlcResolution, er.R) {
-
- r.notifyChan <- notifyExitHopData{
- hodlChan: hodlChan,
- payHash: payHash,
- paidAmount: paidAmount,
- expiry: expiry,
- currentHeight: currentHeight,
- }
-
- var e er.R
- if r.notifyErr != nil {
- e = r.notifyErr.Default()
- }
-
- return r.notifyResolution, e
-}
-
-func (r *mockRegistry) HodlUnsubscribeAll(subscriber chan<- interface{}) {}
-
-func (r *mockRegistry) LookupInvoice(lntypes.Hash) (channeldb.Invoice,
- er.R) {
-
- return channeldb.Invoice{}, channeldb.ErrInvoiceNotFound.Default()
-}
diff --git a/lnd/contractcourt/utils_test.go b/lnd/contractcourt/utils_test.go
deleted file mode 100644
index 2bf81b41..00000000
--- a/lnd/contractcourt/utils_test.go
+++ /dev/null
@@ -1,26 +0,0 @@
-package contractcourt
-
-import (
- "os"
- "runtime/pprof"
- "testing"
- "time"
-)
-
-// timeout implements a test level timeout.
-func timeout(t *testing.T) func() {
- done := make(chan struct{})
- go func() {
- select {
- case <-time.After(5 * time.Second):
- pprof.Lookup("goroutine").WriteTo(os.Stdout, 1)
-
- panic("test timeout")
- case <-done:
- }
- }()
-
- return func() {
- close(done)
- }
-}
diff --git a/lnd/contrib/lncli.bash-completion b/lnd/contrib/lncli.bash-completion
deleted file mode 100644
index 8388942f..00000000
--- a/lnd/contrib/lncli.bash-completion
+++ /dev/null
@@ -1,53 +0,0 @@
-# bash programmable completion for lncli
-# copy to /etc/bash_completion.d and restart your shell session
-# Copyright (c) by Andreas M. Antonopoulos
-# Distributed under the MIT software license, see the accompanying
-# file COPYING or http://www.opensource.org/licenses/mit-license.php.
-
-_lncli() {
- local cur prev words=() cword
- local lncli
-
- # lncli might not be in $PATH
- lncli="$1"
-
- COMPREPLY=()
- _get_comp_words_by_ref -n = cur prev words cword
-
- case "$prev" in
- # example of further completion
- newaddress)
- COMPREPLY=( $( compgen -W "p2wkh np2wkh" -- "$cur" ) )
- return 0
- ;;
- esac
-
- case "$cur" in
- -*=*) # prevent nonsense completions
- return 0
- ;;
- *)
- local helpopts globalcmds
-
- # get the global options, starting with --
- if [[ -z "$cur" || "$cur" =~ ^- ]]; then
- globalcmds=$($lncli help 2>&1 | awk '$1 ~ /^-/ { sub(/,/, ""); print $1}')
- fi
-
- # get the regular commands
- if [[ -z "$cur" || "$cur" =~ ^[a-z] ]]; then
- helpopts=$($lncli help 2>/dev/null | awk '$1 ~ /^[a-z]/ { print $1; }' )
- fi
-
- COMPREPLY=( $( compgen -W "$helpopts $globalcmds" -X "*," -- "$cur" ) )
- esac
-} &&
-complete -F _lncli lncli
-
-# Local variables:
-# mode: shell-script
-# sh-basic-offset: 4
-# sh-indent-comment: t
-# indent-tabs-mode: nil
-# End:
-# ex: ts=4 sw=4 et filetype=sh
diff --git a/lnd/dev.Dockerfile b/lnd/dev.Dockerfile
deleted file mode 100644
index db55ce12..00000000
--- a/lnd/dev.Dockerfile
+++ /dev/null
@@ -1,37 +0,0 @@
-FROM golang:1.13-alpine as builder
-
-LABEL maintainer="Olaoluwa Osuntokun "
-
-# Force Go to use the cgo based DNS resolver. This is required to ensure DNS
-# queries required to connect to linked containers succeed.
-ENV GODEBUG netdns=cgo
-
-# Install dependencies and install/build lnd.
-RUN apk add --no-cache --update alpine-sdk \
- git \
- make
-
-# Copy in the local repository to build from.
-COPY . /go/src/github.com/lightningnetwork/lnd
-
-RUN cd /go/src/github.com/lightningnetwork/lnd \
-&& make \
-&& make install tags="signrpc walletrpc chainrpc invoicesrpc"
-
-# Start a new, final image to reduce size.
-FROM alpine as final
-
-# Expose lnd ports (server, rpc).
-EXPOSE 9735 10009
-
-# Copy the binaries and entrypoint from the builder image.
-COPY --from=builder /go/bin/lncli /bin/
-COPY --from=builder /go/bin/lnd /bin/
-
-# Add bash.
-RUN apk add --no-cache \
- bash
-
-# Copy the entrypoint script.
-COPY "docker/lnd/start-lnd.sh" .
-RUN chmod +x start-lnd.sh
diff --git a/lnd/discovery/bootstrapper.go b/lnd/discovery/bootstrapper.go
deleted file mode 100644
index a1681cd5..00000000
--- a/lnd/discovery/bootstrapper.go
+++ /dev/null
@@ -1,535 +0,0 @@
-package discovery
-
-import (
- "bytes"
- "crypto/rand"
- "crypto/sha256"
- "fmt"
- prand "math/rand"
- "net"
- "strconv"
- "strings"
- "time"
-
- "github.com/davecgh/go-spew/spew"
- "github.com/miekg/dns"
- "github.com/pkt-cash/pktd/btcec"
- "github.com/pkt-cash/pktd/btcutil/bech32"
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/lnd/autopilot"
- "github.com/pkt-cash/pktd/lnd/lnwire"
- "github.com/pkt-cash/pktd/lnd/tor"
- "github.com/pkt-cash/pktd/pktlog/log"
-)
-
-func init() {
- prand.Seed(time.Now().Unix())
-}
-
-// NetworkPeerBootstrapper is an interface that represents an initial peer
-// bootstrap mechanism. This interface is to be used to bootstrap a new peer to
-// the connection by providing it with the pubkey+address of a set of existing
-// peers on the network. Several bootstrap mechanisms can be implemented such
-// as DNS, in channel graph, DHT's, etc.
-type NetworkPeerBootstrapper interface {
- // SampleNodeAddrs uniformly samples a set of specified address from
- // the network peer bootstrapper source. The num addrs field passed in
- // denotes how many valid peer addresses to return. The passed set of
- // node nodes allows the caller to ignore a set of nodes perhaps
- // because they already have connections established.
- SampleNodeAddrs(numAddrs uint32,
- ignore map[autopilot.NodeID]struct{}) ([]*lnwire.NetAddress, er.R)
-
- // Name returns a human readable string which names the concrete
- // implementation of the NetworkPeerBootstrapper.
- Name() string
-}
-
-// MultiSourceBootstrap attempts to utilize a set of NetworkPeerBootstrapper
-// passed in to return the target (numAddrs) number of peer addresses that can
-// be used to bootstrap a peer just joining the Lightning Network. Each
-// bootstrapper will be queried successively until the target amount is met. If
-// the ignore map is populated, then the bootstrappers will be instructed to
-// skip those nodes.
-func MultiSourceBootstrap(ignore map[autopilot.NodeID]struct{}, numAddrs uint32,
- bootstrappers ...NetworkPeerBootstrapper) ([]*lnwire.NetAddress, er.R) {
-
- // We'll randomly shuffle our bootstrappers before querying them in
- // order to avoid from querying the same bootstrapper method over and
- // over, as some of these might tend to provide better/worse results
- // than others.
- bootstrappers = shuffleBootstrappers(bootstrappers)
-
- var addrs []*lnwire.NetAddress
- for _, bootstrapper := range bootstrappers {
- // If we already have enough addresses, then we can exit early
- // w/o querying the additional bootstrappers.
- if uint32(len(addrs)) >= numAddrs {
- break
- }
-
- log.Infof("Attempting to bootstrap with: %v", bootstrapper.Name())
-
- // If we still need additional addresses, then we'll compute
- // the number of address remaining that we need to fetch.
- numAddrsLeft := numAddrs - uint32(len(addrs))
- log.Tracef("Querying for %v addresses", numAddrsLeft)
- netAddrs, err := bootstrapper.SampleNodeAddrs(numAddrsLeft, ignore)
- if err != nil {
- // If we encounter an error with a bootstrapper, then
- // we'll continue on to the next available
- // bootstrapper.
- log.Errorf("Unable to query bootstrapper %v: %v",
- bootstrapper.Name(), err)
- continue
- }
-
- addrs = append(addrs, netAddrs...)
- }
-
- if len(addrs) == 0 {
- return nil, er.New("no addresses found")
- }
-
- log.Infof("Obtained %v addrs to bootstrap network with", len(addrs))
-
- return addrs, nil
-}
-
-// shuffleBootstrappers shuffles the set of bootstrappers in order to avoid
-// querying the same bootstrapper over and over. To shuffle the set of
-// candidates, we use a version of the Fisher–Yates shuffle algorithm.
-func shuffleBootstrappers(candidates []NetworkPeerBootstrapper) []NetworkPeerBootstrapper {
- shuffled := make([]NetworkPeerBootstrapper, len(candidates))
- perm := prand.Perm(len(candidates))
-
- for i, v := range perm {
- shuffled[v] = candidates[i]
- }
-
- return shuffled
-}
-
-// ChannelGraphBootstrapper is an implementation of the NetworkPeerBootstrapper
-// which attempts to retrieve advertised peers directly from the active channel
-// graph. This instance requires a backing autopilot.ChannelGraph instance in
-// order to operate properly.
-type ChannelGraphBootstrapper struct {
- chanGraph autopilot.ChannelGraph
-
- // hashAccumulator is a set of 32 random bytes that are read upon the
- // creation of the channel graph bootstrapper. We use this value to
- // randomly select nodes within the known graph to connect to. After
- // each selection, we rotate the accumulator by hashing it with itself.
- hashAccumulator [32]byte
-
- tried map[autopilot.NodeID]struct{}
-}
-
-// A compile time assertion to ensure that ChannelGraphBootstrapper meets the
-// NetworkPeerBootstrapper interface.
-var _ NetworkPeerBootstrapper = (*ChannelGraphBootstrapper)(nil)
-
-// NewGraphBootstrapper returns a new instance of a ChannelGraphBootstrapper
-// backed by an active autopilot.ChannelGraph instance. This type of network
-// peer bootstrapper will use the authenticated nodes within the known channel
-// graph to bootstrap connections.
-func NewGraphBootstrapper(cg autopilot.ChannelGraph) (NetworkPeerBootstrapper, er.R) {
-
- c := &ChannelGraphBootstrapper{
- chanGraph: cg,
- tried: make(map[autopilot.NodeID]struct{}),
- }
-
- if _, err := rand.Read(c.hashAccumulator[:]); err != nil {
- return nil, er.E(err)
- }
-
- return c, nil
-}
-
-// SampleNodeAddrs uniformly samples a set of specified address from the
-// network peer bootstrapper source. The num addrs field passed in denotes how
-// many valid peer addresses to return.
-//
-// NOTE: Part of the NetworkPeerBootstrapper interface.
-func (c *ChannelGraphBootstrapper) SampleNodeAddrs(numAddrs uint32,
- ignore map[autopilot.NodeID]struct{}) ([]*lnwire.NetAddress, er.R) {
-
- // We'll merge the ignore map with our currently selected map in order
- // to ensure we don't return any duplicate nodes.
- for n := range ignore {
- c.tried[n] = struct{}{}
- }
-
- // In order to bootstrap, we'll iterate all the nodes in the channel
- // graph, accumulating nodes until either we go through all active
- // nodes, or we reach our limit. We ensure that we meet the randomly
- // sample constraint as we maintain an xor accumulator to ensure we
- // randomly sample nodes independent of the iteration of the channel
- // graph.
- sampleAddrs := func() ([]*lnwire.NetAddress, er.R) {
- var a []*lnwire.NetAddress
-
- err := c.chanGraph.ForEachNode(func(node autopilot.Node) er.R {
- nID := autopilot.NodeID(node.PubKey())
- if _, ok := c.tried[nID]; ok {
- return nil
- }
-
- // We'll select the first node we come across who's
- // public key is less than our current accumulator
- // value. When comparing, we skip the first byte as
- // it's 50/50. If it isn't less, than then we'll
- // continue forward.
- nodePubKeyBytes := node.PubKey()
- if bytes.Compare(c.hashAccumulator[:], nodePubKeyBytes[1:]) > 0 {
- return nil
- }
-
- for _, nodeAddr := range node.Addrs() {
- // If we haven't yet reached our limit, then
- // we'll copy over the details of this node
- // into the set of addresses to be returned.
- switch nodeAddr.(type) {
- case *net.TCPAddr, *tor.OnionAddr:
- default:
- // If this isn't a valid address
- // supported by the protocol, then we'll
- // skip this node.
- return nil
- }
-
- nodePub, err := btcec.ParsePubKey(
- nodePubKeyBytes[:], btcec.S256(),
- )
- if err != nil {
- return err
- }
-
- // At this point, we've found an eligible node,
- // so we'll return early with our shibboleth
- // error.
- a = append(a, &lnwire.NetAddress{
- IdentityKey: nodePub,
- Address: nodeAddr,
- })
- }
-
- c.tried[nID] = struct{}{}
-
- return er.LoopBreak
- })
- if err != nil && !er.IsLoopBreak(err) {
- return nil, err
- }
-
- return a, nil
- }
-
- // We'll loop and sample new addresses from the graph source until
- // we've reached our target number of outbound connections or we hit 50
- // attempts, which ever comes first.
- var (
- addrs []*lnwire.NetAddress
- tries uint32
- )
- for tries < 30 && uint32(len(addrs)) < numAddrs {
- sampleAddrs, err := sampleAddrs()
- if err != nil {
- return nil, err
- }
-
- tries++
-
- // We'll now rotate our hash accumulator one value forwards.
- c.hashAccumulator = sha256.Sum256(c.hashAccumulator[:])
-
- // If this attempt didn't yield any addresses, then we'll exit
- // early.
- if len(sampleAddrs) == 0 {
- continue
- }
-
- addrs = append(addrs, sampleAddrs...)
- }
-
- log.Tracef("Ending hash accumulator state: %x", c.hashAccumulator)
-
- return addrs, nil
-}
-
-// Name returns a human readable string which names the concrete implementation
-// of the NetworkPeerBootstrapper.
-//
-// NOTE: Part of the NetworkPeerBootstrapper interface.
-func (c *ChannelGraphBootstrapper) Name() string {
- return "Authenticated Channel Graph"
-}
-
-// DNSSeedBootstrapper as an implementation of the NetworkPeerBootstrapper
-// interface which implements peer bootstrapping via a special DNS seed as
-// defined in BOLT-0010. For further details concerning Lightning's current DNS
-// boot strapping protocol, see this link:
-// * https://github.com/lightningnetwork/lightning-rfc/blob/master/10-dns-bootstrap.md
-type DNSSeedBootstrapper struct {
- // dnsSeeds is an array of two tuples we'll use for bootstrapping. The
- // first item in the tuple is the primary host we'll use to attempt the
- // SRV lookup we require. If we're unable to receive a response over
- // UDP, then we'll fall back to manual TCP resolution. The second item
- // in the tuple is a special A record that we'll query in order to
- // receive the IP address of the current authoritative DNS server for
- // the network seed.
- dnsSeeds [][2]string
- net tor.Net
-
- // timeout is the maximum amount of time a dial will wait for a connect to
- // complete.
- timeout time.Duration
-}
-
-// A compile time assertion to ensure that DNSSeedBootstrapper meets the
-// NetworkPeerjBootstrapper interface.
-var _ NetworkPeerBootstrapper = (*ChannelGraphBootstrapper)(nil)
-
-// NewDNSSeedBootstrapper returns a new instance of the DNSSeedBootstrapper.
-// The set of passed seeds should point to DNS servers that properly implement
-// Lightning's DNS peer bootstrapping protocol as defined in BOLT-0010. The set
-// of passed DNS seeds should come in pairs, with the second host name to be
-// used as a fallback for manual TCP resolution in the case of an error
-// receiving the UDP response. The second host should return a single A record
-// with the IP address of the authoritative name server.
-func NewDNSSeedBootstrapper(
- seeds [][2]string, net tor.Net,
- timeout time.Duration) NetworkPeerBootstrapper {
- return &DNSSeedBootstrapper{dnsSeeds: seeds, net: net, timeout: timeout}
-}
-
-// fallBackSRVLookup attempts to manually query for SRV records we need to
-// properly bootstrap. We do this by querying the special record at the "soa."
-// sub-domain of supporting DNS servers. The retuned IP address will be the IP
-// address of the authoritative DNS server. Once we have this IP address, we'll
-// connect manually over TCP to request the SRV record. This is necessary as
-// the records we return are currently too large for a class of resolvers,
-// causing them to be filtered out. The targetEndPoint is the original end
-// point that was meant to be hit.
-func (d *DNSSeedBootstrapper) fallBackSRVLookup(soaShim string,
- targetEndPoint string) ([]*net.SRV, er.R) {
-
- log.Tracef("Attempting to query fallback DNS seed")
-
- // First, we'll lookup the IP address of the server that will act as
- // our shim.
- addrs, err := d.net.LookupHost(soaShim)
- if err != nil {
- return nil, err
- }
-
- // Once we have the IP address, we'll establish a TCP connection using
- // port 53.
- dnsServer := net.JoinHostPort(addrs[0], "53")
- conn, err := d.net.Dial("tcp", dnsServer, d.timeout)
- if err != nil {
- return nil, err
- }
-
- dnsHost := fmt.Sprintf("_nodes._tcp.%v.", targetEndPoint)
- dnsConn := &dns.Conn{Conn: conn}
- defer dnsConn.Close()
-
- // With the connection established, we'll craft our SRV query, write
- // toe request, then wait for the server to give our response.
- msg := new(dns.Msg)
- msg.SetQuestion(dnsHost, dns.TypeSRV)
- if err := dnsConn.WriteMsg(msg); err != nil {
- return nil, er.E(err)
- }
- resp, errr := dnsConn.ReadMsg()
- if errr != nil {
- return nil, er.E(errr)
- }
-
- // If the message response code was not the success code, fail.
- if resp.Rcode != dns.RcodeSuccess {
- return nil, er.Errorf("unsuccessful SRV request, "+
- "received: %v", resp.Rcode)
- }
-
- // Retrieve the RR(s) of the Answer section, and covert to the format
- // that net.LookupSRV would normally return.
- var rrs []*net.SRV
- for _, rr := range resp.Answer {
- srv := rr.(*dns.SRV)
- rrs = append(rrs, &net.SRV{
- Target: srv.Target,
- Port: srv.Port,
- Priority: srv.Priority,
- Weight: srv.Weight,
- })
- }
-
- return rrs, nil
-}
-
-// SampleNodeAddrs uniformly samples a set of specified address from the
-// network peer bootstrapper source. The num addrs field passed in denotes how
-// many valid peer addresses to return. The set of DNS seeds are used
-// successively to retrieve eligible target nodes.
-func (d *DNSSeedBootstrapper) SampleNodeAddrs(numAddrs uint32,
- ignore map[autopilot.NodeID]struct{}) ([]*lnwire.NetAddress, er.R) {
-
- var netAddrs []*lnwire.NetAddress
-
- // We'll try all the registered DNS seeds, exiting early if one of them
- // gives us all the peers we need.
- //
- // TODO(roasbeef): should combine results from both
-search:
- for _, dnsSeedTuple := range d.dnsSeeds {
- // We'll first query the seed with an SRV record so we can
- // obtain a random sample of the encoded public keys of nodes.
- // We use the lndLookupSRV function for this task.
- primarySeed := dnsSeedTuple[0]
- _, addrs, err := d.net.LookupSRV(
- "nodes", "tcp", primarySeed, d.timeout,
- )
- if err != nil {
- log.Tracef("Unable to lookup SRV records via "+
- "primary seed (%v): %v", primarySeed, err)
-
- log.Trace("Falling back to secondary")
-
- // If the host of the secondary seed is blank, then
- // we'll bail here as we can't proceed.
- if dnsSeedTuple[1] == "" {
- log.Tracef("DNS seed %v has no secondary, "+
- "skipping fallback", primarySeed)
- continue
- }
-
- // If we get an error when trying to query via the
- // primary seed, we'll fallback to the secondary seed
- // before concluding failure.
- soaShim := dnsSeedTuple[1]
- addrs, err = d.fallBackSRVLookup(
- soaShim, primarySeed,
- )
- if err != nil {
- log.Tracef("Unable to query fall "+
- "back dns seed (%v): %v", soaShim, err)
- continue
- }
-
- log.Tracef("Successfully queried fallback DNS seed")
- }
-
- log.Tracef("Retrieved SRV records from dns seed: %v",
- log.C(func() string {
- return spew.Sdump(addrs)
- }),
- )
-
- // Next, we'll need to issue an A record request for each of
- // the nodes, skipping it if nothing comes back.
- for _, nodeSrv := range addrs {
- if uint32(len(netAddrs)) >= numAddrs {
- break search
- }
-
- // With the SRV target obtained, we'll now perform
- // another query to obtain the IP address for the
- // matching bech32 encoded node key. We use the
- // lndLookup function for this task.
- bechNodeHost := nodeSrv.Target
- addrs, err := d.net.LookupHost(bechNodeHost)
- if err != nil {
- return nil, err
- }
-
- if len(addrs) == 0 {
- log.Tracef("No addresses for %v, skipping",
- bechNodeHost)
- continue
- }
-
- log.Tracef("Attempting to convert: %v", bechNodeHost)
-
- // If the host isn't correctly formatted, then we'll
- // skip it.
- if len(bechNodeHost) == 0 ||
- !strings.Contains(bechNodeHost, ".") {
-
- continue
- }
-
- // If we have a set of valid addresses, then we'll need
- // to parse the public key from the original bech32
- // encoded string.
- bechNode := strings.Split(bechNodeHost, ".")
- _, nodeBytes5Bits, err := bech32.Decode(bechNode[0])
- if err != nil {
- return nil, err
- }
-
- // Once we have the bech32 decoded pubkey, we'll need
- // to convert the 5-bit word grouping into our regular
- // 8-bit word grouping so we can convert it into a
- // public key.
- nodeBytes, err := bech32.ConvertBits(
- nodeBytes5Bits, 5, 8, false,
- )
- if err != nil {
- return nil, err
- }
- nodeKey, err := btcec.ParsePubKey(
- nodeBytes, btcec.S256(),
- )
- if err != nil {
- return nil, err
- }
-
- // If we have an ignore list, and this node is in the
- // ignore list, then we'll go to the next candidate.
- if ignore != nil {
- nID := autopilot.NewNodeID(nodeKey)
- if _, ok := ignore[nID]; ok {
- continue
- }
- }
-
- // Finally we'll convert the host:port peer to a proper
- // TCP address to use within the lnwire.NetAddress. We
- // don't need to use the lndResolveTCP function here
- // because we already have the host:port peer.
- addr := net.JoinHostPort(
- addrs[0],
- strconv.FormatUint(uint64(nodeSrv.Port), 10),
- )
- tcpAddr, errr := net.ResolveTCPAddr("tcp", addr)
- if errr != nil {
- return nil, er.E(errr)
- }
-
- // Finally, with all the information parsed, we'll
- // return this fully valid address as a connection
- // attempt.
- lnAddr := &lnwire.NetAddress{
- IdentityKey: nodeKey,
- Address: tcpAddr,
- }
-
- log.Tracef("Obtained %v as valid reachable "+
- "node", lnAddr)
-
- netAddrs = append(netAddrs, lnAddr)
- }
- }
-
- return netAddrs, nil
-}
-
-// Name returns a human readable string which names the concrete
-// implementation of the NetworkPeerBootstrapper.
-func (d *DNSSeedBootstrapper) Name() string {
- return fmt.Sprintf("BOLT-0010 DNS Seed: %v", d.dnsSeeds)
-}
diff --git a/lnd/discovery/chan_series.go b/lnd/discovery/chan_series.go
deleted file mode 100644
index b69b0be5..00000000
--- a/lnd/discovery/chan_series.go
+++ /dev/null
@@ -1,350 +0,0 @@
-package discovery
-
-import (
- "time"
-
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/chaincfg/chainhash"
- "github.com/pkt-cash/pktd/lnd/channeldb"
- "github.com/pkt-cash/pktd/lnd/lnwire"
- "github.com/pkt-cash/pktd/lnd/netann"
- "github.com/pkt-cash/pktd/lnd/routing/route"
- "github.com/pkt-cash/pktd/pktlog/log"
-)
-
-// ChannelGraphTimeSeries is an interface that provides time and block based
-// querying into our view of the channel graph. New channels will have
-// monotonically increasing block heights, and new channel updates will have
-// increasing timestamps. Once we connect to a peer, we'll use the methods in
-// this interface to determine if we're already in sync, or need to request
-// some new information from them.
-type ChannelGraphTimeSeries interface {
- // HighestChanID should return the channel ID of the channel we know of
- // that's furthest in the target chain. This channel will have a block
- // height that's close to the current tip of the main chain as we
- // know it. We'll use this to start our QueryChannelRange dance with
- // the remote node.
- HighestChanID(chain chainhash.Hash) (*lnwire.ShortChannelID, er.R)
-
- // UpdatesInHorizon returns all known channel and node updates with an
- // update timestamp between the start time and end time. We'll use this
- // to catch up a remote node to the set of channel updates that they
- // may have missed out on within the target chain.
- UpdatesInHorizon(chain chainhash.Hash,
- startTime time.Time, endTime time.Time) ([]lnwire.Message, er.R)
-
- // FilterKnownChanIDs takes a target chain, and a set of channel ID's,
- // and returns a filtered set of chan ID's. This filtered set of chan
- // ID's represents the ID's that we don't know of which were in the
- // passed superSet.
- FilterKnownChanIDs(chain chainhash.Hash,
- superSet []lnwire.ShortChannelID) ([]lnwire.ShortChannelID, er.R)
-
- // FilterChannelRange returns the set of channels that we created
- // between the start height and the end height. We'll use this to to a
- // remote peer's QueryChannelRange message.
- FilterChannelRange(chain chainhash.Hash,
- startHeight, endHeight uint32) ([]lnwire.ShortChannelID, er.R)
-
- // FetchChanAnns returns a full set of channel announcements as well as
- // their updates that match the set of specified short channel ID's.
- // We'll use this to reply to a QueryShortChanIDs message sent by a
- // remote peer. The response will contain a unique set of
- // ChannelAnnouncements, the latest ChannelUpdate for each of the
- // announcements, and a unique set of NodeAnnouncements.
- FetchChanAnns(chain chainhash.Hash,
- shortChanIDs []lnwire.ShortChannelID) ([]lnwire.Message, er.R)
-
- // FetchChanUpdates returns the latest channel update messages for the
- // specified short channel ID. If no channel updates are known for the
- // channel, then an empty slice will be returned.
- FetchChanUpdates(chain chainhash.Hash,
- shortChanID lnwire.ShortChannelID) ([]*lnwire.ChannelUpdate, er.R)
-}
-
-// ChanSeries is an implementation of the ChannelGraphTimeSeries
-// interface backed by the channeldb ChannelGraph database. We'll provide this
-// implementation to the AuthenticatedGossiper so it can properly use the
-// in-protocol channel range queries to quickly and efficiently synchronize our
-// channel state with all peers.
-type ChanSeries struct {
- graph *channeldb.ChannelGraph
-}
-
-// NewChanSeries constructs a new ChanSeries backed by a channeldb.ChannelGraph.
-// The returned ChanSeries implements the ChannelGraphTimeSeries interface.
-func NewChanSeries(graph *channeldb.ChannelGraph) *ChanSeries {
- return &ChanSeries{
- graph: graph,
- }
-}
-
-// HighestChanID should return is the channel ID of the channel we know of
-// that's furthest in the target chain. This channel will have a block height
-// that's close to the current tip of the main chain as we know it. We'll use
-// this to start our QueryChannelRange dance with the remote node.
-//
-// NOTE: This is part of the ChannelGraphTimeSeries interface.
-func (c *ChanSeries) HighestChanID(chain chainhash.Hash) (*lnwire.ShortChannelID, er.R) {
- chanID, err := c.graph.HighestChanID()
- if err != nil {
- return nil, err
- }
-
- shortChanID := lnwire.NewShortChanIDFromInt(chanID)
- return &shortChanID, nil
-}
-
-// UpdatesInHorizon returns all known channel and node updates with an update
-// timestamp between the start time and end time. We'll use this to catch up a
-// remote node to the set of channel updates that they may have missed out on
-// within the target chain.
-//
-// NOTE: This is part of the ChannelGraphTimeSeries interface.
-func (c *ChanSeries) UpdatesInHorizon(chain chainhash.Hash,
- startTime time.Time, endTime time.Time) ([]lnwire.Message, er.R) {
-
- var updates []lnwire.Message
-
- // First, we'll query for all the set of channels that have an update
- // that falls within the specified horizon.
- chansInHorizon, err := c.graph.ChanUpdatesInHorizon(
- startTime, endTime,
- )
- if err != nil {
- return nil, err
- }
- for _, channel := range chansInHorizon {
- // If the channel hasn't been fully advertised yet, or is a
- // private channel, then we'll skip it as we can't construct a
- // full authentication proof if one is requested.
- if channel.Info.AuthProof == nil {
- continue
- }
-
- chanAnn, edge1, edge2, err := netann.CreateChanAnnouncement(
- channel.Info.AuthProof, channel.Info, channel.Policy1,
- channel.Policy2,
- )
- if err != nil {
- return nil, err
- }
-
- updates = append(updates, chanAnn)
- if edge1 != nil {
- updates = append(updates, edge1)
- }
- if edge2 != nil {
- updates = append(updates, edge2)
- }
- }
-
- // Next, we'll send out all the node announcements that have an update
- // within the horizon as well. We send these second to ensure that they
- // follow any active channels they have.
- nodeAnnsInHorizon, err := c.graph.NodeUpdatesInHorizon(
- startTime, endTime,
- )
- if err != nil {
- return nil, err
- }
- for _, nodeAnn := range nodeAnnsInHorizon {
- // Ensure we only forward nodes that are publicly advertised to
- // prevent leaking information about nodes.
- isNodePublic, err := c.graph.IsPublicNode(nodeAnn.PubKeyBytes)
- if err != nil {
- log.Errorf("Unable to determine if node %x is "+
- "advertised: %v", nodeAnn.PubKeyBytes, err)
- continue
- }
-
- if !isNodePublic {
- log.Tracef("Skipping forwarding announcement for "+
- "node %x due to being unadvertised",
- nodeAnn.PubKeyBytes)
- continue
- }
-
- nodeUpdate, err := nodeAnn.NodeAnnouncement(true)
- if err != nil {
- return nil, err
- }
-
- updates = append(updates, nodeUpdate)
- }
-
- return updates, nil
-}
-
-// FilterKnownChanIDs takes a target chain, and a set of channel ID's, and
-// returns a filtered set of chan ID's. This filtered set of chan ID's
-// represents the ID's that we don't know of which were in the passed superSet.
-//
-// NOTE: This is part of the ChannelGraphTimeSeries interface.
-func (c *ChanSeries) FilterKnownChanIDs(chain chainhash.Hash,
- superSet []lnwire.ShortChannelID) ([]lnwire.ShortChannelID, er.R) {
-
- chanIDs := make([]uint64, 0, len(superSet))
- for _, chanID := range superSet {
- chanIDs = append(chanIDs, chanID.ToUint64())
- }
-
- newChanIDs, err := c.graph.FilterKnownChanIDs(chanIDs)
- if err != nil {
- return nil, err
- }
-
- filteredIDs := make([]lnwire.ShortChannelID, 0, len(newChanIDs))
- for _, chanID := range newChanIDs {
- filteredIDs = append(
- filteredIDs, lnwire.NewShortChanIDFromInt(chanID),
- )
- }
-
- return filteredIDs, nil
-}
-
-// FilterChannelRange returns the set of channels that we created between the
-// start height and the end height. We'll use this respond to a remote peer's
-// QueryChannelRange message.
-//
-// NOTE: This is part of the ChannelGraphTimeSeries interface.
-func (c *ChanSeries) FilterChannelRange(chain chainhash.Hash,
- startHeight, endHeight uint32) ([]lnwire.ShortChannelID, er.R) {
-
- chansInRange, err := c.graph.FilterChannelRange(startHeight, endHeight)
- if err != nil {
- return nil, err
- }
-
- chanResp := make([]lnwire.ShortChannelID, 0, len(chansInRange))
- for _, chanID := range chansInRange {
- chanResp = append(
- chanResp, lnwire.NewShortChanIDFromInt(chanID),
- )
- }
-
- return chanResp, nil
-}
-
-// FetchChanAnns returns a full set of channel announcements as well as their
-// updates that match the set of specified short channel ID's. We'll use this
-// to reply to a QueryShortChanIDs message sent by a remote peer. The response
-// will contain a unique set of ChannelAnnouncements, the latest ChannelUpdate
-// for each of the announcements, and a unique set of NodeAnnouncements.
-//
-// NOTE: This is part of the ChannelGraphTimeSeries interface.
-func (c *ChanSeries) FetchChanAnns(chain chainhash.Hash,
- shortChanIDs []lnwire.ShortChannelID) ([]lnwire.Message, er.R) {
-
- chanIDs := make([]uint64, 0, len(shortChanIDs))
- for _, chanID := range shortChanIDs {
- chanIDs = append(chanIDs, chanID.ToUint64())
- }
-
- channels, err := c.graph.FetchChanInfos(chanIDs)
- if err != nil {
- return nil, err
- }
-
- // We'll use this map to ensure we don't send the same node
- // announcement more than one time as one node may have many channel
- // anns we'll need to send.
- nodePubsSent := make(map[route.Vertex]struct{})
-
- chanAnns := make([]lnwire.Message, 0, len(channels)*3)
- for _, channel := range channels {
- // If the channel doesn't have an authentication proof, then we
- // won't send it over as it may not yet be finalized, or be a
- // non-advertised channel.
- if channel.Info.AuthProof == nil {
- continue
- }
-
- chanAnn, edge1, edge2, err := netann.CreateChanAnnouncement(
- channel.Info.AuthProof, channel.Info, channel.Policy1,
- channel.Policy2,
- )
- if err != nil {
- return nil, err
- }
-
- chanAnns = append(chanAnns, chanAnn)
- if edge1 != nil {
- chanAnns = append(chanAnns, edge1)
-
- // If this edge has a validated node announcement, that
- // we haven't yet sent, then we'll send that as well.
- nodePub := channel.Policy1.Node.PubKeyBytes
- hasNodeAnn := channel.Policy1.Node.HaveNodeAnnouncement
- if _, ok := nodePubsSent[nodePub]; !ok && hasNodeAnn {
- nodeAnn, err := channel.Policy1.Node.NodeAnnouncement(true)
- if err != nil {
- return nil, err
- }
-
- chanAnns = append(chanAnns, nodeAnn)
- nodePubsSent[nodePub] = struct{}{}
- }
- }
- if edge2 != nil {
- chanAnns = append(chanAnns, edge2)
-
- // If this edge has a validated node announcement, that
- // we haven't yet sent, then we'll send that as well.
- nodePub := channel.Policy2.Node.PubKeyBytes
- hasNodeAnn := channel.Policy2.Node.HaveNodeAnnouncement
- if _, ok := nodePubsSent[nodePub]; !ok && hasNodeAnn {
- nodeAnn, err := channel.Policy2.Node.NodeAnnouncement(true)
- if err != nil {
- return nil, err
- }
-
- chanAnns = append(chanAnns, nodeAnn)
- nodePubsSent[nodePub] = struct{}{}
- }
- }
- }
-
- return chanAnns, nil
-}
-
-// FetchChanUpdates returns the latest channel update messages for the
-// specified short channel ID. If no channel updates are known for the channel,
-// then an empty slice will be returned.
-//
-// NOTE: This is part of the ChannelGraphTimeSeries interface.
-func (c *ChanSeries) FetchChanUpdates(chain chainhash.Hash,
- shortChanID lnwire.ShortChannelID) ([]*lnwire.ChannelUpdate, er.R) {
-
- chanInfo, e1, e2, err := c.graph.FetchChannelEdgesByID(
- shortChanID.ToUint64(),
- )
- if err != nil {
- return nil, err
- }
-
- chanUpdates := make([]*lnwire.ChannelUpdate, 0, 2)
- if e1 != nil {
- chanUpdate, err := netann.ChannelUpdateFromEdge(chanInfo, e1)
- if err != nil {
- return nil, err
- }
-
- chanUpdates = append(chanUpdates, chanUpdate)
- }
- if e2 != nil {
- chanUpdate, err := netann.ChannelUpdateFromEdge(chanInfo, e2)
- if err != nil {
- return nil, err
- }
-
- chanUpdates = append(chanUpdates, chanUpdate)
- }
-
- return chanUpdates, nil
-}
-
-// A compile-time assertion to ensure that ChanSeries meets the
-// ChannelGraphTimeSeries interface.
-var _ ChannelGraphTimeSeries = (*ChanSeries)(nil)
diff --git a/lnd/discovery/gossiper.go b/lnd/discovery/gossiper.go
deleted file mode 100644
index c92ee54c..00000000
--- a/lnd/discovery/gossiper.go
+++ /dev/null
@@ -1,2539 +0,0 @@
-package discovery
-
-import (
- "bytes"
- "runtime"
- "sync"
- "time"
-
- "github.com/davecgh/go-spew/spew"
- "github.com/pkt-cash/pktd/btcec"
- "github.com/pkt-cash/pktd/btcutil"
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/chaincfg/chainhash"
- "github.com/pkt-cash/pktd/lnd/chainntnfs"
- "github.com/pkt-cash/pktd/lnd/channeldb"
- "github.com/pkt-cash/pktd/lnd/lnpeer"
- "github.com/pkt-cash/pktd/lnd/lnwallet"
- "github.com/pkt-cash/pktd/lnd/lnwire"
- "github.com/pkt-cash/pktd/lnd/multimutex"
- "github.com/pkt-cash/pktd/lnd/netann"
- "github.com/pkt-cash/pktd/lnd/routing"
- "github.com/pkt-cash/pktd/lnd/routing/route"
- "github.com/pkt-cash/pktd/lnd/ticker"
- "github.com/pkt-cash/pktd/pktlog/log"
- "github.com/pkt-cash/pktd/wire"
-)
-
-var (
- Err = er.NewErrorType("lnd.discovery")
- // ErrGossiperShuttingDown is an error that is returned if the gossiper
- // is in the process of being shut down.
- ErrGossiperShuttingDown = Err.CodeWithDetail("ErrGossiperShuttingDown", "gossiper is shutting down")
-
- // ErrGossipSyncerNotFound signals that we were unable to find an active
- // gossip syncer corresponding to a gossip query message received from
- // the remote peer.
- ErrGossipSyncerNotFound = Err.CodeWithDetail("ErrGossipSyncerNotFound", "gossip syncer not found")
-)
-
-// optionalMsgFields is a set of optional message fields that external callers
-// can provide that serve useful when processing a specific network
-// announcement.
-type optionalMsgFields struct {
- capacity *btcutil.Amount
- channelPoint *wire.OutPoint
-}
-
-// apply applies the optional fields within the functional options.
-func (f *optionalMsgFields) apply(optionalMsgFields ...OptionalMsgField) {
- for _, optionalMsgField := range optionalMsgFields {
- optionalMsgField(f)
- }
-}
-
-// OptionalMsgField is a functional option parameter that can be used to provide
-// external information that is not included within a network message but serves
-// useful when processing it.
-type OptionalMsgField func(*optionalMsgFields)
-
-// ChannelCapacity is an optional field that lets the gossiper know of the
-// capacity of a channel.
-func ChannelCapacity(capacity btcutil.Amount) OptionalMsgField {
- return func(f *optionalMsgFields) {
- f.capacity = &capacity
- }
-}
-
-// ChannelPoint is an optional field that lets the gossiper know of the outpoint
-// of a channel.
-func ChannelPoint(op wire.OutPoint) OptionalMsgField {
- return func(f *optionalMsgFields) {
- f.channelPoint = &op
- }
-}
-
-// networkMsg couples a routing related wire message with the peer that
-// originally sent it.
-type networkMsg struct {
- peer lnpeer.Peer
- source *btcec.PublicKey
- msg lnwire.Message
- optionalMsgFields *optionalMsgFields
-
- isRemote bool
-
- err chan er.R
-}
-
-// chanPolicyUpdateRequest is a request that is sent to the server when a caller
-// wishes to update a particular set of channels. New ChannelUpdate messages
-// will be crafted to be sent out during the next broadcast epoch and the fee
-// updates committed to the lower layer.
-type chanPolicyUpdateRequest struct {
- edgesToUpdate []EdgeWithInfo
- errChan chan er.R
-}
-
-// Config defines the configuration for the service. ALL elements within the
-// configuration MUST be non-nil for the service to carry out its duties.
-type Config struct {
- // ChainHash is a hash that indicates which resident chain of the
- // AuthenticatedGossiper. Any announcements that don't match this
- // chain hash will be ignored.
- //
- // TODO(roasbeef): eventually make into map so can de-multiplex
- // incoming announcements
- // * also need to do same for Notifier
- ChainHash chainhash.Hash
-
- // Router is the subsystem which is responsible for managing the
- // topology of lightning network. After incoming channel, node, channel
- // updates announcements are validated they are sent to the router in
- // order to be included in the LN graph.
- Router routing.ChannelGraphSource
-
- // ChanSeries is an interfaces that provides access to a time series
- // view of the current known channel graph. Each GossipSyncer enabled
- // peer will utilize this in order to create and respond to channel
- // graph time series queries.
- ChanSeries ChannelGraphTimeSeries
-
- // Notifier is used for receiving notifications of incoming blocks.
- // With each new incoming block found we process previously premature
- // announcements.
- //
- // TODO(roasbeef): could possibly just replace this with an epoch
- // channel.
- Notifier chainntnfs.ChainNotifier
-
- // Broadcast broadcasts a particular set of announcements to all peers
- // that the daemon is connected to. If supplied, the exclude parameter
- // indicates that the target peer should be excluded from the
- // broadcast.
- Broadcast func(skips map[route.Vertex]struct{},
- msg ...lnwire.Message) er.R
-
- // NotifyWhenOnline is a function that allows the gossiper to be
- // notified when a certain peer comes online, allowing it to
- // retry sending a peer message.
- //
- // NOTE: The peerChan channel must be buffered.
- NotifyWhenOnline func(peerPubKey [33]byte, peerChan chan<- lnpeer.Peer)
-
- // NotifyWhenOffline is a function that allows the gossiper to be
- // notified when a certain peer disconnects, allowing it to request a
- // notification for when it reconnects.
- NotifyWhenOffline func(peerPubKey [33]byte) <-chan struct{}
-
- // SelfNodeAnnouncement is a function that fetches our own current node
- // announcement, for use when determining whether we should update our
- // peers about our presence on the network. If the refresh is true, a
- // new and updated announcement will be returned.
- SelfNodeAnnouncement func(refresh bool) (lnwire.NodeAnnouncement, er.R)
-
- // ProofMatureDelta the number of confirmations which is needed before
- // exchange the channel announcement proofs.
- ProofMatureDelta uint32
-
- // TrickleDelay the period of trickle timer which flushes to the
- // network the pending batch of new announcements we've received since
- // the last trickle tick.
- TrickleDelay time.Duration
-
- // RetransmitTicker is a ticker that ticks with a period which
- // indicates that we should check if we need re-broadcast any of our
- // personal channels.
- RetransmitTicker ticker.Ticker
-
- // RebroadcastInterval is the maximum time we wait between sending out
- // channel updates for our active channels and our own node
- // announcement. We do this to ensure our active presence on the
- // network is known, and we are not being considered a zombie node or
- // having zombie channels.
- RebroadcastInterval time.Duration
-
- // WaitingProofStore is a persistent storage of partial channel proof
- // announcement messages. We use it to buffer half of the material
- // needed to reconstruct a full authenticated channel announcement.
- // Once we receive the other half the channel proof, we'll be able to
- // properly validate it and re-broadcast it out to the network.
- //
- // TODO(wilmer): make interface to prevent channeldb dependency.
- WaitingProofStore *channeldb.WaitingProofStore
-
- // MessageStore is a persistent storage of gossip messages which we will
- // use to determine which messages need to be resent for a given peer.
- MessageStore GossipMessageStore
-
- // AnnSigner is an instance of the MessageSigner interface which will
- // be used to manually sign any outgoing channel updates. The signer
- // implementation should be backed by the public key of the backing
- // Lightning node.
- //
- // TODO(roasbeef): extract ann crafting + sign from fundingMgr into
- // here?
- AnnSigner lnwallet.MessageSigner
-
- // NumActiveSyncers is the number of peers for which we should have
- // active syncers with. After reaching NumActiveSyncers, any future
- // gossip syncers will be passive.
- NumActiveSyncers int
-
- // RotateTicker is a ticker responsible for notifying the SyncManager
- // when it should rotate its active syncers. A single active syncer with
- // a chansSynced state will be exchanged for a passive syncer in order
- // to ensure we don't keep syncing with the same peers.
- RotateTicker ticker.Ticker
-
- // HistoricalSyncTicker is a ticker responsible for notifying the
- // syncManager when it should attempt a historical sync with a gossip
- // sync peer.
- HistoricalSyncTicker ticker.Ticker
-
- // ActiveSyncerTimeoutTicker is a ticker responsible for notifying the
- // syncManager when it should attempt to start the next pending
- // activeSyncer due to the current one not completing its state machine
- // within the timeout.
- ActiveSyncerTimeoutTicker ticker.Ticker
-
- // MinimumBatchSize is minimum size of a sub batch of announcement
- // messages.
- MinimumBatchSize int
-
- // SubBatchDelay is the delay between sending sub batches of
- // gossip messages.
- SubBatchDelay time.Duration
-
- // IgnoreHistoricalFilters will prevent syncers from replying with
- // historical data when the remote peer sets a gossip_timestamp_range.
- // This prevents ranges with old start times from causing us to dump the
- // graph on connect.
- IgnoreHistoricalFilters bool
-}
-
-// AuthenticatedGossiper is a subsystem which is responsible for receiving
-// announcements, validating them and applying the changes to router, syncing
-// lightning network with newly connected nodes, broadcasting announcements
-// after validation, negotiating the channel announcement proofs exchange and
-// handling the premature announcements. All outgoing announcements are
-// expected to be properly signed as dictated in BOLT#7, additionally, all
-// incoming message are expected to be well formed and signed. Invalid messages
-// will be rejected by this struct.
-type AuthenticatedGossiper struct {
- // Parameters which are needed to properly handle the start and stop of
- // the service.
- started sync.Once
- stopped sync.Once
-
- // bestHeight is the height of the block at the tip of the main chain
- // as we know it. Accesses *MUST* be done with the gossiper's lock
- // held.
- bestHeight uint32
-
- quit chan struct{}
- wg sync.WaitGroup
-
- // cfg is a copy of the configuration struct that the gossiper service
- // was initialized with.
- cfg *Config
-
- // blockEpochs encapsulates a stream of block epochs that are sent at
- // every new block height.
- blockEpochs *chainntnfs.BlockEpochEvent
-
- // prematureAnnouncements maps a block height to a set of network
- // messages which are "premature" from our PoV. A message is premature
- // if it claims to be anchored in a block which is beyond the current
- // main chain tip as we know it. Premature network messages will be
- // processed once the chain tip as we know it extends to/past the
- // premature height.
- //
- // TODO(roasbeef): limit premature networkMsgs to N
- prematureAnnouncements map[uint32][]*networkMsg
-
- // prematureChannelUpdates is a map of ChannelUpdates we have received
- // that wasn't associated with any channel we know about. We store
- // them temporarily, such that we can reprocess them when a
- // ChannelAnnouncement for the channel is received.
- prematureChannelUpdates map[uint64][]*networkMsg
- pChanUpdMtx sync.Mutex
-
- // networkMsgs is a channel that carries new network broadcasted
- // message from outside the gossiper service to be processed by the
- // networkHandler.
- networkMsgs chan *networkMsg
-
- // chanPolicyUpdates is a channel that requests to update the
- // forwarding policy of a set of channels is sent over.
- chanPolicyUpdates chan *chanPolicyUpdateRequest
-
- // selfKey is the identity public key of the backing Lightning node.
- selfKey *btcec.PublicKey
-
- // channelMtx is used to restrict the database access to one
- // goroutine per channel ID. This is done to ensure that when
- // the gossiper is handling an announcement, the db state stays
- // consistent between when the DB is first read until it's written.
- channelMtx *multimutex.Mutex
-
- rejectMtx sync.RWMutex
- recentRejects map[uint64]struct{}
-
- // syncMgr is a subsystem responsible for managing the gossip syncers
- // for peers currently connected. When a new peer is connected, the
- // manager will create its accompanying gossip syncer and determine
- // whether it should have an activeSync or passiveSync sync type based
- // on how many other gossip syncers are currently active. Any activeSync
- // gossip syncers are started in a round-robin manner to ensure we're
- // not syncing with multiple peers at the same time.
- syncMgr *SyncManager
-
- // reliableSender is a subsystem responsible for handling reliable
- // message send requests to peers. This should only be used for channels
- // that are unadvertised at the time of handling the message since if it
- // is advertised, then peers should be able to get the message from the
- // network.
- reliableSender *reliableSender
-
- sync.Mutex
-}
-
-// New creates a new AuthenticatedGossiper instance, initialized with the
-// passed configuration parameters.
-func New(cfg Config, selfKey *btcec.PublicKey) *AuthenticatedGossiper {
- gossiper := &AuthenticatedGossiper{
- selfKey: selfKey,
- cfg: &cfg,
- networkMsgs: make(chan *networkMsg),
- quit: make(chan struct{}),
- chanPolicyUpdates: make(chan *chanPolicyUpdateRequest),
- prematureAnnouncements: make(map[uint32][]*networkMsg),
- prematureChannelUpdates: make(map[uint64][]*networkMsg),
- channelMtx: multimutex.NewMutex(),
- recentRejects: make(map[uint64]struct{}),
- syncMgr: newSyncManager(&SyncManagerCfg{
- ChainHash: cfg.ChainHash,
- ChanSeries: cfg.ChanSeries,
- RotateTicker: cfg.RotateTicker,
- HistoricalSyncTicker: cfg.HistoricalSyncTicker,
- NumActiveSyncers: cfg.NumActiveSyncers,
- IgnoreHistoricalFilters: cfg.IgnoreHistoricalFilters,
- }),
- }
-
- gossiper.reliableSender = newReliableSender(&reliableSenderCfg{
- NotifyWhenOnline: cfg.NotifyWhenOnline,
- NotifyWhenOffline: cfg.NotifyWhenOffline,
- MessageStore: cfg.MessageStore,
- IsMsgStale: gossiper.isMsgStale,
- })
-
- return gossiper
-}
-
-// EdgeWithInfo contains the information that is required to update an edge.
-type EdgeWithInfo struct {
- // Info describes the channel.
- Info *channeldb.ChannelEdgeInfo
-
- // Edge describes the policy in one direction of the channel.
- Edge *channeldb.ChannelEdgePolicy
-}
-
-// PropagateChanPolicyUpdate signals the AuthenticatedGossiper to perform the
-// specified edge updates. Updates are done in two stages: first, the
-// AuthenticatedGossiper ensures the update has been committed by dependent
-// sub-systems, then it signs and broadcasts new updates to the network. A
-// mapping between outpoints and updated channel policies is returned, which is
-// used to update the forwarding policies of the underlying links.
-func (d *AuthenticatedGossiper) PropagateChanPolicyUpdate(
- edgesToUpdate []EdgeWithInfo) er.R {
-
- errChan := make(chan er.R, 1)
- policyUpdate := &chanPolicyUpdateRequest{
- edgesToUpdate: edgesToUpdate,
- errChan: errChan,
- }
-
- select {
- case d.chanPolicyUpdates <- policyUpdate:
- err := <-errChan
- return err
- case <-d.quit:
- return er.Errorf("AuthenticatedGossiper shutting down")
- }
-}
-
-// Start spawns network messages handler goroutine and registers on new block
-// notifications in order to properly handle the premature announcements.
-func (d *AuthenticatedGossiper) Start() er.R {
- var err er.R
- d.started.Do(func() {
- err = d.start()
- })
- return err
-}
-
-func (d *AuthenticatedGossiper) start() er.R {
- log.Info("Authenticated Gossiper is starting")
-
- // First we register for new notifications of newly discovered blocks.
- // We do this immediately so we'll later be able to consume any/all
- // blocks which were discovered.
- blockEpochs, err := d.cfg.Notifier.RegisterBlockEpochNtfn(nil)
- if err != nil {
- return err
- }
- d.blockEpochs = blockEpochs
-
- height, err := d.cfg.Router.CurrentBlockHeight()
- if err != nil {
- return err
- }
- d.bestHeight = height
-
- // Start the reliable sender. In case we had any pending messages ready
- // to be sent when the gossiper was last shut down, we must continue on
- // our quest to deliver them to their respective peers.
- if err := d.reliableSender.Start(); err != nil {
- return err
- }
-
- d.syncMgr.Start()
-
- d.wg.Add(1)
- go d.networkHandler()
-
- return nil
-}
-
-// Stop signals any active goroutines for a graceful closure.
-func (d *AuthenticatedGossiper) Stop() {
- d.stopped.Do(d.stop)
-}
-
-func (d *AuthenticatedGossiper) stop() {
- log.Info("Authenticated Gossiper is stopping")
-
- d.blockEpochs.Cancel()
-
- d.syncMgr.Stop()
-
- close(d.quit)
- d.wg.Wait()
-
- // We'll stop our reliable sender after all of the gossiper's goroutines
- // have exited to ensure nothing can cause it to continue executing.
- d.reliableSender.Stop()
-}
-
-// TODO(roasbeef): need method to get current gossip timestamp?
-// * using mtx, check time rotate forward is needed?
-
-// ProcessRemoteAnnouncement sends a new remote announcement message along with
-// the peer that sent the routing message. The announcement will be processed
-// then added to a queue for batched trickled announcement to all connected
-// peers. Remote channel announcements should contain the announcement proof
-// and be fully validated.
-func (d *AuthenticatedGossiper) ProcessRemoteAnnouncement(msg lnwire.Message,
- peer lnpeer.Peer) chan er.R {
-
- errChan := make(chan er.R, 1)
-
- // For messages in the known set of channel series queries, we'll
- // dispatch the message directly to the GossipSyncer, and skip the main
- // processing loop.
- switch m := msg.(type) {
- case *lnwire.QueryShortChanIDs,
- *lnwire.QueryChannelRange,
- *lnwire.ReplyChannelRange,
- *lnwire.ReplyShortChanIDsEnd:
-
- syncer, ok := d.syncMgr.GossipSyncer(peer.PubKey())
- if !ok {
- log.Warnf("Gossip syncer for peer=%x not found",
- peer.PubKey())
-
- errChan <- ErrGossipSyncerNotFound.Default()
- return errChan
- }
-
- // If we've found the message target, then we'll dispatch the
- // message directly to it.
- syncer.ProcessQueryMsg(m, peer.QuitSignal())
-
- errChan <- nil
- return errChan
-
- // If a peer is updating its current update horizon, then we'll dispatch
- // that directly to the proper GossipSyncer.
- case *lnwire.GossipTimestampRange:
- syncer, ok := d.syncMgr.GossipSyncer(peer.PubKey())
- if !ok {
- log.Warnf("Gossip syncer for peer=%x not found",
- peer.PubKey())
-
- errChan <- ErrGossipSyncerNotFound.Default()
- return errChan
- }
-
- // If we've found the message target, then we'll dispatch the
- // message directly to it.
- if err := syncer.ApplyGossipFilter(m); err != nil {
- log.Warnf("Unable to apply gossip filter for peer=%x: "+
- "%v", peer.PubKey(), err)
-
- errChan <- err
- return errChan
- }
-
- errChan <- nil
- return errChan
- }
-
- nMsg := &networkMsg{
- msg: msg,
- isRemote: true,
- peer: peer,
- source: peer.IdentityKey(),
- err: errChan,
- }
-
- select {
- case d.networkMsgs <- nMsg:
-
- // If the peer that sent us this error is quitting, then we don't need
- // to send back an error and can return immediately.
- case <-peer.QuitSignal():
- return nil
- case <-d.quit:
- nMsg.err <- ErrGossiperShuttingDown.Default()
- }
-
- return nMsg.err
-}
-
-// ProcessLocalAnnouncement sends a new remote announcement message along with
-// the peer that sent the routing message. The announcement will be processed
-// then added to a queue for batched trickled announcement to all connected
-// peers. Local channel announcements don't contain the announcement proof and
-// will not be fully validated. Once the channel proofs are received, the
-// entire channel announcement and update messages will be re-constructed and
-// broadcast to the rest of the network.
-func (d *AuthenticatedGossiper) ProcessLocalAnnouncement(msg lnwire.Message,
- source *btcec.PublicKey, optionalFields ...OptionalMsgField) chan er.R {
-
- optionalMsgFields := &optionalMsgFields{}
- optionalMsgFields.apply(optionalFields...)
-
- nMsg := &networkMsg{
- msg: msg,
- optionalMsgFields: optionalMsgFields,
- isRemote: false,
- source: source,
- err: make(chan er.R, 1),
- }
-
- select {
- case d.networkMsgs <- nMsg:
- case <-d.quit:
- nMsg.err <- ErrGossiperShuttingDown.Default()
- }
-
- return nMsg.err
-}
-
-// channelUpdateID is a unique identifier for ChannelUpdate messages, as
-// channel updates can be identified by the (ShortChannelID, ChannelFlags)
-// tuple.
-type channelUpdateID struct {
- // channelID represents the set of data which is needed to
- // retrieve all necessary data to validate the channel existence.
- channelID lnwire.ShortChannelID
-
- // Flags least-significant bit must be set to 0 if the creating node
- // corresponds to the first node in the previously sent channel
- // announcement and 1 otherwise.
- flags lnwire.ChanUpdateChanFlags
-}
-
-// msgWithSenders is a wrapper struct around a message, and the set of peers
-// that originally sent us this message. Using this struct, we can ensure that
-// we don't re-send a message to the peer that sent it to us in the first
-// place.
-type msgWithSenders struct {
- // msg is the wire message itself.
- msg lnwire.Message
-
- // sender is the set of peers that sent us this message.
- senders map[route.Vertex]struct{}
-}
-
-// mergeSyncerMap is used to merge the set of senders of a particular message
-// with peers that we have an active GossipSyncer with. We do this to ensure
-// that we don't broadcast messages to any peers that we have active gossip
-// syncers for.
-func (m *msgWithSenders) mergeSyncerMap(syncers map[route.Vertex]*GossipSyncer) {
- for peerPub := range syncers {
- m.senders[peerPub] = struct{}{}
- }
-}
-
-// deDupedAnnouncements de-duplicates announcements that have been added to the
-// batch. Internally, announcements are stored in three maps
-// (one each for channel announcements, channel updates, and node
-// announcements). These maps keep track of unique announcements and ensure no
-// announcements are duplicated. We keep the three message types separate, such
-// that we can send channel announcements first, then channel updates, and
-// finally node announcements when it's time to broadcast them.
-type deDupedAnnouncements struct {
- // channelAnnouncements are identified by the short channel id field.
- channelAnnouncements map[lnwire.ShortChannelID]msgWithSenders
-
- // channelUpdates are identified by the channel update id field.
- channelUpdates map[channelUpdateID]msgWithSenders
-
- // nodeAnnouncements are identified by the Vertex field.
- nodeAnnouncements map[route.Vertex]msgWithSenders
-
- sync.Mutex
-}
-
-// Reset operates on deDupedAnnouncements to reset the storage of
-// announcements.
-func (d *deDupedAnnouncements) Reset() {
- d.Lock()
- defer d.Unlock()
-
- d.reset()
-}
-
-// reset is the private version of the Reset method. We have this so we can
-// call this method within method that are already holding the lock.
-func (d *deDupedAnnouncements) reset() {
- // Storage of each type of announcement (channel announcements, channel
- // updates, node announcements) is set to an empty map where the
- // appropriate key points to the corresponding lnwire.Message.
- d.channelAnnouncements = make(map[lnwire.ShortChannelID]msgWithSenders)
- d.channelUpdates = make(map[channelUpdateID]msgWithSenders)
- d.nodeAnnouncements = make(map[route.Vertex]msgWithSenders)
-}
-
-// addMsg adds a new message to the current batch. If the message is already
-// present in the current batch, then this new instance replaces the latter,
-// and the set of senders is updated to reflect which node sent us this
-// message.
-func (d *deDupedAnnouncements) addMsg(message networkMsg) {
- // Depending on the message type (channel announcement, channel update,
- // or node announcement), the message is added to the corresponding map
- // in deDupedAnnouncements. Because each identifying key can have at
- // most one value, the announcements are de-duplicated, with newer ones
- // replacing older ones.
- switch msg := message.msg.(type) {
-
- // Channel announcements are identified by the short channel id field.
- case *lnwire.ChannelAnnouncement:
- deDupKey := msg.ShortChannelID
- sender := route.NewVertex(message.source)
-
- mws, ok := d.channelAnnouncements[deDupKey]
- if !ok {
- mws = msgWithSenders{
- msg: msg,
- senders: make(map[route.Vertex]struct{}),
- }
- mws.senders[sender] = struct{}{}
-
- d.channelAnnouncements[deDupKey] = mws
-
- return
- }
-
- mws.msg = msg
- mws.senders[sender] = struct{}{}
- d.channelAnnouncements[deDupKey] = mws
-
- // Channel updates are identified by the (short channel id,
- // channelflags) tuple.
- case *lnwire.ChannelUpdate:
- sender := route.NewVertex(message.source)
- deDupKey := channelUpdateID{
- msg.ShortChannelID,
- msg.ChannelFlags,
- }
-
- oldTimestamp := uint32(0)
- mws, ok := d.channelUpdates[deDupKey]
- if ok {
- // If we already have seen this message, record its
- // timestamp.
- oldTimestamp = mws.msg.(*lnwire.ChannelUpdate).Timestamp
- }
-
- // If we already had this message with a strictly newer
- // timestamp, then we'll just discard the message we got.
- if oldTimestamp > msg.Timestamp {
- return
- }
-
- // If the message we just got is newer than what we previously
- // have seen, or this is the first time we see it, then we'll
- // add it to our map of announcements.
- if oldTimestamp < msg.Timestamp {
- mws = msgWithSenders{
- msg: msg,
- senders: make(map[route.Vertex]struct{}),
- }
-
- // We'll mark the sender of the message in the
- // senders map.
- mws.senders[sender] = struct{}{}
-
- d.channelUpdates[deDupKey] = mws
-
- return
- }
-
- // Lastly, if we had seen this exact message from before, with
- // the same timestamp, we'll add the sender to the map of
- // senders, such that we can skip sending this message back in
- // the next batch.
- mws.msg = msg
- mws.senders[sender] = struct{}{}
- d.channelUpdates[deDupKey] = mws
-
- // Node announcements are identified by the Vertex field. Use the
- // NodeID to create the corresponding Vertex.
- case *lnwire.NodeAnnouncement:
- sender := route.NewVertex(message.source)
- deDupKey := route.Vertex(msg.NodeID)
-
- // We do the same for node announcements as we did for channel
- // updates, as they also carry a timestamp.
- oldTimestamp := uint32(0)
- mws, ok := d.nodeAnnouncements[deDupKey]
- if ok {
- oldTimestamp = mws.msg.(*lnwire.NodeAnnouncement).Timestamp
- }
-
- // Discard the message if it's old.
- if oldTimestamp > msg.Timestamp {
- return
- }
-
- // Replace if it's newer.
- if oldTimestamp < msg.Timestamp {
- mws = msgWithSenders{
- msg: msg,
- senders: make(map[route.Vertex]struct{}),
- }
-
- mws.senders[sender] = struct{}{}
-
- d.nodeAnnouncements[deDupKey] = mws
-
- return
- }
-
- // Add to senders map if it's the same as we had.
- mws.msg = msg
- mws.senders[sender] = struct{}{}
- d.nodeAnnouncements[deDupKey] = mws
- }
-}
-
-// AddMsgs is a helper method to add multiple messages to the announcement
-// batch.
-func (d *deDupedAnnouncements) AddMsgs(msgs ...networkMsg) {
- d.Lock()
- defer d.Unlock()
-
- for _, msg := range msgs {
- d.addMsg(msg)
- }
-}
-
-// Emit returns the set of de-duplicated announcements to be sent out during
-// the next announcement epoch, in the order of channel announcements, channel
-// updates, and node announcements. Each message emitted, contains the set of
-// peers that sent us the message. This way, we can ensure that we don't waste
-// bandwidth by re-sending a message to the peer that sent it to us in the
-// first place. Additionally, the set of stored messages are reset.
-func (d *deDupedAnnouncements) Emit() []msgWithSenders {
- d.Lock()
- defer d.Unlock()
-
- // Get the total number of announcements.
- numAnnouncements := len(d.channelAnnouncements) + len(d.channelUpdates) +
- len(d.nodeAnnouncements)
-
- // Create an empty array of lnwire.Messages with a length equal to
- // the total number of announcements.
- msgs := make([]msgWithSenders, 0, numAnnouncements)
-
- // Add the channel announcements to the array first.
- for _, message := range d.channelAnnouncements {
- msgs = append(msgs, message)
- }
-
- // Then add the channel updates.
- for _, message := range d.channelUpdates {
- msgs = append(msgs, message)
- }
-
- // Finally add the node announcements.
- for _, message := range d.nodeAnnouncements {
- msgs = append(msgs, message)
- }
-
- d.reset()
-
- // Return the array of lnwire.messages.
- return msgs
-}
-
-// calculateSubBatchSize is a helper function that calculates the size to break
-// down the batchSize into.
-func calculateSubBatchSize(totalDelay, subBatchDelay time.Duration,
- minimumBatchSize, batchSize int) int {
- if subBatchDelay > totalDelay {
- return batchSize
- }
-
- subBatchSize := (int(batchSize)*int(subBatchDelay) + int(totalDelay) - 1) /
- int(totalDelay)
-
- if subBatchSize < minimumBatchSize {
- return minimumBatchSize
- }
-
- return subBatchSize
-}
-
-// splitAnnouncementBatches takes an exiting list of announcements and
-// decomposes it into sub batches controlled by the `subBatchSize`.
-func splitAnnouncementBatches(subBatchSize int,
- announcementBatch []msgWithSenders) [][]msgWithSenders {
- var splitAnnouncementBatch [][]msgWithSenders
-
- for subBatchSize < len(announcementBatch) {
- // For slicing with minimal allocation
- // https://github.com/golang/go/wiki/SliceTricks
- announcementBatch, splitAnnouncementBatch =
- announcementBatch[subBatchSize:],
- append(splitAnnouncementBatch,
- announcementBatch[0:subBatchSize:subBatchSize])
- }
- splitAnnouncementBatch = append(splitAnnouncementBatch, announcementBatch)
-
- return splitAnnouncementBatch
-}
-
-// sendBatch broadcasts a list of announcements to our peers.
-func (d *AuthenticatedGossiper) sendBatch(announcementBatch []msgWithSenders) {
- syncerPeers := d.syncMgr.GossipSyncers()
-
- // We'll first attempt to filter out this new message
- // for all peers that have active gossip syncers
- // active.
- for _, syncer := range syncerPeers {
- syncer.FilterGossipMsgs(announcementBatch...)
- }
-
- for _, msgChunk := range announcementBatch {
- // With the syncers taken care of, we'll merge
- // the sender map with the set of syncers, so
- // we don't send out duplicate messages.
- msgChunk.mergeSyncerMap(syncerPeers)
-
- err := d.cfg.Broadcast(
- msgChunk.senders, msgChunk.msg,
- )
- if err != nil {
- log.Errorf("Unable to send batch "+
- "announcements: %v", err)
- continue
- }
- }
-}
-
-// networkHandler is the primary goroutine that drives this service. The roles
-// of this goroutine includes answering queries related to the state of the
-// network, syncing up newly connected peers, and also periodically
-// broadcasting our latest topology state to all connected peers.
-//
-// NOTE: This MUST be run as a goroutine.
-func (d *AuthenticatedGossiper) networkHandler() {
- defer d.wg.Done()
-
- // Initialize empty deDupedAnnouncements to store announcement batch.
- announcements := deDupedAnnouncements{}
- announcements.Reset()
-
- d.cfg.RetransmitTicker.Resume()
- defer d.cfg.RetransmitTicker.Stop()
-
- trickleTimer := time.NewTicker(d.cfg.TrickleDelay)
- defer trickleTimer.Stop()
-
- // To start, we'll first check to see if there are any stale channel or
- // node announcements that we need to re-transmit.
- if err := d.retransmitStaleAnns(time.Now()); err != nil {
- log.Errorf("Unable to rebroadcast stale announcements: %v", err)
- }
-
- // We'll use this validation to ensure that we process jobs in their
- // dependency order during parallel validation.
- validationBarrier := routing.NewValidationBarrier(
- runtime.NumCPU()*4, d.quit,
- )
-
- for {
- select {
- // A new policy update has arrived. We'll commit it to the
- // sub-systems below us, then craft, sign, and broadcast a new
- // ChannelUpdate for the set of affected clients.
- case policyUpdate := <-d.chanPolicyUpdates:
- // First, we'll now create new fully signed updates for
- // the affected channels and also update the underlying
- // graph with the new state.
- newChanUpdates, err := d.processChanPolicyUpdate(
- policyUpdate.edgesToUpdate,
- )
- policyUpdate.errChan <- err
- if err != nil {
- log.Errorf("Unable to craft policy updates: %v",
- err)
- continue
- }
-
- // Finally, with the updates committed, we'll now add
- // them to the announcement batch to be flushed at the
- // start of the next epoch.
- announcements.AddMsgs(newChanUpdates...)
-
- case announcement := <-d.networkMsgs:
- // We should only broadcast this message forward if it
- // originated from us or it wasn't received as part of
- // our initial historical sync.
- shouldBroadcast := !announcement.isRemote ||
- d.syncMgr.IsGraphSynced()
-
- switch announcement.msg.(type) {
- // Channel announcement signatures are amongst the only
- // messages that we'll process serially.
- case *lnwire.AnnounceSignatures:
- emittedAnnouncements := d.processNetworkAnnouncement(
- announcement,
- )
- if emittedAnnouncements != nil {
- announcements.AddMsgs(
- emittedAnnouncements...,
- )
- }
- continue
- }
-
- // If this message was recently rejected, then we won't
- // attempt to re-process it.
- if d.isRecentlyRejectedMsg(announcement.msg) {
- announcement.err <- er.Errorf("recently " +
- "rejected")
- continue
- }
-
- // We'll set up any dependent, and wait until a free
- // slot for this job opens up, this allow us to not
- // have thousands of goroutines active.
- validationBarrier.InitJobDependencies(announcement.msg)
-
- d.wg.Add(1)
- go func() {
- defer d.wg.Done()
- defer validationBarrier.CompleteJob()
-
- // If this message has an existing dependency,
- // then we'll wait until that has been fully
- // validated before we proceed.
- err := validationBarrier.WaitForDependants(
- announcement.msg,
- )
- if err != nil {
- if !routing.ErrVBarrierShuttingDown.Is(err) {
- log.Warnf("unexpected error "+
- "during validation "+
- "barrier shutdown: %v",
- err)
- }
- announcement.err <- err
- return
- }
-
- // Process the network announcement to
- // determine if this is either a new
- // announcement from our PoV or an edges to a
- // prior vertex/edge we previously proceeded.
- emittedAnnouncements := d.processNetworkAnnouncement(
- announcement,
- )
-
- // If this message had any dependencies, then
- // we can now signal them to continue.
- validationBarrier.SignalDependants(
- announcement.msg,
- )
-
- // If the announcement was accepted, then add
- // the emitted announcements to our announce
- // batch to be broadcast once the trickle timer
- // ticks gain.
- if emittedAnnouncements != nil && shouldBroadcast {
- // TODO(roasbeef): exclude peer that
- // sent.
- announcements.AddMsgs(
- emittedAnnouncements...,
- )
- } else if emittedAnnouncements != nil {
- log.Trace("Skipping broadcast of " +
- "announcements received " +
- "during initial graph sync")
- }
-
- }()
-
- // A new block has arrived, so we can re-process the previously
- // premature announcements.
- case newBlock, ok := <-d.blockEpochs.Epochs:
- // If the channel has been closed, then this indicates
- // the daemon is shutting down, so we exit ourselves.
- if !ok {
- return
- }
-
- // Once a new block arrives, we update our running
- // track of the height of the chain tip.
- d.Lock()
- blockHeight := uint32(newBlock.Height)
- d.bestHeight = blockHeight
-
- log.Debugf("New block: height=%d, hash=%s", blockHeight,
- newBlock.Hash)
-
- // Next we check if we have any premature announcements
- // for this height, if so, then we process them once
- // more as normal announcements.
- premature := d.prematureAnnouncements[blockHeight]
- if len(premature) == 0 {
- d.Unlock()
- continue
- }
- delete(d.prematureAnnouncements, blockHeight)
- d.Unlock()
-
- log.Infof("Re-processing %v premature announcements "+
- "for height %v", len(premature), blockHeight)
-
- for _, ann := range premature {
- emittedAnnouncements := d.processNetworkAnnouncement(ann)
- if emittedAnnouncements != nil {
- announcements.AddMsgs(
- emittedAnnouncements...,
- )
- }
- }
-
- // The trickle timer has ticked, which indicates we should
- // flush to the network the pending batch of new announcements
- // we've received since the last trickle tick.
- case <-trickleTimer.C:
- // Emit the current batch of announcements from
- // deDupedAnnouncements.
- announcementBatch := announcements.Emit()
-
- // If the current announcements batch is nil, then we
- // have no further work here.
- if len(announcementBatch) == 0 {
- continue
- }
-
- // Next, If we have new things to announce then
- // broadcast them to all our immediately connected
- // peers.
- subBatchSize := calculateSubBatchSize(
- d.cfg.TrickleDelay, d.cfg.SubBatchDelay, d.cfg.MinimumBatchSize,
- len(announcementBatch),
- )
-
- splitAnnouncementBatch := splitAnnouncementBatches(
- subBatchSize, announcementBatch,
- )
-
- d.wg.Add(1)
- go func() {
- defer d.wg.Done()
- log.Infof("Broadcasting %v new announcements in %d sub batches",
- len(announcementBatch), len(splitAnnouncementBatch))
-
- for _, announcementBatch := range splitAnnouncementBatch {
- d.sendBatch(announcementBatch)
- select {
- case <-time.After(d.cfg.SubBatchDelay):
- case <-d.quit:
- return
- }
- }
- }()
-
- // The retransmission timer has ticked which indicates that we
- // should check if we need to prune or re-broadcast any of our
- // personal channels or node announcement. This addresses the
- // case of "zombie" channels and channel advertisements that
- // have been dropped, or not properly propagated through the
- // network.
- case tick := <-d.cfg.RetransmitTicker.Ticks():
- if err := d.retransmitStaleAnns(tick); err != nil {
- log.Errorf("unable to rebroadcast stale "+
- "announcements: %v", err)
- }
-
- // The gossiper has been signalled to exit, to we exit our
- // main loop so the wait group can be decremented.
- case <-d.quit:
- return
- }
- }
-}
-
-// TODO(roasbeef): d/c peers that send updates not on our chain
-
-// InitSyncState is called by outside sub-systems when a connection is
-// established to a new peer that understands how to perform channel range
-// queries. We'll allocate a new gossip syncer for it, and start any goroutines
-// needed to handle new queries.
-func (d *AuthenticatedGossiper) InitSyncState(syncPeer lnpeer.Peer) {
- d.syncMgr.InitSyncState(syncPeer)
-}
-
-// PruneSyncState is called by outside sub-systems once a peer that we were
-// previously connected to has been disconnected. In this case we can stop the
-// existing GossipSyncer assigned to the peer and free up resources.
-func (d *AuthenticatedGossiper) PruneSyncState(peer route.Vertex) {
- d.syncMgr.PruneSyncState(peer)
-}
-
-// isRecentlyRejectedMsg returns true if we recently rejected a message, and
-// false otherwise, This avoids expensive reprocessing of the message.
-func (d *AuthenticatedGossiper) isRecentlyRejectedMsg(msg lnwire.Message) bool {
- d.rejectMtx.RLock()
- defer d.rejectMtx.RUnlock()
-
- switch m := msg.(type) {
- case *lnwire.ChannelUpdate:
- _, ok := d.recentRejects[m.ShortChannelID.ToUint64()]
- return ok
-
- case *lnwire.ChannelAnnouncement:
- _, ok := d.recentRejects[m.ShortChannelID.ToUint64()]
- return ok
-
- default:
- return false
- }
-}
-
-// retransmitStaleAnns examines all outgoing channels that the source node is
-// known to maintain to check to see if any of them are "stale". A channel is
-// stale iff, the last timestamp of its rebroadcast is older than the
-// RebroadcastInterval. We also check if a refreshed node announcement should
-// be resent.
-func (d *AuthenticatedGossiper) retransmitStaleAnns(now time.Time) er.R {
- // Iterate over all of our channels and check if any of them fall
- // within the prune interval or re-broadcast interval.
- type updateTuple struct {
- info *channeldb.ChannelEdgeInfo
- edge *channeldb.ChannelEdgePolicy
- }
-
- var (
- havePublicChannels bool
- edgesToUpdate []updateTuple
- )
- err := d.cfg.Router.ForAllOutgoingChannels(func(
- info *channeldb.ChannelEdgeInfo,
- edge *channeldb.ChannelEdgePolicy) er.R {
-
- // If there's no auth proof attached to this edge, it means
- // that it is a private channel not meant to be announced to
- // the greater network, so avoid sending channel updates for
- // this channel to not leak its
- // existence.
- if info.AuthProof == nil {
- log.Debugf("Skipping retransmission of channel "+
- "without AuthProof: %v", info.ChannelID)
- return nil
- }
-
- // We make a note that we have at least one public channel. We
- // use this to determine whether we should send a node
- // announcement below.
- havePublicChannels = true
-
- // If this edge has a ChannelUpdate that was created before the
- // introduction of the MaxHTLC field, then we'll update this
- // edge to propagate this information in the network.
- if !edge.MessageFlags.HasMaxHtlc() {
- // We'll make sure we support the new max_htlc field if
- // not already present.
- edge.MessageFlags |= lnwire.ChanUpdateOptionMaxHtlc
- edge.MaxHTLC = lnwire.NewMSatFromSatoshis(info.Capacity)
-
- edgesToUpdate = append(edgesToUpdate, updateTuple{
- info: info,
- edge: edge,
- })
- return nil
- }
-
- timeElapsed := now.Sub(edge.LastUpdate)
-
- // If it's been longer than RebroadcastInterval since we've
- // re-broadcasted the channel, add the channel to the set of
- // edges we need to update.
- if timeElapsed >= d.cfg.RebroadcastInterval {
- edgesToUpdate = append(edgesToUpdate, updateTuple{
- info: info,
- edge: edge,
- })
- }
-
- return nil
- })
- if err != nil && !channeldb.ErrGraphNoEdgesFound.Is(err) {
- return er.Errorf("unable to retrieve outgoing channels: %v",
- err)
- }
-
- var signedUpdates []lnwire.Message
- for _, chanToUpdate := range edgesToUpdate {
- // Re-sign and update the channel on disk and retrieve our
- // ChannelUpdate to broadcast.
- chanAnn, chanUpdate, err := d.updateChannel(
- chanToUpdate.info, chanToUpdate.edge,
- )
- if err != nil {
- return er.Errorf("unable to update channel: %v", err)
- }
-
- // If we have a valid announcement to transmit, then we'll send
- // that along with the update.
- if chanAnn != nil {
- signedUpdates = append(signedUpdates, chanAnn)
- }
-
- signedUpdates = append(signedUpdates, chanUpdate)
- }
-
- // If we don't have any public channels, we return as we don't want to
- // broadcast anything that would reveal our existence.
- if !havePublicChannels {
- return nil
- }
-
- // We'll also check that our NodeAnnouncement is not too old.
- currentNodeAnn, err := d.cfg.SelfNodeAnnouncement(false)
- if err != nil {
- return er.Errorf("unable to get current node announment: %v",
- err)
- }
-
- timestamp := time.Unix(int64(currentNodeAnn.Timestamp), 0)
- timeElapsed := now.Sub(timestamp)
-
- // If it's been a full day since we've re-broadcasted the
- // node announcement, refresh it and resend it.
- nodeAnnStr := ""
- if timeElapsed >= d.cfg.RebroadcastInterval {
- newNodeAnn, err := d.cfg.SelfNodeAnnouncement(true)
- if err != nil {
- return er.Errorf("unable to get refreshed node "+
- "announcement: %v", err)
- }
-
- signedUpdates = append(signedUpdates, &newNodeAnn)
- nodeAnnStr = " and our refreshed node announcement"
-
- // Before broadcasting the refreshed node announcement, add it
- // to our own graph.
- if err := d.addNode(&newNodeAnn); err != nil {
- log.Errorf("Unable to add refreshed node announcement "+
- "to graph: %v", err)
- }
- }
-
- // If we don't have any updates to re-broadcast, then we'll exit
- // early.
- if len(signedUpdates) == 0 {
- return nil
- }
-
- log.Infof("Retransmitting %v outgoing channels%v",
- len(edgesToUpdate), nodeAnnStr)
-
- // With all the wire announcements properly crafted, we'll broadcast
- // our known outgoing channels to all our immediate peers.
- if err := d.cfg.Broadcast(nil, signedUpdates...); err != nil {
- return er.Errorf("unable to re-broadcast channels: %v", err)
- }
-
- return nil
-}
-
-// processChanPolicyUpdate generates a new set of channel updates for the
-// provided list of edges and updates the backing ChannelGraphSource.
-func (d *AuthenticatedGossiper) processChanPolicyUpdate(
- edgesToUpdate []EdgeWithInfo) ([]networkMsg, er.R) {
-
- var chanUpdates []networkMsg
- for _, edgeInfo := range edgesToUpdate {
- // Now that we've collected all the channels we need to update,
- // we'll re-sign and update the backing ChannelGraphSource, and
- // retrieve our ChannelUpdate to broadcast.
- _, chanUpdate, err := d.updateChannel(
- edgeInfo.Info, edgeInfo.Edge,
- )
- if err != nil {
- return nil, err
- }
-
- // We'll avoid broadcasting any updates for private channels to
- // avoid directly giving away their existence. Instead, we'll
- // send the update directly to the remote party.
- if edgeInfo.Info.AuthProof == nil {
- remotePubKey := remotePubFromChanInfo(
- edgeInfo.Info, chanUpdate.ChannelFlags,
- )
- err := d.reliableSender.sendMessage(
- chanUpdate, remotePubKey,
- )
- if err != nil {
- log.Errorf("Unable to reliably send %v for "+
- "channel=%v to peer=%x: %v",
- chanUpdate.MsgType(),
- chanUpdate.ShortChannelID,
- remotePubKey, err)
- }
- continue
- }
-
- // We set ourselves as the source of this message to indicate
- // that we shouldn't skip any peers when sending this message.
- chanUpdates = append(chanUpdates, networkMsg{
- source: d.selfKey,
- msg: chanUpdate,
- })
- }
-
- return chanUpdates, nil
-}
-
-// remotePubFromChanInfo returns the public key of the remote peer given a
-// ChannelEdgeInfo that describe a channel we have with them.
-func remotePubFromChanInfo(chanInfo *channeldb.ChannelEdgeInfo,
- chanFlags lnwire.ChanUpdateChanFlags) [33]byte {
-
- var remotePubKey [33]byte
- switch {
- case chanFlags&lnwire.ChanUpdateDirection == 0:
- remotePubKey = chanInfo.NodeKey2Bytes
- case chanFlags&lnwire.ChanUpdateDirection == 1:
- remotePubKey = chanInfo.NodeKey1Bytes
- }
-
- return remotePubKey
-}
-
-// processRejectedEdge examines a rejected edge to see if we can extract any
-// new announcements from it. An edge will get rejected if we already added
-// the same edge without AuthProof to the graph. If the received announcement
-// contains a proof, we can add this proof to our edge. We can end up in this
-// situation in the case where we create a channel, but for some reason fail
-// to receive the remote peer's proof, while the remote peer is able to fully
-// assemble the proof and craft the ChannelAnnouncement.
-func (d *AuthenticatedGossiper) processRejectedEdge(
- chanAnnMsg *lnwire.ChannelAnnouncement,
- proof *channeldb.ChannelAuthProof) ([]networkMsg, er.R) {
-
- // First, we'll fetch the state of the channel as we know if from the
- // database.
- chanInfo, e1, e2, err := d.cfg.Router.GetChannelByID(
- chanAnnMsg.ShortChannelID,
- )
- if err != nil {
- return nil, err
- }
-
- // The edge is in the graph, and has a proof attached, then we'll just
- // reject it as normal.
- if chanInfo.AuthProof != nil {
- return nil, nil
- }
-
- // Otherwise, this means that the edge is within the graph, but it
- // doesn't yet have a proper proof attached. If we did not receive
- // the proof such that we now can add it, there's nothing more we
- // can do.
- if proof == nil {
- return nil, nil
- }
-
- // We'll then create then validate the new fully assembled
- // announcement.
- chanAnn, e1Ann, e2Ann, err := netann.CreateChanAnnouncement(
- proof, chanInfo, e1, e2,
- )
- if err != nil {
- return nil, err
- }
- err = routing.ValidateChannelAnn(chanAnn)
- if err != nil {
- err := er.Errorf("assembled channel announcement proof "+
- "for shortChanID=%v isn't valid: %v",
- chanAnnMsg.ShortChannelID, err)
- log.Error(err)
- return nil, err
- }
-
- // If everything checks out, then we'll add the fully assembled proof
- // to the database.
- err = d.cfg.Router.AddProof(chanAnnMsg.ShortChannelID, proof)
- if err != nil {
- err := er.Errorf("unable add proof to shortChanID=%v: %v",
- chanAnnMsg.ShortChannelID, err)
- log.Error(err)
- return nil, err
- }
-
- // As we now have a complete channel announcement for this channel,
- // we'll construct the announcement so they can be broadcast out to all
- // our peers.
- announcements := make([]networkMsg, 0, 3)
- announcements = append(announcements, networkMsg{
- source: d.selfKey,
- msg: chanAnn,
- })
- if e1Ann != nil {
- announcements = append(announcements, networkMsg{
- source: d.selfKey,
- msg: e1Ann,
- })
- }
- if e2Ann != nil {
- announcements = append(announcements, networkMsg{
- source: d.selfKey,
- msg: e2Ann,
- })
-
- }
-
- return announcements, nil
-}
-
-// addNode processes the given node announcement, and adds it to our channel
-// graph.
-func (d *AuthenticatedGossiper) addNode(msg *lnwire.NodeAnnouncement) er.R {
- if err := routing.ValidateNodeAnn(msg); err != nil {
- return er.Errorf("unable to validate node announcement: %v",
- err)
- }
-
- timestamp := time.Unix(int64(msg.Timestamp), 0)
- features := lnwire.NewFeatureVector(msg.Features, lnwire.Features)
- node := &channeldb.LightningNode{
- HaveNodeAnnouncement: true,
- LastUpdate: timestamp,
- Addresses: msg.Addresses,
- PubKeyBytes: msg.NodeID,
- Alias: msg.Alias.String(),
- AuthSigBytes: msg.Signature.ToSignatureBytes(),
- Features: features,
- Color: msg.RGBColor,
- ExtraOpaqueData: msg.ExtraOpaqueData,
- }
-
- return d.cfg.Router.AddNode(node)
-}
-
-// processNetworkAnnouncement processes a new network relate authenticated
-// channel or node announcement or announcements proofs. If the announcement
-// didn't affect the internal state due to either being out of date, invalid,
-// or redundant, then nil is returned. Otherwise, the set of announcements will
-// be returned which should be broadcasted to the rest of the network.
-func (d *AuthenticatedGossiper) processNetworkAnnouncement(
- nMsg *networkMsg) []networkMsg {
-
- // isPremature *MUST* be called with the gossiper's lock held.
- isPremature := func(chanID lnwire.ShortChannelID, delta uint32) bool {
- // TODO(roasbeef) make height delta 6
- // * or configurable
- return chanID.BlockHeight+delta > d.bestHeight
- }
-
- var announcements []networkMsg
-
- switch msg := nMsg.msg.(type) {
-
- // A new node announcement has arrived which either presents new
- // information about a node in one of the channels we know about, or a
- // updating previously advertised information.
- case *lnwire.NodeAnnouncement:
- timestamp := time.Unix(int64(msg.Timestamp), 0)
-
- // We'll quickly ask the router if it already has a
- // newer update for this node so we can skip validating
- // signatures if not required.
- if d.cfg.Router.IsStaleNode(msg.NodeID, timestamp) {
- nMsg.err <- nil
- return nil
- }
-
- if err := d.addNode(msg); err != nil {
- if routing.IsError(er.Wrapped(err), routing.ErrOutdated,
- routing.ErrIgnored) {
-
- log.Debug(err)
- } else {
- log.Error(err)
- }
-
- nMsg.err <- err
- return nil
- }
-
- // In order to ensure we don't leak unadvertised nodes, we'll
- // make a quick check to ensure this node intends to publicly
- // advertise itself to the network.
- isPublic, err := d.cfg.Router.IsPublicNode(msg.NodeID)
- if err != nil {
- log.Errorf("Unable to determine if node %x is "+
- "advertised: %v", msg.NodeID, err)
- nMsg.err <- err
- return nil
- }
-
- // If it does, we'll add their announcement to our batch so that
- // it can be broadcast to the rest of our peers.
- if isPublic {
- announcements = append(announcements, networkMsg{
- peer: nMsg.peer,
- source: nMsg.source,
- msg: msg,
- })
- } else {
- log.Tracef("Skipping broadcasting node announcement "+
- "for %x due to being unadvertised", msg.NodeID)
- }
-
- nMsg.err <- nil
- // TODO(roasbeef): get rid of the above
- return announcements
-
- // A new channel announcement has arrived, this indicates the
- // *creation* of a new channel within the network. This only advertises
- // the existence of a channel and not yet the routing policies in
- // either direction of the channel.
- case *lnwire.ChannelAnnouncement:
- // We'll ignore any channel announcements that target any chain
- // other than the set of chains we know of.
- if !bytes.Equal(msg.ChainHash[:], d.cfg.ChainHash[:]) {
- err := er.Errorf("ignoring ChannelAnnouncement from "+
- "chain=%v, gossiper on chain=%v", msg.ChainHash,
- d.cfg.ChainHash)
- log.Errorf(err.String())
-
- d.rejectMtx.Lock()
- d.recentRejects[msg.ShortChannelID.ToUint64()] = struct{}{}
- d.rejectMtx.Unlock()
-
- nMsg.err <- err
- return nil
- }
-
- // If the advertised inclusionary block is beyond our knowledge
- // of the chain tip, then we'll put the announcement in limbo
- // to be fully verified once we advance forward in the chain.
- d.Lock()
- if nMsg.isRemote && isPremature(msg.ShortChannelID, 0) {
- blockHeight := msg.ShortChannelID.BlockHeight
- log.Infof("Announcement for chan_id=(%v), is "+
- "premature: advertises height %v, only "+
- "height %v is known",
- msg.ShortChannelID.ToUint64(),
- msg.ShortChannelID.BlockHeight,
- d.bestHeight)
-
- d.prematureAnnouncements[blockHeight] = append(
- d.prematureAnnouncements[blockHeight],
- nMsg,
- )
- d.Unlock()
- return nil
- }
- d.Unlock()
-
- // At this point, we'll now ask the router if this is a
- // zombie/known edge. If so we can skip all the processing
- // below.
- if d.cfg.Router.IsKnownEdge(msg.ShortChannelID) {
- nMsg.err <- nil
- return nil
- }
-
- // If this is a remote channel announcement, then we'll validate
- // all the signatures within the proof as it should be well
- // formed.
- var proof *channeldb.ChannelAuthProof
- if nMsg.isRemote {
- if err := routing.ValidateChannelAnn(msg); err != nil {
- err := er.Errorf("unable to validate "+
- "announcement: %v", err)
- d.rejectMtx.Lock()
- d.recentRejects[msg.ShortChannelID.ToUint64()] = struct{}{}
- d.rejectMtx.Unlock()
-
- log.Error(err)
- nMsg.err <- err
- return nil
- }
-
- // If the proof checks out, then we'll save the proof
- // itself to the database so we can fetch it later when
- // gossiping with other nodes.
- proof = &channeldb.ChannelAuthProof{
- NodeSig1Bytes: msg.NodeSig1.ToSignatureBytes(),
- NodeSig2Bytes: msg.NodeSig2.ToSignatureBytes(),
- BitcoinSig1Bytes: msg.BitcoinSig1.ToSignatureBytes(),
- BitcoinSig2Bytes: msg.BitcoinSig2.ToSignatureBytes(),
- }
- }
-
- // With the proof validate (if necessary), we can now store it
- // within the database for our path finding and syncing needs.
- var featureBuf bytes.Buffer
- if err := msg.Features.Encode(&featureBuf); err != nil {
- log.Errorf("unable to encode features: %v", err)
- nMsg.err <- err
- return nil
- }
-
- edge := &channeldb.ChannelEdgeInfo{
- ChannelID: msg.ShortChannelID.ToUint64(),
- ChainHash: msg.ChainHash,
- NodeKey1Bytes: msg.NodeID1,
- NodeKey2Bytes: msg.NodeID2,
- BitcoinKey1Bytes: msg.BitcoinKey1,
- BitcoinKey2Bytes: msg.BitcoinKey2,
- AuthProof: proof,
- Features: featureBuf.Bytes(),
- ExtraOpaqueData: msg.ExtraOpaqueData,
- }
-
- // If there were any optional message fields provided, we'll
- // include them in its serialized disk representation now.
- if nMsg.optionalMsgFields != nil {
- if nMsg.optionalMsgFields.capacity != nil {
- edge.Capacity = *nMsg.optionalMsgFields.capacity
- }
- if nMsg.optionalMsgFields.channelPoint != nil {
- edge.ChannelPoint = *nMsg.optionalMsgFields.channelPoint
- }
- }
-
- // We will add the edge to the channel router. If the nodes
- // present in this channel are not present in the database, a
- // partial node will be added to represent each node while we
- // wait for a node announcement.
- //
- // Before we add the edge to the database, we obtain
- // the mutex for this channel ID. We do this to ensure
- // no other goroutine has read the database and is now
- // making decisions based on this DB state, before it
- // writes to the DB.
- d.channelMtx.Lock(msg.ShortChannelID.ToUint64())
- defer d.channelMtx.Unlock(msg.ShortChannelID.ToUint64())
- if err := d.cfg.Router.AddEdge(edge); err != nil {
- // If the edge was rejected due to already being known,
- // then it may be that case that this new message has a
- // fresh channel proof, so we'll check.
- if routing.IsError(er.Wrapped(err), routing.ErrOutdated,
- routing.ErrIgnored) {
-
- // Attempt to process the rejected message to
- // see if we get any new announcements.
- anns, rErr := d.processRejectedEdge(msg, proof)
- if rErr != nil {
- d.rejectMtx.Lock()
- d.recentRejects[msg.ShortChannelID.ToUint64()] = struct{}{}
- d.rejectMtx.Unlock()
- nMsg.err <- rErr
- return nil
- }
-
- // If while processing this rejected edge, we
- // realized there's a set of announcements we
- // could extract, then we'll return those
- // directly.
- if len(anns) != 0 {
- nMsg.err <- nil
- return anns
- }
-
- // Otherwise, this is just a regular rejected
- // edge.
- log.Debugf("Router rejected channel "+
- "edge: %v", err)
- } else {
- log.Tracef("Router rejected channel "+
- "edge: %v", err)
- }
-
- nMsg.err <- err
- return nil
- }
-
- // If we earlier received any ChannelUpdates for this channel,
- // we can now process them, as the channel is added to the
- // graph.
- shortChanID := msg.ShortChannelID.ToUint64()
- var channelUpdates []*networkMsg
-
- d.pChanUpdMtx.Lock()
- channelUpdates = append(channelUpdates, d.prematureChannelUpdates[shortChanID]...)
-
- // Now delete the premature ChannelUpdates, since we added them
- // all to the queue of network messages.
- delete(d.prematureChannelUpdates, shortChanID)
- d.pChanUpdMtx.Unlock()
-
- // Launch a new goroutine to handle each ChannelUpdate, this to
- // ensure we don't block here, as we can handle only one
- // announcement at a time.
- for _, cu := range channelUpdates {
- d.wg.Add(1)
- go func(nMsg *networkMsg) {
- defer d.wg.Done()
-
- switch msg := nMsg.msg.(type) {
-
- // Reprocess the message, making sure we return
- // an error to the original caller in case the
- // gossiper shuts down.
- case *lnwire.ChannelUpdate:
- log.Debugf("Reprocessing"+
- " ChannelUpdate for "+
- "shortChanID=%v",
- msg.ShortChannelID.ToUint64())
-
- select {
- case d.networkMsgs <- nMsg:
- case <-d.quit:
- nMsg.err <- ErrGossiperShuttingDown.Default()
- }
-
- // We don't expect any other message type than
- // ChannelUpdate to be in this map.
- default:
- log.Errorf("Unsupported message type "+
- "found among ChannelUpdates: "+
- "%T", msg)
- }
- }(cu)
- }
-
- // Channel announcement was successfully proceeded and know it
- // might be broadcast to other connected nodes if it was
- // announcement with proof (remote).
- if proof != nil {
- announcements = append(announcements, networkMsg{
- peer: nMsg.peer,
- source: nMsg.source,
- msg: msg,
- })
- }
-
- nMsg.err <- nil
- return announcements
-
- // A new authenticated channel edge update has arrived. This indicates
- // that the directional information for an already known channel has
- // been updated.
- case *lnwire.ChannelUpdate:
- // We'll ignore any channel announcements that target any chain
- // other than the set of chains we know of.
- if !bytes.Equal(msg.ChainHash[:], d.cfg.ChainHash[:]) {
- err := er.Errorf("ignoring ChannelUpdate from "+
- "chain=%v, gossiper on chain=%v", msg.ChainHash,
- d.cfg.ChainHash)
- log.Errorf(err.String())
-
- d.rejectMtx.Lock()
- d.recentRejects[msg.ShortChannelID.ToUint64()] = struct{}{}
- d.rejectMtx.Unlock()
-
- nMsg.err <- err
- return nil
- }
-
- blockHeight := msg.ShortChannelID.BlockHeight
- shortChanID := msg.ShortChannelID.ToUint64()
-
- // If the advertised inclusionary block is beyond our knowledge
- // of the chain tip, then we'll put the announcement in limbo
- // to be fully verified once we advance forward in the chain.
- d.Lock()
- if nMsg.isRemote && isPremature(msg.ShortChannelID, 0) {
- log.Infof("Update announcement for "+
- "short_chan_id(%v), is premature: advertises "+
- "height %v, only height %v is known",
- shortChanID, blockHeight,
- d.bestHeight)
-
- d.prematureAnnouncements[blockHeight] = append(
- d.prematureAnnouncements[blockHeight],
- nMsg,
- )
- d.Unlock()
- return nil
- }
- d.Unlock()
-
- // Before we perform any of the expensive checks below, we'll
- // check whether this update is stale or is for a zombie
- // channel in order to quickly reject it.
- timestamp := time.Unix(int64(msg.Timestamp), 0)
- if d.cfg.Router.IsStaleEdgePolicy(
- msg.ShortChannelID, timestamp, msg.ChannelFlags,
- ) {
- nMsg.err <- nil
- return nil
- }
-
- // Get the node pub key as far as we don't have it in channel
- // update announcement message. We'll need this to properly
- // verify message signature.
- //
- // We make sure to obtain the mutex for this channel ID
- // before we access the database. This ensures the state
- // we read from the database has not changed between this
- // point and when we call UpdateEdge() later.
- d.channelMtx.Lock(msg.ShortChannelID.ToUint64())
- defer d.channelMtx.Unlock(msg.ShortChannelID.ToUint64())
- chanInfo, _, _, err := d.cfg.Router.GetChannelByID(msg.ShortChannelID)
- switch {
- // No error, break.
- case err == nil:
- break
-
- case channeldb.ErrZombieEdge.Is(err):
- // Since we've deemed the update as not stale above,
- // before marking it live, we'll make sure it has been
- // signed by the correct party. The least-significant
- // bit in the flag on the channel update tells us which
- // edge is being updated.
- var pubKey *btcec.PublicKey
- switch {
- case msg.ChannelFlags&lnwire.ChanUpdateDirection == 0:
- pubKey, _ = chanInfo.NodeKey1()
- case msg.ChannelFlags&lnwire.ChanUpdateDirection == 1:
- pubKey, _ = chanInfo.NodeKey2()
- }
-
- err := routing.VerifyChannelUpdateSignature(msg, pubKey)
- if err != nil {
- err := er.Errorf("unable to verify channel "+
- "update signature: %v", err)
- log.Error(err)
- nMsg.err <- err
- return nil
- }
-
- // With the signature valid, we'll proceed to mark the
- // edge as live and wait for the channel announcement to
- // come through again.
- err = d.cfg.Router.MarkEdgeLive(msg.ShortChannelID)
- if err != nil {
- err := er.Errorf("unable to remove edge with "+
- "chan_id=%v from zombie index: %v",
- msg.ShortChannelID, err)
- log.Error(err)
- nMsg.err <- err
- return nil
- }
-
- log.Debugf("Removed edge with chan_id=%v from zombie "+
- "index", msg.ShortChannelID)
-
- // We'll fallthrough to ensure we stash the update until
- // we receive its corresponding ChannelAnnouncement.
- // This is needed to ensure the edge exists in the graph
- // before applying the update.
- fallthrough
- case channeldb.ErrGraphNotFound.Is(err):
- fallthrough
- case channeldb.ErrGraphNoEdgesFound.Is(err):
- fallthrough
- case channeldb.ErrEdgeNotFound.Is(err):
- // If the edge corresponding to this ChannelUpdate was
- // not found in the graph, this might be a channel in
- // the process of being opened, and we haven't processed
- // our own ChannelAnnouncement yet, hence it is not
- // found in the graph. This usually gets resolved after
- // the channel proofs are exchanged and the channel is
- // broadcasted to the rest of the network, but in case
- // this is a private channel this won't ever happen.
- // This can also happen in the case of a zombie channel
- // with a fresh update for which we don't have a
- // ChannelAnnouncement for since we reject them. Because
- // of this, we temporarily add it to a map, and
- // reprocess it after our own ChannelAnnouncement has
- // been processed.
- d.pChanUpdMtx.Lock()
- d.prematureChannelUpdates[shortChanID] = append(
- d.prematureChannelUpdates[shortChanID], nMsg,
- )
- d.pChanUpdMtx.Unlock()
-
- log.Debugf("Got ChannelUpdate for edge not found in "+
- "graph(shortChanID=%v), saving for "+
- "reprocessing later", shortChanID)
-
- // NOTE: We don't return anything on the error channel
- // for this message, as we expect that will be done when
- // this ChannelUpdate is later reprocessed.
- return nil
-
- default:
- err := er.Errorf("unable to validate channel update "+
- "short_chan_id=%v: %v", shortChanID, err)
- log.Error(err)
- nMsg.err <- err
-
- d.rejectMtx.Lock()
- d.recentRejects[msg.ShortChannelID.ToUint64()] = struct{}{}
- d.rejectMtx.Unlock()
- return nil
- }
-
- // The least-significant bit in the flag on the channel update
- // announcement tells us "which" side of the channels directed
- // edge is being updated.
- var pubKey *btcec.PublicKey
- switch {
- case msg.ChannelFlags&lnwire.ChanUpdateDirection == 0:
- pubKey, _ = chanInfo.NodeKey1()
- case msg.ChannelFlags&lnwire.ChanUpdateDirection == 1:
- pubKey, _ = chanInfo.NodeKey2()
- }
-
- // Validate the channel announcement with the expected public key and
- // channel capacity. In the case of an invalid channel update, we'll
- // return an error to the caller and exit early.
- err = routing.ValidateChannelUpdateAnn(pubKey, chanInfo.Capacity, msg)
- if err != nil {
- rErr := er.Errorf("unable to validate channel "+
- "update announcement for short_chan_id=%v: %v",
- spew.Sdump(msg.ShortChannelID), err)
-
- log.Error(rErr)
- nMsg.err <- rErr
- return nil
- }
-
- update := &channeldb.ChannelEdgePolicy{
- SigBytes: msg.Signature.ToSignatureBytes(),
- ChannelID: shortChanID,
- LastUpdate: timestamp,
- MessageFlags: msg.MessageFlags,
- ChannelFlags: msg.ChannelFlags,
- TimeLockDelta: msg.TimeLockDelta,
- MinHTLC: msg.HtlcMinimumMsat,
- MaxHTLC: msg.HtlcMaximumMsat,
- FeeBaseMSat: lnwire.MilliSatoshi(msg.BaseFee),
- FeeProportionalMillionths: lnwire.MilliSatoshi(msg.FeeRate),
- ExtraOpaqueData: msg.ExtraOpaqueData,
- }
-
- if err := d.cfg.Router.UpdateEdge(update); err != nil {
- if routing.IsError(er.Wrapped(err), routing.ErrOutdated,
- routing.ErrIgnored) {
- log.Debug(err)
- } else {
- d.rejectMtx.Lock()
- d.recentRejects[msg.ShortChannelID.ToUint64()] = struct{}{}
- d.rejectMtx.Unlock()
- log.Error(err)
- }
-
- nMsg.err <- err
- return nil
- }
-
- // If this is a local ChannelUpdate without an AuthProof, it
- // means it is an update to a channel that is not (yet)
- // supposed to be announced to the greater network. However,
- // our channel counter party will need to be given the update,
- // so we'll try sending the update directly to the remote peer.
- if !nMsg.isRemote && chanInfo.AuthProof == nil {
- // Get our peer's public key.
- remotePubKey := remotePubFromChanInfo(
- chanInfo, msg.ChannelFlags,
- )
-
- // Now, we'll attempt to send the channel update message
- // reliably to the remote peer in the background, so
- // that we don't block if the peer happens to be offline
- // at the moment.
- err := d.reliableSender.sendMessage(msg, remotePubKey)
- if err != nil {
- err := er.Errorf("unable to reliably send %v "+
- "for channel=%v to peer=%x: %v",
- msg.MsgType(), msg.ShortChannelID,
- remotePubKey, err)
- nMsg.err <- err
- return nil
- }
- }
-
- // Channel update announcement was successfully processed and
- // now it can be broadcast to the rest of the network. However,
- // we'll only broadcast the channel update announcement if it
- // has an attached authentication proof.
- if chanInfo.AuthProof != nil {
- announcements = append(announcements, networkMsg{
- peer: nMsg.peer,
- source: nMsg.source,
- msg: msg,
- })
- }
-
- nMsg.err <- nil
- return announcements
-
- // A new signature announcement has been received. This indicates
- // willingness of nodes involved in the funding of a channel to
- // announce this new channel to the rest of the world.
- case *lnwire.AnnounceSignatures:
- needBlockHeight := msg.ShortChannelID.BlockHeight +
- d.cfg.ProofMatureDelta
- shortChanID := msg.ShortChannelID.ToUint64()
-
- prefix := "local"
- if nMsg.isRemote {
- prefix = "remote"
- }
-
- log.Infof("Received new %v channel announcement for %v", prefix,
- msg.ShortChannelID)
-
- // By the specification, channel announcement proofs should be
- // sent after some number of confirmations after channel was
- // registered in bitcoin blockchain. Therefore, we check if the
- // proof is premature. If so we'll halt processing until the
- // expected announcement height. This allows us to be tolerant
- // to other clients if this constraint was changed.
- d.Lock()
- if isPremature(msg.ShortChannelID, d.cfg.ProofMatureDelta) {
- d.prematureAnnouncements[needBlockHeight] = append(
- d.prematureAnnouncements[needBlockHeight],
- nMsg,
- )
- log.Infof("Premature proof announcement, "+
- "current block height lower than needed: %v <"+
- " %v, add announcement to reprocessing batch",
- d.bestHeight, needBlockHeight)
- d.Unlock()
- return nil
- }
- d.Unlock()
-
- // Ensure that we know of a channel with the target channel ID
- // before proceeding further.
- //
- // We must acquire the mutex for this channel ID before getting
- // the channel from the database, to ensure what we read does
- // not change before we call AddProof() later.
- d.channelMtx.Lock(msg.ShortChannelID.ToUint64())
- defer d.channelMtx.Unlock(msg.ShortChannelID.ToUint64())
-
- chanInfo, e1, e2, err := d.cfg.Router.GetChannelByID(
- msg.ShortChannelID)
- if err != nil {
- // TODO(andrew.shvv) this is dangerous because remote
- // node might rewrite the waiting proof.
- proof := channeldb.NewWaitingProof(nMsg.isRemote, msg)
- err := d.cfg.WaitingProofStore.Add(proof)
- if err != nil {
- err := er.Errorf("unable to store "+
- "the proof for short_chan_id=%v: %v",
- shortChanID, err)
- log.Error(err)
- nMsg.err <- err
- return nil
- }
-
- log.Infof("Orphan %v proof announcement with "+
- "short_chan_id=%v, adding "+
- "to waiting batch", prefix, shortChanID)
- nMsg.err <- nil
- return nil
- }
-
- nodeID := nMsg.source.SerializeCompressed()
- isFirstNode := bytes.Equal(nodeID, chanInfo.NodeKey1Bytes[:])
- isSecondNode := bytes.Equal(nodeID, chanInfo.NodeKey2Bytes[:])
-
- // Ensure that channel that was retrieved belongs to the peer
- // which sent the proof announcement.
- if !(isFirstNode || isSecondNode) {
- err := er.Errorf("channel that was received not "+
- "belongs to the peer which sent the proof, "+
- "short_chan_id=%v", shortChanID)
- log.Error(err)
- nMsg.err <- err
- return nil
- }
-
- // If proof was sent by a local sub-system, then we'll
- // send the announcement signature to the remote node
- // so they can also reconstruct the full channel
- // announcement.
- if !nMsg.isRemote {
- var remotePubKey [33]byte
- if isFirstNode {
- remotePubKey = chanInfo.NodeKey2Bytes
- } else {
- remotePubKey = chanInfo.NodeKey1Bytes
- }
- // Since the remote peer might not be online
- // we'll call a method that will attempt to
- // deliver the proof when it comes online.
- err := d.reliableSender.sendMessage(msg, remotePubKey)
- if err != nil {
- err := er.Errorf("unable to reliably send %v "+
- "for channel=%v to peer=%x: %v",
- msg.MsgType(), msg.ShortChannelID,
- remotePubKey, err)
- nMsg.err <- err
- return nil
- }
- }
-
- // Check if we already have the full proof for this channel.
- if chanInfo.AuthProof != nil {
- // If we already have the fully assembled proof, then
- // the peer sending us their proof has probably not
- // received our local proof yet. So be kind and send
- // them the full proof.
- if nMsg.isRemote {
- peerID := nMsg.source.SerializeCompressed()
- log.Debugf("Got AnnounceSignatures for " +
- "channel with full proof.")
-
- d.wg.Add(1)
- go func() {
- defer d.wg.Done()
- log.Debugf("Received half proof for "+
- "channel %v with existing "+
- "full proof. Sending full "+
- "proof to peer=%x",
- msg.ChannelID,
- peerID)
-
- chanAnn, _, _, err := netann.CreateChanAnnouncement(
- chanInfo.AuthProof, chanInfo,
- e1, e2,
- )
- if err != nil {
- log.Errorf("unable to gen "+
- "ann: %v", err)
- return
- }
- err = nMsg.peer.SendMessage(
- false, chanAnn,
- )
- if err != nil {
- log.Errorf("Failed sending "+
- "full proof to "+
- "peer=%x: %v",
- peerID, err)
- return
- }
- log.Debugf("Full proof sent to peer=%x"+
- " for chanID=%v", peerID,
- msg.ChannelID)
- }()
- }
-
- log.Debugf("Already have proof for channel "+
- "with chanID=%v", msg.ChannelID)
- nMsg.err <- nil
- return nil
- }
-
- // Check that we received the opposite proof. If so, then we're
- // now able to construct the full proof, and create the channel
- // announcement. If we didn't receive the opposite half of the
- // proof than we should store it this one, and wait for
- // opposite to be received.
- proof := channeldb.NewWaitingProof(nMsg.isRemote, msg)
- oppositeProof, err := d.cfg.WaitingProofStore.Get(
- proof.OppositeKey(),
- )
- if err != nil && !channeldb.ErrWaitingProofNotFound.Is(err) {
- err := er.Errorf("unable to get "+
- "the opposite proof for short_chan_id=%v: %v",
- shortChanID, err)
- log.Error(err)
- nMsg.err <- err
- return nil
- }
-
- if channeldb.ErrWaitingProofNotFound.Is(err) {
- err := d.cfg.WaitingProofStore.Add(proof)
- if err != nil {
- err := er.Errorf("unable to store "+
- "the proof for short_chan_id=%v: %v",
- shortChanID, err)
- log.Error(err)
- nMsg.err <- err
- return nil
- }
-
- log.Infof("1/2 of channel ann proof received for "+
- "short_chan_id=%v, waiting for other half",
- shortChanID)
-
- nMsg.err <- nil
- return nil
- }
-
- // We now have both halves of the channel announcement proof,
- // then we'll reconstruct the initial announcement so we can
- // validate it shortly below.
- var dbProof channeldb.ChannelAuthProof
- if isFirstNode {
- dbProof.NodeSig1Bytes = msg.NodeSignature.ToSignatureBytes()
- dbProof.NodeSig2Bytes = oppositeProof.NodeSignature.ToSignatureBytes()
- dbProof.BitcoinSig1Bytes = msg.BitcoinSignature.ToSignatureBytes()
- dbProof.BitcoinSig2Bytes = oppositeProof.BitcoinSignature.ToSignatureBytes()
- } else {
- dbProof.NodeSig1Bytes = oppositeProof.NodeSignature.ToSignatureBytes()
- dbProof.NodeSig2Bytes = msg.NodeSignature.ToSignatureBytes()
- dbProof.BitcoinSig1Bytes = oppositeProof.BitcoinSignature.ToSignatureBytes()
- dbProof.BitcoinSig2Bytes = msg.BitcoinSignature.ToSignatureBytes()
- }
- chanAnn, e1Ann, e2Ann, err := netann.CreateChanAnnouncement(
- &dbProof, chanInfo, e1, e2,
- )
- if err != nil {
- log.Error(err)
- nMsg.err <- err
- return nil
- }
-
- // With all the necessary components assembled validate the
- // full channel announcement proof.
- if err := routing.ValidateChannelAnn(chanAnn); err != nil {
- err := er.Errorf("channel announcement proof "+
- "for short_chan_id=%v isn't valid: %v",
- shortChanID, err)
-
- log.Error(err)
- nMsg.err <- err
- return nil
- }
-
- // If the channel was returned by the router it means that
- // existence of funding point and inclusion of nodes bitcoin
- // keys in it already checked by the router. In this stage we
- // should check that node keys are attest to the bitcoin keys
- // by validating the signatures of announcement. If proof is
- // valid then we'll populate the channel edge with it, so we
- // can announce it on peer connect.
- err = d.cfg.Router.AddProof(msg.ShortChannelID, &dbProof)
- if err != nil {
- err := er.Errorf("unable add proof to the "+
- "channel chanID=%v: %v", msg.ChannelID, err)
- log.Error(err)
- nMsg.err <- err
- return nil
- }
-
- err = d.cfg.WaitingProofStore.Remove(proof.OppositeKey())
- if err != nil {
- err := er.Errorf("unable remove opposite proof "+
- "for the channel with chanID=%v: %v",
- msg.ChannelID, err)
- log.Error(err)
- nMsg.err <- err
- return nil
- }
-
- // Proof was successfully created and now can announce the
- // channel to the remain network.
- log.Infof("Fully valid channel proof for short_chan_id=%v "+
- "constructed, adding to next ann batch",
- shortChanID)
-
- // Assemble the necessary announcements to add to the next
- // broadcasting batch.
- announcements = append(announcements, networkMsg{
- peer: nMsg.peer,
- source: nMsg.source,
- msg: chanAnn,
- })
- if e1Ann != nil {
- announcements = append(announcements, networkMsg{
- peer: nMsg.peer,
- source: nMsg.source,
- msg: e1Ann,
- })
- }
- if e2Ann != nil {
- announcements = append(announcements, networkMsg{
- peer: nMsg.peer,
- source: nMsg.source,
- msg: e2Ann,
- })
- }
-
- // We'll also send along the node announcements for each channel
- // participant if we know of them. To ensure our node
- // announcement propagates to our channel counterparty, we'll
- // set the source for each announcement to the node it belongs
- // to, otherwise we won't send it since the source gets skipped.
- // This isn't necessary for channel updates and announcement
- // signatures since we send those directly to our channel
- // counterparty through the gossiper's reliable sender.
- node1Ann, err := d.fetchNodeAnn(chanInfo.NodeKey1Bytes)
- if err != nil {
- log.Debugf("Unable to fetch node announcement for "+
- "%x: %v", chanInfo.NodeKey1Bytes, err)
- } else {
- if nodeKey1, err := chanInfo.NodeKey1(); err == nil {
- announcements = append(announcements, networkMsg{
- peer: nMsg.peer,
- source: nodeKey1,
- msg: node1Ann,
- })
- }
- }
- node2Ann, err := d.fetchNodeAnn(chanInfo.NodeKey2Bytes)
- if err != nil {
- log.Debugf("Unable to fetch node announcement for "+
- "%x: %v", chanInfo.NodeKey2Bytes, err)
- } else {
- if nodeKey2, err := chanInfo.NodeKey2(); err == nil {
- announcements = append(announcements, networkMsg{
- peer: nMsg.peer,
- source: nodeKey2,
- msg: node2Ann,
- })
- }
- }
-
- nMsg.err <- nil
- return announcements
-
- default:
- nMsg.err <- er.New("wrong type of the announcement")
- return nil
- }
-}
-
-// fetchNodeAnn fetches the latest signed node announcement from our point of
-// view for the node with the given public key.
-func (d *AuthenticatedGossiper) fetchNodeAnn(
- pubKey [33]byte) (*lnwire.NodeAnnouncement, er.R) {
-
- node, err := d.cfg.Router.FetchLightningNode(pubKey)
- if err != nil {
- return nil, err
- }
-
- return node.NodeAnnouncement(true)
-}
-
-// isMsgStale determines whether a message retrieved from the backing
-// MessageStore is seen as stale by the current graph.
-func (d *AuthenticatedGossiper) isMsgStale(msg lnwire.Message) bool {
- switch msg := msg.(type) {
- case *lnwire.AnnounceSignatures:
- chanInfo, _, _, err := d.cfg.Router.GetChannelByID(
- msg.ShortChannelID,
- )
-
- // If the channel cannot be found, it is most likely a leftover
- // message for a channel that was closed, so we can consider it
- // stale.
- if channeldb.ErrEdgeNotFound.Is(err) {
- return true
- }
- if err != nil {
- log.Debugf("Unable to retrieve channel=%v from graph: "+
- "%v", err)
- return false
- }
-
- // If the proof exists in the graph, then we have successfully
- // received the remote proof and assembled the full proof, so we
- // can safely delete the local proof from the database.
- return chanInfo.AuthProof != nil
-
- case *lnwire.ChannelUpdate:
- _, p1, p2, err := d.cfg.Router.GetChannelByID(msg.ShortChannelID)
-
- // If the channel cannot be found, it is most likely a leftover
- // message for a channel that was closed, so we can consider it
- // stale.
- if channeldb.ErrEdgeNotFound.Is(err) {
- return true
- }
- if err != nil {
- log.Debugf("Unable to retrieve channel=%v from graph: "+
- "%v", msg.ShortChannelID, err)
- return false
- }
-
- // Otherwise, we'll retrieve the correct policy that we
- // currently have stored within our graph to check if this
- // message is stale by comparing its timestamp.
- var p *channeldb.ChannelEdgePolicy
- if msg.ChannelFlags&lnwire.ChanUpdateDirection == 0 {
- p = p1
- } else {
- p = p2
- }
-
- // If the policy is still unknown, then we can consider this
- // policy fresh.
- if p == nil {
- return false
- }
-
- timestamp := time.Unix(int64(msg.Timestamp), 0)
- return p.LastUpdate.After(timestamp)
-
- default:
- // We'll make sure to not mark any unsupported messages as stale
- // to ensure they are not removed.
- return false
- }
-}
-
-// updateChannel creates a new fully signed update for the channel, and updates
-// the underlying graph with the new state.
-func (d *AuthenticatedGossiper) updateChannel(info *channeldb.ChannelEdgeInfo,
- edge *channeldb.ChannelEdgePolicy) (*lnwire.ChannelAnnouncement,
- *lnwire.ChannelUpdate, er.R) {
-
- // Parse the unsigned edge into a channel update.
- chanUpdate := netann.UnsignedChannelUpdateFromEdge(info, edge)
-
- // We'll generate a new signature over a digest of the channel
- // announcement itself and update the timestamp to ensure it propagate.
- err := netann.SignChannelUpdate(
- d.cfg.AnnSigner, d.selfKey, chanUpdate,
- netann.ChanUpdSetTimestamp,
- )
- if err != nil {
- return nil, nil, err
- }
-
- // Next, we'll set the new signature in place, and update the reference
- // in the backing slice.
- edge.LastUpdate = time.Unix(int64(chanUpdate.Timestamp), 0)
- edge.SigBytes = chanUpdate.Signature.ToSignatureBytes()
-
- // To ensure that our signature is valid, we'll verify it ourself
- // before committing it to the slice returned.
- err = routing.ValidateChannelUpdateAnn(d.selfKey, info.Capacity, chanUpdate)
- if err != nil {
- return nil, nil, er.Errorf("generated invalid channel "+
- "update sig: %v", err)
- }
-
- // Finally, we'll write the new edge policy to disk.
- if err := d.cfg.Router.UpdateEdge(edge); err != nil {
- return nil, nil, err
- }
-
- // We'll also create the original channel announcement so the two can
- // be broadcast along side each other (if necessary), but only if we
- // have a full channel announcement for this channel.
- var chanAnn *lnwire.ChannelAnnouncement
- if info.AuthProof != nil {
- chanID := lnwire.NewShortChanIDFromInt(info.ChannelID)
- chanAnn = &lnwire.ChannelAnnouncement{
- ShortChannelID: chanID,
- NodeID1: info.NodeKey1Bytes,
- NodeID2: info.NodeKey2Bytes,
- ChainHash: info.ChainHash,
- BitcoinKey1: info.BitcoinKey1Bytes,
- Features: lnwire.NewRawFeatureVector(),
- BitcoinKey2: info.BitcoinKey2Bytes,
- ExtraOpaqueData: edge.ExtraOpaqueData,
- }
- chanAnn.NodeSig1, err = lnwire.NewSigFromRawSignature(
- info.AuthProof.NodeSig1Bytes,
- )
- if err != nil {
- return nil, nil, err
- }
- chanAnn.NodeSig2, err = lnwire.NewSigFromRawSignature(
- info.AuthProof.NodeSig2Bytes,
- )
- if err != nil {
- return nil, nil, err
- }
- chanAnn.BitcoinSig1, err = lnwire.NewSigFromRawSignature(
- info.AuthProof.BitcoinSig1Bytes,
- )
- if err != nil {
- return nil, nil, err
- }
- chanAnn.BitcoinSig2, err = lnwire.NewSigFromRawSignature(
- info.AuthProof.BitcoinSig2Bytes,
- )
- if err != nil {
- return nil, nil, err
- }
- }
-
- return chanAnn, chanUpdate, err
-}
-
-// SyncManager returns the gossiper's SyncManager instance.
-func (d *AuthenticatedGossiper) SyncManager() *SyncManager {
- return d.syncMgr
-}
diff --git a/lnd/discovery/gossiper_test.go b/lnd/discovery/gossiper_test.go
deleted file mode 100644
index 9bc142c8..00000000
--- a/lnd/discovery/gossiper_test.go
+++ /dev/null
@@ -1,3943 +0,0 @@
-package discovery
-
-import (
- "bytes"
- "encoding/hex"
- "io/ioutil"
- "math/big"
- prand "math/rand"
- "net"
- "os"
- "reflect"
- "strings"
- "sync"
- "testing"
- "time"
-
- "github.com/davecgh/go-spew/spew"
- "github.com/pkt-cash/pktd/btcec"
- "github.com/pkt-cash/pktd/btcutil"
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/chaincfg/chainhash"
- "github.com/pkt-cash/pktd/lnd/chainntnfs"
- "github.com/pkt-cash/pktd/lnd/channeldb"
- "github.com/pkt-cash/pktd/lnd/lnpeer"
- "github.com/pkt-cash/pktd/lnd/lntest/mock"
- "github.com/pkt-cash/pktd/lnd/lntest/wait"
- "github.com/pkt-cash/pktd/lnd/lnwire"
- "github.com/pkt-cash/pktd/lnd/netann"
- "github.com/pkt-cash/pktd/lnd/routing"
- "github.com/pkt-cash/pktd/lnd/routing/route"
- "github.com/pkt-cash/pktd/lnd/ticker"
- "github.com/pkt-cash/pktd/wire"
-)
-
-var (
- testAddr = &net.TCPAddr{IP: (net.IP)([]byte{0xA, 0x0, 0x0, 0x1}),
- Port: 9000}
- testAddrs = []net.Addr{testAddr}
- testFeatures = lnwire.NewRawFeatureVector()
- testSig = &btcec.Signature{
- R: new(big.Int),
- S: new(big.Int),
- }
- _, _ = testSig.R.SetString("63724406601629180062774974542967536251589935445068131219452686511677818569431", 10)
- _, _ = testSig.S.SetString("18801056069249825825291287104931333862866033135609736119018462340006816851118", 10)
-
- bitcoinKeyPriv1, _ = btcec.NewPrivateKey(btcec.S256())
- bitcoinKeyPub1 = bitcoinKeyPriv1.PubKey()
-
- nodeKeyPriv1, _ = btcec.NewPrivateKey(btcec.S256())
- nodeKeyPub1 = nodeKeyPriv1.PubKey()
-
- bitcoinKeyPriv2, _ = btcec.NewPrivateKey(btcec.S256())
- bitcoinKeyPub2 = bitcoinKeyPriv2.PubKey()
-
- nodeKeyPriv2, _ = btcec.NewPrivateKey(btcec.S256())
- nodeKeyPub2 = nodeKeyPriv2.PubKey()
-
- trickleDelay = time.Millisecond * 100
- retransmitDelay = time.Hour * 1
- proofMatureDelta uint32
-
- // The test timestamp + rebroadcast interval makes sure messages won't
- // be rebroadcasted automaticallty during the tests.
- testTimestamp = uint32(1234567890)
- rebroadcastInterval = time.Hour * 1000000
-)
-
-// makeTestDB creates a new instance of the ChannelDB for testing purposes. A
-// callback which cleans up the created temporary directories is also returned
-// and intended to be executed after the test completes.
-func makeTestDB() (*channeldb.DB, func(), er.R) {
- // First, create a temporary directory to be used for the duration of
- // this test.
- tempDirName, errr := ioutil.TempDir("", "channeldb")
- if errr != nil {
- return nil, nil, er.E(errr)
- }
-
- // Next, create channeldb for the first time.
- cdb, err := channeldb.Open(tempDirName)
- if err != nil {
- return nil, nil, err
- }
-
- cleanUp := func() {
- cdb.Close()
- os.RemoveAll(tempDirName)
- }
-
- return cdb, cleanUp, nil
-}
-
-type mockGraphSource struct {
- bestHeight uint32
-
- mu sync.Mutex
- nodes []channeldb.LightningNode
- infos map[uint64]channeldb.ChannelEdgeInfo
- edges map[uint64][]channeldb.ChannelEdgePolicy
- zombies map[uint64][][33]byte
-}
-
-func newMockRouter(height uint32) *mockGraphSource {
- return &mockGraphSource{
- bestHeight: height,
- infos: make(map[uint64]channeldb.ChannelEdgeInfo),
- edges: make(map[uint64][]channeldb.ChannelEdgePolicy),
- zombies: make(map[uint64][][33]byte),
- }
-}
-
-var _ routing.ChannelGraphSource = (*mockGraphSource)(nil)
-
-func (r *mockGraphSource) AddNode(node *channeldb.LightningNode) er.R {
- r.mu.Lock()
- defer r.mu.Unlock()
-
- r.nodes = append(r.nodes, *node)
- return nil
-}
-
-func (r *mockGraphSource) AddEdge(info *channeldb.ChannelEdgeInfo) er.R {
- r.mu.Lock()
- defer r.mu.Unlock()
-
- if _, ok := r.infos[info.ChannelID]; ok {
- return er.New("info already exist")
- }
-
- r.infos[info.ChannelID] = *info
- return nil
-}
-
-func (r *mockGraphSource) UpdateEdge(edge *channeldb.ChannelEdgePolicy) er.R {
- r.mu.Lock()
- defer r.mu.Unlock()
-
- if len(r.edges[edge.ChannelID]) == 0 {
- r.edges[edge.ChannelID] = make([]channeldb.ChannelEdgePolicy, 2)
- }
-
- if edge.ChannelFlags&lnwire.ChanUpdateDirection == 0 {
- r.edges[edge.ChannelID][0] = *edge
- } else {
- r.edges[edge.ChannelID][1] = *edge
- }
-
- return nil
-}
-
-func (r *mockGraphSource) CurrentBlockHeight() (uint32, er.R) {
- return r.bestHeight, nil
-}
-
-func (r *mockGraphSource) AddProof(chanID lnwire.ShortChannelID,
- proof *channeldb.ChannelAuthProof) er.R {
-
- r.mu.Lock()
- defer r.mu.Unlock()
-
- chanIDInt := chanID.ToUint64()
- info, ok := r.infos[chanIDInt]
- if !ok {
- return er.New("channel does not exist")
- }
-
- info.AuthProof = proof
- r.infos[chanIDInt] = info
-
- return nil
-}
-
-func (r *mockGraphSource) ForEachNode(func(node *channeldb.LightningNode) er.R) er.R {
- return nil
-}
-
-func (r *mockGraphSource) ForAllOutgoingChannels(cb func(i *channeldb.ChannelEdgeInfo,
- c *channeldb.ChannelEdgePolicy) er.R) er.R {
-
- r.mu.Lock()
- defer r.mu.Unlock()
-
- chans := make(map[uint64]channeldb.ChannelEdge)
- for _, info := range r.infos {
- info := info
-
- edgeInfo := chans[info.ChannelID]
- edgeInfo.Info = &info
- chans[info.ChannelID] = edgeInfo
- }
- for _, edges := range r.edges {
- edges := edges
-
- edge := chans[edges[0].ChannelID]
- edge.Policy1 = &edges[0]
- chans[edges[0].ChannelID] = edge
- }
-
- for _, channel := range chans {
- cb(channel.Info, channel.Policy1)
- }
-
- return nil
-}
-
-func (r *mockGraphSource) ForEachChannel(func(chanInfo *channeldb.ChannelEdgeInfo,
- e1, e2 *channeldb.ChannelEdgePolicy) er.R) er.R {
- return nil
-}
-
-func (r *mockGraphSource) GetChannelByID(chanID lnwire.ShortChannelID) (
- *channeldb.ChannelEdgeInfo,
- *channeldb.ChannelEdgePolicy,
- *channeldb.ChannelEdgePolicy, er.R) {
-
- r.mu.Lock()
- defer r.mu.Unlock()
-
- chanIDInt := chanID.ToUint64()
- chanInfo, ok := r.infos[chanIDInt]
- if !ok {
- pubKeys, isZombie := r.zombies[chanIDInt]
- if !isZombie {
- return nil, nil, nil, channeldb.ErrEdgeNotFound.Default()
- }
-
- return &channeldb.ChannelEdgeInfo{
- NodeKey1Bytes: pubKeys[0],
- NodeKey2Bytes: pubKeys[1],
- }, nil, nil, channeldb.ErrZombieEdge.Default()
- }
-
- edges := r.edges[chanID.ToUint64()]
- if len(edges) == 0 {
- return &chanInfo, nil, nil, nil
- }
-
- var edge1 *channeldb.ChannelEdgePolicy
- if !reflect.DeepEqual(edges[0], channeldb.ChannelEdgePolicy{}) {
- edge1 = &edges[0]
- }
-
- var edge2 *channeldb.ChannelEdgePolicy
- if !reflect.DeepEqual(edges[1], channeldb.ChannelEdgePolicy{}) {
- edge2 = &edges[1]
- }
-
- return &chanInfo, edge1, edge2, nil
-}
-
-func (r *mockGraphSource) FetchLightningNode(
- nodePub route.Vertex) (*channeldb.LightningNode, er.R) {
-
- for _, node := range r.nodes {
- if bytes.Equal(nodePub[:], node.PubKeyBytes[:]) {
- return &node, nil
- }
- }
-
- return nil, channeldb.ErrGraphNodeNotFound.Default()
-}
-
-// IsStaleNode returns true if the graph source has a node announcement for the
-// target node with a more recent timestamp.
-func (r *mockGraphSource) IsStaleNode(nodePub route.Vertex, timestamp time.Time) bool {
- r.mu.Lock()
- defer r.mu.Unlock()
-
- for _, node := range r.nodes {
- if node.PubKeyBytes == nodePub {
- return node.LastUpdate.After(timestamp) ||
- node.LastUpdate.Equal(timestamp)
- }
- }
-
- // If we did not find the node among our existing graph nodes, we
- // require the node to already have a channel in the graph to not be
- // considered stale.
- for _, info := range r.infos {
- if info.NodeKey1Bytes == nodePub {
- return false
- }
- if info.NodeKey2Bytes == nodePub {
- return false
- }
- }
- return true
-}
-
-// IsPublicNode determines whether the given vertex is seen as a public node in
-// the graph from the graph's source node's point of view.
-func (r *mockGraphSource) IsPublicNode(node route.Vertex) (bool, er.R) {
- for _, info := range r.infos {
- if !bytes.Equal(node[:], info.NodeKey1Bytes[:]) &&
- !bytes.Equal(node[:], info.NodeKey2Bytes[:]) {
- continue
- }
-
- if info.AuthProof != nil {
- return true, nil
- }
- }
- return false, nil
-}
-
-// IsKnownEdge returns true if the graph source already knows of the passed
-// channel ID either as a live or zombie channel.
-func (r *mockGraphSource) IsKnownEdge(chanID lnwire.ShortChannelID) bool {
- r.mu.Lock()
- defer r.mu.Unlock()
-
- chanIDInt := chanID.ToUint64()
- _, exists := r.infos[chanIDInt]
- _, isZombie := r.zombies[chanIDInt]
- return exists || isZombie
-}
-
-// IsStaleEdgePolicy returns true if the graph source has a channel edge for
-// the passed channel ID (and flags) that have a more recent timestamp.
-func (r *mockGraphSource) IsStaleEdgePolicy(chanID lnwire.ShortChannelID,
- timestamp time.Time, flags lnwire.ChanUpdateChanFlags) bool {
-
- r.mu.Lock()
- defer r.mu.Unlock()
-
- chanIDInt := chanID.ToUint64()
- edges, ok := r.edges[chanIDInt]
- if !ok {
- // Since the edge doesn't exist, we'll check our zombie index as
- // well.
- _, isZombie := r.zombies[chanIDInt]
- if !isZombie {
- return false
- }
-
- // Since it exists within our zombie index, we'll check that it
- // respects the router's live edge horizon to determine whether
- // it is stale or not.
- return time.Since(timestamp) > routing.DefaultChannelPruneExpiry
- }
-
- switch {
- case flags&lnwire.ChanUpdateDirection == 0 &&
- !reflect.DeepEqual(edges[0], channeldb.ChannelEdgePolicy{}):
-
- return !timestamp.After(edges[0].LastUpdate)
-
- case flags&lnwire.ChanUpdateDirection == 1 &&
- !reflect.DeepEqual(edges[1], channeldb.ChannelEdgePolicy{}):
-
- return !timestamp.After(edges[1].LastUpdate)
-
- default:
- return false
- }
-}
-
-// MarkEdgeLive clears an edge from our zombie index, deeming it as live.
-//
-// NOTE: This method is part of the ChannelGraphSource interface.
-func (r *mockGraphSource) MarkEdgeLive(chanID lnwire.ShortChannelID) er.R {
- r.mu.Lock()
- defer r.mu.Unlock()
- delete(r.zombies, chanID.ToUint64())
- return nil
-}
-
-// MarkEdgeZombie marks an edge as a zombie within our zombie index.
-func (r *mockGraphSource) MarkEdgeZombie(chanID lnwire.ShortChannelID, pubKey1,
- pubKey2 [33]byte) er.R {
-
- r.mu.Lock()
- defer r.mu.Unlock()
- r.zombies[chanID.ToUint64()] = [][33]byte{pubKey1, pubKey2}
- return nil
-}
-
-type mockNotifier struct {
- clientCounter uint32
- epochClients map[uint32]chan *chainntnfs.BlockEpoch
-
- sync.RWMutex
-}
-
-func newMockNotifier() *mockNotifier {
- return &mockNotifier{
- epochClients: make(map[uint32]chan *chainntnfs.BlockEpoch),
- }
-}
-
-func (m *mockNotifier) RegisterConfirmationsNtfn(txid *chainhash.Hash,
- _ []byte, numConfs, _ uint32) (*chainntnfs.ConfirmationEvent, er.R) {
-
- return nil, nil
-}
-
-func (m *mockNotifier) RegisterSpendNtfn(outpoint *wire.OutPoint, _ []byte,
- _ uint32) (*chainntnfs.SpendEvent, er.R) {
- return nil, nil
-}
-
-func (m *mockNotifier) notifyBlock(hash chainhash.Hash, height uint32) {
- m.RLock()
- defer m.RUnlock()
-
- for _, client := range m.epochClients {
- client <- &chainntnfs.BlockEpoch{
- Height: int32(height),
- Hash: &hash,
- }
- }
-}
-
-func (m *mockNotifier) RegisterBlockEpochNtfn(
- bestBlock *chainntnfs.BlockEpoch) (*chainntnfs.BlockEpochEvent, er.R) {
- m.RLock()
- defer m.RUnlock()
-
- epochChan := make(chan *chainntnfs.BlockEpoch)
- clientID := m.clientCounter
- m.clientCounter++
- m.epochClients[clientID] = epochChan
-
- return &chainntnfs.BlockEpochEvent{
- Epochs: epochChan,
- Cancel: func() {},
- }, nil
-}
-
-func (m *mockNotifier) Start() er.R {
- return nil
-}
-
-func (m *mockNotifier) Started() bool {
- return true
-}
-
-func (m *mockNotifier) Stop() er.R {
- return nil
-}
-
-type annBatch struct {
- nodeAnn1 *lnwire.NodeAnnouncement
- nodeAnn2 *lnwire.NodeAnnouncement
-
- localChanAnn *lnwire.ChannelAnnouncement
- remoteChanAnn *lnwire.ChannelAnnouncement
-
- chanUpdAnn1 *lnwire.ChannelUpdate
- chanUpdAnn2 *lnwire.ChannelUpdate
-
- localProofAnn *lnwire.AnnounceSignatures
- remoteProofAnn *lnwire.AnnounceSignatures
-}
-
-func createAnnouncements(blockHeight uint32) (*annBatch, er.R) {
- var err er.R
- var batch annBatch
- timestamp := testTimestamp
-
- batch.nodeAnn1, err = createNodeAnnouncement(nodeKeyPriv1, timestamp)
- if err != nil {
- return nil, err
- }
-
- batch.nodeAnn2, err = createNodeAnnouncement(nodeKeyPriv2, timestamp)
- if err != nil {
- return nil, err
- }
-
- batch.remoteChanAnn, err = createRemoteChannelAnnouncement(blockHeight)
- if err != nil {
- return nil, err
- }
-
- batch.remoteProofAnn = &lnwire.AnnounceSignatures{
- ShortChannelID: lnwire.ShortChannelID{
- BlockHeight: blockHeight,
- },
- NodeSignature: batch.remoteChanAnn.NodeSig2,
- BitcoinSignature: batch.remoteChanAnn.BitcoinSig2,
- }
-
- batch.localChanAnn, err = createRemoteChannelAnnouncement(blockHeight)
- if err != nil {
- return nil, err
- }
-
- batch.localProofAnn = &lnwire.AnnounceSignatures{
- ShortChannelID: lnwire.ShortChannelID{
- BlockHeight: blockHeight,
- },
- NodeSignature: batch.localChanAnn.NodeSig1,
- BitcoinSignature: batch.localChanAnn.BitcoinSig1,
- }
-
- batch.chanUpdAnn1, err = createUpdateAnnouncement(
- blockHeight, 0, nodeKeyPriv1, timestamp,
- )
- if err != nil {
- return nil, err
- }
-
- batch.chanUpdAnn2, err = createUpdateAnnouncement(
- blockHeight, 1, nodeKeyPriv2, timestamp,
- )
- if err != nil {
- return nil, err
- }
-
- return &batch, nil
-
-}
-
-func createNodeAnnouncement(priv *btcec.PrivateKey,
- timestamp uint32, extraBytes ...[]byte) (*lnwire.NodeAnnouncement, er.R) {
-
- var err er.R
- k := hex.EncodeToString(priv.Serialize())
- alias, err := lnwire.NewNodeAlias("kek" + k[:10])
- if err != nil {
- return nil, err
- }
-
- a := &lnwire.NodeAnnouncement{
- Timestamp: timestamp,
- Addresses: testAddrs,
- Alias: alias,
- Features: testFeatures,
- }
- copy(a.NodeID[:], priv.PubKey().SerializeCompressed())
- if len(extraBytes) == 1 {
- a.ExtraOpaqueData = extraBytes[0]
- }
-
- signer := mock.SingleSigner{Privkey: priv}
- sig, err := netann.SignAnnouncement(&signer, priv.PubKey(), a)
- if err != nil {
- return nil, err
- }
-
- a.Signature, err = lnwire.NewSigFromSignature(sig)
- if err != nil {
- return nil, err
- }
-
- return a, nil
-}
-
-func createUpdateAnnouncement(blockHeight uint32,
- flags lnwire.ChanUpdateChanFlags,
- nodeKey *btcec.PrivateKey, timestamp uint32,
- extraBytes ...[]byte) (*lnwire.ChannelUpdate, er.R) {
-
- var err er.R
-
- htlcMinMsat := lnwire.MilliSatoshi(prand.Int63())
- a := &lnwire.ChannelUpdate{
- ShortChannelID: lnwire.ShortChannelID{
- BlockHeight: blockHeight,
- },
- Timestamp: timestamp,
- MessageFlags: lnwire.ChanUpdateOptionMaxHtlc,
- ChannelFlags: flags,
- TimeLockDelta: uint16(prand.Int63()),
- HtlcMinimumMsat: htlcMinMsat,
-
- // Since the max HTLC must be greater than the min HTLC to pass channel
- // update validation, set it to double the min htlc.
- HtlcMaximumMsat: 2 * htlcMinMsat,
- FeeRate: uint32(prand.Int31()),
- BaseFee: uint32(prand.Int31()),
- }
- if len(extraBytes) == 1 {
- a.ExtraOpaqueData = extraBytes[0]
- }
-
- err = signUpdate(nodeKey, a)
- if err != nil {
- return nil, err
- }
-
- return a, nil
-}
-
-func signUpdate(nodeKey *btcec.PrivateKey, a *lnwire.ChannelUpdate) er.R {
- pub := nodeKey.PubKey()
- signer := mock.SingleSigner{Privkey: nodeKey}
- sig, err := netann.SignAnnouncement(&signer, pub, a)
- if err != nil {
- return err
- }
-
- a.Signature, err = lnwire.NewSigFromSignature(sig)
- if err != nil {
- return err
- }
-
- return nil
-}
-
-func createAnnouncementWithoutProof(blockHeight uint32,
- extraBytes ...[]byte) *lnwire.ChannelAnnouncement {
-
- a := &lnwire.ChannelAnnouncement{
- ShortChannelID: lnwire.ShortChannelID{
- BlockHeight: blockHeight,
- TxIndex: 0,
- TxPosition: 0,
- },
- Features: testFeatures,
- }
- copy(a.NodeID1[:], nodeKeyPub1.SerializeCompressed())
- copy(a.NodeID2[:], nodeKeyPub2.SerializeCompressed())
- copy(a.BitcoinKey1[:], bitcoinKeyPub1.SerializeCompressed())
- copy(a.BitcoinKey2[:], bitcoinKeyPub2.SerializeCompressed())
- if len(extraBytes) == 1 {
- a.ExtraOpaqueData = extraBytes[0]
- }
-
- return a
-}
-
-func createRemoteChannelAnnouncement(blockHeight uint32,
- extraBytes ...[]byte) (*lnwire.ChannelAnnouncement, er.R) {
-
- a := createAnnouncementWithoutProof(blockHeight, extraBytes...)
-
- pub := nodeKeyPriv1.PubKey()
- signer := mock.SingleSigner{Privkey: nodeKeyPriv1}
- sig, err := netann.SignAnnouncement(&signer, pub, a)
- if err != nil {
- return nil, err
- }
- a.NodeSig1, err = lnwire.NewSigFromSignature(sig)
- if err != nil {
- return nil, err
- }
-
- pub = nodeKeyPriv2.PubKey()
- signer = mock.SingleSigner{Privkey: nodeKeyPriv2}
- sig, err = netann.SignAnnouncement(&signer, pub, a)
- if err != nil {
- return nil, err
- }
- a.NodeSig2, err = lnwire.NewSigFromSignature(sig)
- if err != nil {
- return nil, err
- }
-
- pub = bitcoinKeyPriv1.PubKey()
- signer = mock.SingleSigner{Privkey: bitcoinKeyPriv1}
- sig, err = netann.SignAnnouncement(&signer, pub, a)
- if err != nil {
- return nil, err
- }
- a.BitcoinSig1, err = lnwire.NewSigFromSignature(sig)
- if err != nil {
- return nil, err
- }
-
- pub = bitcoinKeyPriv2.PubKey()
- signer = mock.SingleSigner{Privkey: bitcoinKeyPriv2}
- sig, err = netann.SignAnnouncement(&signer, pub, a)
- if err != nil {
- return nil, err
- }
- a.BitcoinSig2, err = lnwire.NewSigFromSignature(sig)
- if err != nil {
- return nil, err
- }
-
- return a, nil
-}
-
-type testCtx struct {
- gossiper *AuthenticatedGossiper
- router *mockGraphSource
- notifier *mockNotifier
- broadcastedMessage chan msgWithSenders
-}
-
-func createTestCtx(startHeight uint32) (*testCtx, func(), er.R) {
- // Next we'll initialize an instance of the channel router with mock
- // versions of the chain and channel notifier. As we don't need to test
- // any p2p functionality, the peer send and switch send,
- // broadcast functions won't be populated.
- notifier := newMockNotifier()
- router := newMockRouter(startHeight)
-
- db, cleanUpDb, err := makeTestDB()
- if err != nil {
- return nil, nil, err
- }
-
- waitingProofStore, err := channeldb.NewWaitingProofStore(db)
- if err != nil {
- cleanUpDb()
- return nil, nil, err
- }
-
- broadcastedMessage := make(chan msgWithSenders, 10)
- gossiper := New(Config{
- Notifier: notifier,
- Broadcast: func(senders map[route.Vertex]struct{},
- msgs ...lnwire.Message) er.R {
-
- for _, msg := range msgs {
- broadcastedMessage <- msgWithSenders{
- msg: msg,
- senders: senders,
- }
- }
-
- return nil
- },
- NotifyWhenOnline: func(target [33]byte,
- peerChan chan<- lnpeer.Peer) {
-
- pk, _ := btcec.ParsePubKey(target[:], btcec.S256())
- peerChan <- &mockPeer{pk, nil, nil}
- },
- NotifyWhenOffline: func(_ [33]byte) <-chan struct{} {
- c := make(chan struct{})
- return c
- },
- SelfNodeAnnouncement: func(bool) (lnwire.NodeAnnouncement, er.R) {
- return lnwire.NodeAnnouncement{
- Timestamp: testTimestamp,
- }, nil
- },
- Router: router,
- TrickleDelay: trickleDelay,
- RetransmitTicker: ticker.NewForce(retransmitDelay),
- RebroadcastInterval: rebroadcastInterval,
- ProofMatureDelta: proofMatureDelta,
- WaitingProofStore: waitingProofStore,
- MessageStore: newMockMessageStore(),
- RotateTicker: ticker.NewForce(DefaultSyncerRotationInterval),
- HistoricalSyncTicker: ticker.NewForce(DefaultHistoricalSyncInterval),
- NumActiveSyncers: 3,
- AnnSigner: &mock.SingleSigner{Privkey: nodeKeyPriv1},
- SubBatchDelay: time.Second * 5,
- MinimumBatchSize: 10,
- }, nodeKeyPub1)
-
- if err := gossiper.Start(); err != nil {
- cleanUpDb()
- return nil, nil, er.Errorf("unable to start router: %v", err)
- }
-
- // Mark the graph as synced in order to allow the announcements to be
- // broadcast.
- gossiper.syncMgr.markGraphSynced()
-
- cleanUp := func() {
- gossiper.Stop()
- cleanUpDb()
- }
-
- return &testCtx{
- router: router,
- notifier: notifier,
- gossiper: gossiper,
- broadcastedMessage: broadcastedMessage,
- }, cleanUp, nil
-}
-
-// TestProcessAnnouncement checks that mature announcements are propagated to
-// the router subsystem.
-func TestProcessAnnouncement(t *testing.T) {
- t.Parallel()
-
- timestamp := testTimestamp
- ctx, cleanup, err := createTestCtx(0)
- if err != nil {
- t.Fatalf("can't create context: %v", err)
- }
- defer cleanup()
-
- assertSenderExistence := func(sender *btcec.PublicKey, msg msgWithSenders) {
- if _, ok := msg.senders[route.NewVertex(sender)]; !ok {
- t.Fatalf("sender=%x not present in %v",
- sender.SerializeCompressed(), spew.Sdump(msg))
- }
- }
-
- nodePeer := &mockPeer{nodeKeyPriv1.PubKey(), nil, nil}
-
- // First, we'll craft a valid remote channel announcement and send it to
- // the gossiper so that it can be processed.
- ca, err := createRemoteChannelAnnouncement(0)
- if err != nil {
- t.Fatalf("can't create channel announcement: %v", err)
- }
-
- select {
- case err = <-ctx.gossiper.ProcessRemoteAnnouncement(ca, nodePeer):
- case <-time.After(2 * time.Second):
- t.Fatal("remote announcement not processed")
- }
- if err != nil {
- t.Fatalf("can't process remote announcement: %v", err)
- }
-
- // The announcement should be broadcast and included in our local view
- // of the graph.
- select {
- case msg := <-ctx.broadcastedMessage:
- assertSenderExistence(nodePeer.IdentityKey(), msg)
- case <-time.After(2 * trickleDelay):
- t.Fatal("announcement wasn't proceeded")
- }
-
- if len(ctx.router.infos) != 1 {
- t.Fatalf("edge wasn't added to router: %v", err)
- }
-
- // We'll then craft the channel policy of the remote party and also send
- // it to the gossiper.
- ua, err := createUpdateAnnouncement(0, 0, nodeKeyPriv1, timestamp)
- if err != nil {
- t.Fatalf("can't create update announcement: %v", err)
- }
-
- select {
- case err = <-ctx.gossiper.ProcessRemoteAnnouncement(ua, nodePeer):
- case <-time.After(2 * time.Second):
- t.Fatal("remote announcement not processed")
- }
- if err != nil {
- t.Fatalf("can't process remote announcement: %v", err)
- }
-
- // The channel policy should be broadcast to the rest of the network.
- select {
- case msg := <-ctx.broadcastedMessage:
- assertSenderExistence(nodePeer.IdentityKey(), msg)
- case <-time.After(2 * trickleDelay):
- t.Fatal("announcement wasn't proceeded")
- }
-
- if len(ctx.router.edges) != 1 {
- t.Fatalf("edge update wasn't added to router: %v", err)
- }
-
- // Finally, we'll craft the remote party's node announcement.
- na, err := createNodeAnnouncement(nodeKeyPriv1, timestamp)
- if err != nil {
- t.Fatalf("can't create node announcement: %v", err)
- }
-
- select {
- case err = <-ctx.gossiper.ProcessRemoteAnnouncement(na, nodePeer):
- case <-time.After(2 * time.Second):
- t.Fatal("remote announcement not processed")
- }
- if err != nil {
- t.Fatalf("can't process remote announcement: %v", err)
- }
-
- // It should also be broadcast to the network and included in our local
- // view of the graph.
- select {
- case msg := <-ctx.broadcastedMessage:
- assertSenderExistence(nodePeer.IdentityKey(), msg)
- case <-time.After(2 * trickleDelay):
- t.Fatal("announcement wasn't proceeded")
- }
-
- if len(ctx.router.nodes) != 1 {
- t.Fatalf("node wasn't added to router: %v", err)
- }
-}
-
-// TestPrematureAnnouncement checks that premature announcements are
-// not propagated to the router subsystem until block with according
-// block height received.
-func TestPrematureAnnouncement(t *testing.T) {
- t.Parallel()
-
- timestamp := testTimestamp
-
- ctx, cleanup, err := createTestCtx(0)
- if err != nil {
- t.Fatalf("can't create context: %v", err)
- }
- defer cleanup()
-
- _, err = createNodeAnnouncement(nodeKeyPriv1, timestamp)
- if err != nil {
- t.Fatalf("can't create node announcement: %v", err)
- }
-
- nodePeer := &mockPeer{nodeKeyPriv1.PubKey(), nil, nil}
-
- // Pretending that we receive the valid channel announcement from
- // remote side, but block height of this announcement is greater than
- // highest know to us, for that reason it should be added to the
- // repeat/premature batch.
- ca, err := createRemoteChannelAnnouncement(1)
- if err != nil {
- t.Fatalf("can't create channel announcement: %v", err)
- }
-
- select {
- case <-ctx.gossiper.ProcessRemoteAnnouncement(ca, nodePeer):
- t.Fatal("announcement was proceeded")
- case <-time.After(100 * time.Millisecond):
- }
-
- if len(ctx.router.infos) != 0 {
- t.Fatal("edge was added to router")
- }
-
- // Pretending that we receive the valid channel update announcement from
- // remote side, but block height of this announcement is greater than
- // highest know to us, for that reason it should be added to the
- // repeat/premature batch.
- ua, err := createUpdateAnnouncement(1, 0, nodeKeyPriv1, timestamp)
- if err != nil {
- t.Fatalf("can't create update announcement: %v", err)
- }
-
- select {
- case <-ctx.gossiper.ProcessRemoteAnnouncement(ua, nodePeer):
- t.Fatal("announcement was proceeded")
- case <-time.After(100 * time.Millisecond):
- }
-
- if len(ctx.router.edges) != 0 {
- t.Fatal("edge update was added to router")
- }
-
- // Generate new block and waiting the previously added announcements
- // to be proceeded.
- newBlock := &wire.MsgBlock{}
- ctx.notifier.notifyBlock(newBlock.Header.BlockHash(), 1)
-
- select {
- case <-ctx.broadcastedMessage:
- case <-time.After(2 * trickleDelay):
- t.Fatal("announcement wasn't broadcasted")
- }
-
- if len(ctx.router.infos) != 1 {
- t.Fatalf("edge wasn't added to router: %v", err)
- }
-
- select {
- case <-ctx.broadcastedMessage:
- case <-time.After(2 * trickleDelay):
- t.Fatal("announcement wasn't broadcasted")
- }
-
- if len(ctx.router.edges) != 1 {
- t.Fatalf("edge update wasn't added to router: %v", err)
- }
-}
-
-// TestSignatureAnnouncementLocalFirst ensures that the AuthenticatedGossiper
-// properly processes partial and fully announcement signatures message.
-func TestSignatureAnnouncementLocalFirst(t *testing.T) {
- t.Parallel()
-
- ctx, cleanup, err := createTestCtx(uint32(proofMatureDelta))
- if err != nil {
- t.Fatalf("can't create context: %v", err)
- }
- defer cleanup()
-
- // Set up a channel that we can use to inspect the messages sent
- // directly from the gossiper.
- sentMsgs := make(chan lnwire.Message, 10)
- ctx.gossiper.reliableSender.cfg.NotifyWhenOnline = func(target [33]byte,
- peerChan chan<- lnpeer.Peer) {
-
- pk, _ := btcec.ParsePubKey(target[:], btcec.S256())
-
- select {
- case peerChan <- &mockPeer{pk, sentMsgs, ctx.gossiper.quit}:
- case <-ctx.gossiper.quit:
- }
- }
-
- batch, err := createAnnouncements(0)
- if err != nil {
- t.Fatalf("can't generate announcements: %v", err)
- }
-
- localKey, err := btcec.ParsePubKey(batch.nodeAnn1.NodeID[:], btcec.S256())
- if err != nil {
- t.Fatalf("unable to parse pubkey: %v", err)
- }
- remoteKey, err := btcec.ParsePubKey(batch.nodeAnn2.NodeID[:], btcec.S256())
- if err != nil {
- t.Fatalf("unable to parse pubkey: %v", err)
- }
- remotePeer := &mockPeer{remoteKey, sentMsgs, ctx.gossiper.quit}
-
- // Recreate lightning network topology. Initialize router with channel
- // between two nodes.
- select {
- case err = <-ctx.gossiper.ProcessLocalAnnouncement(
- batch.localChanAnn, localKey,
- ):
- case <-time.After(2 * time.Second):
- t.Fatal("did not process local announcement")
- }
- if err != nil {
- t.Fatalf("unable to process channel ann: %v", err)
- }
- select {
- case <-ctx.broadcastedMessage:
- t.Fatal("channel announcement was broadcast")
- case <-time.After(2 * trickleDelay):
- }
-
- select {
- case err = <-ctx.gossiper.ProcessLocalAnnouncement(
- batch.chanUpdAnn1, localKey,
- ):
- case <-time.After(2 * time.Second):
- t.Fatal("did not process local announcement")
- }
- if err != nil {
- t.Fatalf("unable to process channel update: %v", err)
- }
- select {
- case <-ctx.broadcastedMessage:
- t.Fatal("channel update announcement was broadcast")
- case <-time.After(2 * trickleDelay):
- }
-
- select {
- case err = <-ctx.gossiper.ProcessLocalAnnouncement(
- batch.nodeAnn1, localKey,
- ):
- case <-time.After(2 * time.Second):
- t.Fatal("did not process local announcement")
- }
- if err != nil {
- t.Fatalf("unable to process node ann: %v", err)
- }
- select {
- case <-ctx.broadcastedMessage:
- t.Fatal("node announcement was broadcast")
- case <-time.After(2 * trickleDelay):
- }
-
- // The local ChannelUpdate should now be sent directly to the remote peer,
- // such that the edge can be used for routing, regardless if this channel
- // is announced or not (private channel).
- select {
- case msg := <-sentMsgs:
- assertMessage(t, batch.chanUpdAnn1, msg)
- case <-time.After(1 * time.Second):
- t.Fatal("gossiper did not send channel update to peer")
- }
-
- select {
- case err = <-ctx.gossiper.ProcessRemoteAnnouncement(
- batch.chanUpdAnn2, remotePeer,
- ):
- case <-time.After(2 * time.Second):
- t.Fatal("did not process remote announcement")
- }
- if err != nil {
- t.Fatalf("unable to process channel update: %v", err)
- }
- select {
- case <-ctx.broadcastedMessage:
- t.Fatal("channel update announcement was broadcast")
- case <-time.After(2 * trickleDelay):
- }
-
- select {
- case err = <-ctx.gossiper.ProcessRemoteAnnouncement(
- batch.nodeAnn2, remotePeer,
- ):
- case <-time.After(2 * time.Second):
- t.Fatal("did not process remote announcement")
- }
- if err != nil {
- t.Fatalf("unable to process node ann: %v", err)
- }
- select {
- case <-ctx.broadcastedMessage:
- t.Fatal("node announcement was broadcast")
- case <-time.After(2 * trickleDelay):
- }
-
- // Pretending that we receive local channel announcement from funding
- // manager, thereby kick off the announcement exchange process.
- select {
- case err = <-ctx.gossiper.ProcessLocalAnnouncement(
- batch.localProofAnn, localKey,
- ):
- case <-time.After(2 * time.Second):
- t.Fatal("did not process remote announcement")
- }
- if err != nil {
- t.Fatalf("unable to process local proof: %v", err)
- }
-
- select {
- case <-ctx.broadcastedMessage:
- t.Fatal("announcements were broadcast")
- case <-time.After(2 * trickleDelay):
- }
-
- number := 0
- if err := ctx.gossiper.cfg.WaitingProofStore.ForAll(
- func(*channeldb.WaitingProof) er.R {
- number++
- return nil
- },
- func() {
- number = 0
- },
- ); err != nil {
- t.Fatalf("unable to retrieve objects from store: %v", err)
- }
-
- if number != 1 {
- t.Fatal("wrong number of objects in storage")
- }
-
- select {
- case err = <-ctx.gossiper.ProcessRemoteAnnouncement(
- batch.remoteProofAnn, remotePeer,
- ):
- case <-time.After(2 * time.Second):
- t.Fatal("did not process remote announcement")
- }
- if err != nil {
- t.Fatalf("unable to process remote proof: %v", err)
- }
-
- for i := 0; i < 5; i++ {
- select {
- case <-ctx.broadcastedMessage:
- case <-time.After(time.Second):
- t.Fatal("announcement wasn't broadcast")
- }
- }
-
- number = 0
- if err := ctx.gossiper.cfg.WaitingProofStore.ForAll(
- func(*channeldb.WaitingProof) er.R {
- number++
- return nil
- },
- func() {
- number = 0
- },
- ); err != nil && !channeldb.ErrWaitingProofNotFound.Is(err) {
- t.Fatalf("unable to retrieve objects from store: %v", err)
- }
-
- if number != 0 {
- t.Fatal("waiting proof should be removed from storage")
- }
-}
-
-// TestOrphanSignatureAnnouncement ensures that the gossiper properly
-// processes announcement with unknown channel ids.
-func TestOrphanSignatureAnnouncement(t *testing.T) {
- t.Parallel()
-
- ctx, cleanup, err := createTestCtx(uint32(proofMatureDelta))
- if err != nil {
- t.Fatalf("can't create context: %v", err)
- }
- defer cleanup()
-
- // Set up a channel that we can use to inspect the messages sent
- // directly from the gossiper.
- sentMsgs := make(chan lnwire.Message, 10)
- ctx.gossiper.reliableSender.cfg.NotifyWhenOnline = func(target [33]byte,
- peerChan chan<- lnpeer.Peer) {
-
- pk, _ := btcec.ParsePubKey(target[:], btcec.S256())
-
- select {
- case peerChan <- &mockPeer{pk, sentMsgs, ctx.gossiper.quit}:
- case <-ctx.gossiper.quit:
- }
- }
-
- batch, err := createAnnouncements(0)
- if err != nil {
- t.Fatalf("can't generate announcements: %v", err)
- }
-
- localKey, err := btcec.ParsePubKey(batch.nodeAnn1.NodeID[:], btcec.S256())
- if err != nil {
- t.Fatalf("unable to parse pubkey: %v", err)
- }
- remoteKey, err := btcec.ParsePubKey(batch.nodeAnn2.NodeID[:], btcec.S256())
- if err != nil {
- t.Fatalf("unable to parse pubkey: %v", err)
- }
- remotePeer := &mockPeer{remoteKey, sentMsgs, ctx.gossiper.quit}
-
- // Pretending that we receive local channel announcement from funding
- // manager, thereby kick off the announcement exchange process, in
- // this case the announcement should be added in the orphan batch
- // because we haven't announce the channel yet.
- select {
- case err = <-ctx.gossiper.ProcessRemoteAnnouncement(batch.remoteProofAnn,
- remotePeer):
- case <-time.After(2 * time.Second):
- t.Fatal("did not process remote announcement")
- }
- if err != nil {
- t.Fatalf("unable to proceed announcement: %v", err)
- }
-
- number := 0
- if err := ctx.gossiper.cfg.WaitingProofStore.ForAll(
- func(*channeldb.WaitingProof) er.R {
- number++
- return nil
- },
- func() {
- number = 0
- },
- ); err != nil {
- t.Fatalf("unable to retrieve objects from store: %v", err)
- }
-
- if number != 1 {
- t.Fatal("wrong number of objects in storage")
- }
-
- // Recreate lightning network topology. Initialize router with channel
- // between two nodes.
- select {
- case err = <-ctx.gossiper.ProcessLocalAnnouncement(batch.localChanAnn,
- localKey):
- case <-time.After(2 * time.Second):
- t.Fatal("did not process local announcement")
- }
-
- if err != nil {
- t.Fatalf("unable to process: %v", err)
- }
-
- select {
- case <-ctx.broadcastedMessage:
- t.Fatal("channel announcement was broadcast")
- case <-time.After(2 * trickleDelay):
- }
-
- select {
- case err = <-ctx.gossiper.ProcessLocalAnnouncement(batch.chanUpdAnn1,
- localKey):
- case <-time.After(2 * time.Second):
- t.Fatal("did not process local announcement")
- }
- if err != nil {
- t.Fatalf("unable to process: %v", err)
- }
-
- select {
- case <-ctx.broadcastedMessage:
- t.Fatal("channel update announcement was broadcast")
- case <-time.After(2 * trickleDelay):
- }
-
- select {
- case err = <-ctx.gossiper.ProcessLocalAnnouncement(
- batch.nodeAnn1, localKey,
- ):
- case <-time.After(2 * time.Second):
- t.Fatal("did not process local announcement")
- }
- if err != nil {
- t.Fatalf("unable to process node ann: %v", err)
- }
- select {
- case <-ctx.broadcastedMessage:
- t.Fatal("node announcement was broadcast")
- case <-time.After(2 * trickleDelay):
- }
-
- // The local ChannelUpdate should now be sent directly to the remote peer,
- // such that the edge can be used for routing, regardless if this channel
- // is announced or not (private channel).
- select {
- case msg := <-sentMsgs:
- assertMessage(t, batch.chanUpdAnn1, msg)
- case <-time.After(1 * time.Second):
- t.Fatal("gossiper did not send channel update to peer")
- }
-
- select {
- case err = <-ctx.gossiper.ProcessRemoteAnnouncement(batch.chanUpdAnn2,
- remotePeer):
- case <-time.After(2 * time.Second):
- t.Fatal("did not process remote announcement")
- }
- if err != nil {
- t.Fatalf("unable to process node ann: %v", err)
- }
- select {
- case <-ctx.broadcastedMessage:
- t.Fatal("channel update announcement was broadcast")
- case <-time.After(2 * trickleDelay):
- }
-
- select {
- case err = <-ctx.gossiper.ProcessRemoteAnnouncement(
- batch.nodeAnn2, remotePeer,
- ):
- case <-time.After(2 * time.Second):
- t.Fatal("did not process remote announcement")
- }
- if err != nil {
- t.Fatalf("unable to process: %v", err)
- }
- select {
- case <-ctx.broadcastedMessage:
- t.Fatal("node announcement announcement was broadcast")
- case <-time.After(2 * trickleDelay):
- }
-
- // After that we process local announcement, and waiting to receive
- // the channel announcement.
- select {
- case err = <-ctx.gossiper.ProcessLocalAnnouncement(batch.localProofAnn,
- localKey):
- case <-time.After(2 * time.Second):
- t.Fatal("did not process remote announcement")
- }
- if err != nil {
- t.Fatalf("unable to process: %v", err)
- }
-
- // The local proof should be sent to the remote peer.
- select {
- case msg := <-sentMsgs:
- assertMessage(t, batch.localProofAnn, msg)
- case <-time.After(2 * time.Second):
- t.Fatalf("local proof was not sent to peer")
- }
-
- // And since both remote and local announcements are processed, we
- // should be broadcasting the final channel announcements.
- for i := 0; i < 5; i++ {
- select {
- case <-ctx.broadcastedMessage:
- case <-time.After(time.Second):
- t.Fatal("announcement wasn't broadcast")
- }
- }
-
- number = 0
- if err := ctx.gossiper.cfg.WaitingProofStore.ForAll(
- func(p *channeldb.WaitingProof) er.R {
- number++
- return nil
- },
- func() {
- number = 0
- },
- ); err != nil {
- t.Fatalf("unable to retrieve objects from store: %v", err)
- }
-
- if number != 0 {
- t.Fatalf("wrong number of objects in storage: %v", number)
- }
-}
-
-// TestSignatureAnnouncementRetryAtStartup tests that if we restart the
-// gossiper, it will retry sending the AnnounceSignatures to the peer if it did
-// not succeed before shutting down, and the full channel proof is not yet
-// assembled.
-func TestSignatureAnnouncementRetryAtStartup(t *testing.T) {
- t.Parallel()
-
- ctx, cleanup, err := createTestCtx(uint32(proofMatureDelta))
- if err != nil {
- t.Fatalf("can't create context: %v", err)
- }
- defer cleanup()
-
- batch, err := createAnnouncements(0)
- if err != nil {
- t.Fatalf("can't generate announcements: %v", err)
- }
-
- localKey, err := btcec.ParsePubKey(batch.nodeAnn1.NodeID[:], btcec.S256())
- if err != nil {
- t.Fatalf("unable to parse pubkey: %v", err)
- }
- remoteKey, err := btcec.ParsePubKey(batch.nodeAnn2.NodeID[:], btcec.S256())
- if err != nil {
- t.Fatalf("unable to parse pubkey: %v", err)
- }
-
- // Set up a channel to intercept the messages sent to the remote peer.
- sentToPeer := make(chan lnwire.Message, 1)
- remotePeer := &mockPeer{remoteKey, sentToPeer, ctx.gossiper.quit}
-
- // Since the reliable send to the remote peer of the local channel proof
- // requires a notification when the peer comes online, we'll capture the
- // channel through which it gets sent to control exactly when to
- // dispatch it.
- notifyPeers := make(chan chan<- lnpeer.Peer, 1)
- ctx.gossiper.reliableSender.cfg.NotifyWhenOnline = func(peer [33]byte,
- connectedChan chan<- lnpeer.Peer) {
- notifyPeers <- connectedChan
- }
-
- // Recreate lightning network topology. Initialize router with channel
- // between two nodes.
- select {
- case err = <-ctx.gossiper.ProcessLocalAnnouncement(
- batch.localChanAnn, localKey,
- ):
- case <-time.After(2 * time.Second):
- t.Fatal("did not process local announcement")
- }
- if err != nil {
- t.Fatalf("unable to process channel ann: %v", err)
- }
- select {
- case <-ctx.broadcastedMessage:
- t.Fatal("channel announcement was broadcast")
- case <-time.After(2 * trickleDelay):
- }
-
- // Pretending that we receive local channel announcement from funding
- // manager, thereby kick off the announcement exchange process.
- select {
- case err = <-ctx.gossiper.ProcessLocalAnnouncement(
- batch.localProofAnn, localKey,
- ):
- case <-time.After(2 * time.Second):
- t.Fatal("did not process remote announcement")
- }
- if err != nil {
- t.Fatalf("unable to process :%v", err)
- }
-
- // The gossiper should register for a notification for when the peer is
- // online.
- select {
- case <-notifyPeers:
- case <-time.After(2 * time.Second):
- t.Fatalf("gossiper did not ask to get notified when " +
- "peer is online")
- }
-
- // The proof should not be broadcast yet since we're still missing the
- // remote party's.
- select {
- case <-ctx.broadcastedMessage:
- t.Fatal("announcements were broadcast")
- case <-time.After(2 * trickleDelay):
- }
-
- // And it shouldn't be sent to the peer either as they are offline.
- select {
- case msg := <-sentToPeer:
- t.Fatalf("received unexpected message: %v", spew.Sdump(msg))
- case <-time.After(time.Second):
- }
-
- number := 0
- if err := ctx.gossiper.cfg.WaitingProofStore.ForAll(
- func(*channeldb.WaitingProof) er.R {
- number++
- return nil
- },
- func() {
- number = 0
- },
- ); err != nil {
- t.Fatalf("unable to retrieve objects from store: %v", err)
- }
-
- if number != 1 {
- t.Fatal("wrong number of objects in storage")
- }
-
- // Restart the gossiper and restore its original NotifyWhenOnline and
- // NotifyWhenOffline methods. This should trigger a new attempt to send
- // the message to the peer.
- ctx.gossiper.Stop()
- gossiper := New(Config{
- Notifier: ctx.gossiper.cfg.Notifier,
- Broadcast: ctx.gossiper.cfg.Broadcast,
- NotifyWhenOnline: ctx.gossiper.reliableSender.cfg.NotifyWhenOnline,
- NotifyWhenOffline: ctx.gossiper.reliableSender.cfg.NotifyWhenOffline,
- SelfNodeAnnouncement: ctx.gossiper.cfg.SelfNodeAnnouncement,
- Router: ctx.gossiper.cfg.Router,
- TrickleDelay: trickleDelay,
- RetransmitTicker: ticker.NewForce(retransmitDelay),
- RebroadcastInterval: rebroadcastInterval,
- ProofMatureDelta: proofMatureDelta,
- WaitingProofStore: ctx.gossiper.cfg.WaitingProofStore,
- MessageStore: ctx.gossiper.cfg.MessageStore,
- RotateTicker: ticker.NewForce(DefaultSyncerRotationInterval),
- HistoricalSyncTicker: ticker.NewForce(DefaultHistoricalSyncInterval),
- NumActiveSyncers: 3,
- MinimumBatchSize: 10,
- SubBatchDelay: time.Second * 5,
- }, ctx.gossiper.selfKey)
- if err != nil {
- t.Fatalf("unable to recreate gossiper: %v", err)
- }
- if err := gossiper.Start(); err != nil {
- t.Fatalf("unable to start recreated gossiper: %v", err)
- }
- defer gossiper.Stop()
-
- // Mark the graph as synced in order to allow the announcements to be
- // broadcast.
- gossiper.syncMgr.markGraphSynced()
-
- ctx.gossiper = gossiper
- remotePeer.quit = ctx.gossiper.quit
-
- // After starting up, the gossiper will see that it has a proof in the
- // WaitingProofStore, and will retry sending its part to the remote.
- // It should register for a notification for when the peer is online.
- var peerChan chan<- lnpeer.Peer
- select {
- case peerChan = <-notifyPeers:
- case <-time.After(2 * time.Second):
- t.Fatalf("gossiper did not ask to get notified when " +
- "peer is online")
- }
-
- // Notify that peer is now online. This should allow the proof to be
- // sent.
- peerChan <- remotePeer
-
-out:
- for {
- select {
- case msg := <-sentToPeer:
- // Since the ChannelUpdate will also be resent as it is
- // sent reliably, we'll need to filter it out.
- if _, ok := msg.(*lnwire.AnnounceSignatures); !ok {
- continue
- }
-
- assertMessage(t, batch.localProofAnn, msg)
- break out
- case <-time.After(2 * time.Second):
- t.Fatalf("gossiper did not send message when peer " +
- "came online")
- }
- }
-
- // Now exchanging the remote channel proof, the channel announcement
- // broadcast should continue as normal.
- select {
- case err = <-ctx.gossiper.ProcessRemoteAnnouncement(
- batch.remoteProofAnn, remotePeer,
- ):
- case <-time.After(2 * time.Second):
- t.Fatal("did not process remote announcement")
- }
- if err != nil {
- t.Fatalf("unable to process :%v", err)
- }
-
- select {
- case <-ctx.broadcastedMessage:
- case <-time.After(time.Second):
- t.Fatal("announcement wasn't broadcast")
- }
-
- number = 0
- if err := ctx.gossiper.cfg.WaitingProofStore.ForAll(
- func(*channeldb.WaitingProof) er.R {
- number++
- return nil
- },
- func() {
- number = 0
- },
- ); err != nil && !channeldb.ErrWaitingProofNotFound.Is(err) {
- t.Fatalf("unable to retrieve objects from store: %v", err)
- }
-
- if number != 0 {
- t.Fatal("waiting proof should be removed from storage")
- }
-}
-
-// TestSignatureAnnouncementFullProofWhenRemoteProof tests that if a remote
-// proof is received when we already have the full proof, the gossiper will send
-// the full proof (ChannelAnnouncement) to the remote peer.
-func TestSignatureAnnouncementFullProofWhenRemoteProof(t *testing.T) {
- t.Parallel()
-
- ctx, cleanup, err := createTestCtx(uint32(proofMatureDelta))
- if err != nil {
- t.Fatalf("can't create context: %v", err)
- }
- defer cleanup()
-
- batch, err := createAnnouncements(0)
- if err != nil {
- t.Fatalf("can't generate announcements: %v", err)
- }
-
- localKey, err := btcec.ParsePubKey(batch.nodeAnn1.NodeID[:], btcec.S256())
- if err != nil {
- t.Fatalf("unable to parse pubkey: %v", err)
- }
- remoteKey, err := btcec.ParsePubKey(batch.nodeAnn2.NodeID[:], btcec.S256())
- if err != nil {
- t.Fatalf("unable to parse pubkey: %v", err)
- }
-
- // Set up a channel we can use to inspect messages sent by the
- // gossiper to the remote peer.
- sentToPeer := make(chan lnwire.Message, 1)
- remotePeer := &mockPeer{remoteKey, sentToPeer, ctx.gossiper.quit}
-
- // Override NotifyWhenOnline to return the remote peer which we expect
- // meesages to be sent to.
- ctx.gossiper.reliableSender.cfg.NotifyWhenOnline = func(peer [33]byte,
- peerChan chan<- lnpeer.Peer) {
-
- peerChan <- remotePeer
- }
-
- // Recreate lightning network topology. Initialize router with channel
- // between two nodes.
- select {
- case err = <-ctx.gossiper.ProcessLocalAnnouncement(
- batch.localChanAnn, localKey,
- ):
- case <-time.After(2 * time.Second):
- t.Fatal("did not process local announcement")
- }
- if err != nil {
- t.Fatalf("unable to process channel ann: %v", err)
- }
- select {
- case <-ctx.broadcastedMessage:
- t.Fatal("channel announcement was broadcast")
- case <-time.After(2 * trickleDelay):
- }
-
- select {
- case err = <-ctx.gossiper.ProcessLocalAnnouncement(
- batch.chanUpdAnn1, localKey,
- ):
- case <-time.After(2 * time.Second):
- t.Fatal("did not process local announcement")
- }
- if err != nil {
- t.Fatalf("unable to process channel update: %v", err)
- }
- select {
- case <-ctx.broadcastedMessage:
- t.Fatal("channel update announcement was broadcast")
- case <-time.After(2 * trickleDelay):
- }
-
- select {
- case msg := <-sentToPeer:
- assertMessage(t, batch.chanUpdAnn1, msg)
- case <-time.After(2 * time.Second):
- t.Fatal("gossiper did not send channel update to remove peer")
- }
-
- select {
- case err = <-ctx.gossiper.ProcessLocalAnnouncement(
- batch.nodeAnn1, localKey,
- ):
- case <-time.After(2 * time.Second):
- t.Fatal("did not process local announcement")
- }
- if err != nil {
- t.Fatalf("unable to process node ann:%v", err)
- }
- select {
- case <-ctx.broadcastedMessage:
- t.Fatal("node announcement was broadcast")
- case <-time.After(2 * trickleDelay):
- }
-
- select {
- case err = <-ctx.gossiper.ProcessRemoteAnnouncement(
- batch.chanUpdAnn2, remotePeer,
- ):
- case <-time.After(2 * time.Second):
- t.Fatal("did not process remote announcement")
- }
- if err != nil {
- t.Fatalf("unable to process channel update: %v", err)
- }
- select {
- case <-ctx.broadcastedMessage:
- t.Fatal("channel update announcement was broadcast")
- case <-time.After(2 * trickleDelay):
- }
-
- select {
- case err = <-ctx.gossiper.ProcessRemoteAnnouncement(
- batch.nodeAnn2, remotePeer,
- ):
- case <-time.After(2 * time.Second):
- t.Fatal("did not process remote announcement")
- }
- if err != nil {
- t.Fatalf("unable to process node ann: %v", err)
- }
- select {
- case <-ctx.broadcastedMessage:
- t.Fatal("node announcement was broadcast")
- case <-time.After(2 * trickleDelay):
- }
-
- // Pretending that we receive local channel announcement from funding
- // manager, thereby kick off the announcement exchange process.
- select {
- case err = <-ctx.gossiper.ProcessLocalAnnouncement(
- batch.localProofAnn, localKey,
- ):
- case <-time.After(2 * time.Second):
- t.Fatal("did not process local announcement")
- }
- if err != nil {
- t.Fatalf("unable to process local proof: %v", err)
- }
-
- select {
- case err = <-ctx.gossiper.ProcessRemoteAnnouncement(
- batch.remoteProofAnn, remotePeer,
- ):
- case <-time.After(2 * time.Second):
- t.Fatal("did not process local announcement")
- }
- if err != nil {
- t.Fatalf("unable to process remote proof: %v", err)
- }
-
- // We expect the gossiper to send this message to the remote peer.
- select {
- case msg := <-sentToPeer:
- assertMessage(t, batch.localProofAnn, msg)
- case <-time.After(2 * time.Second):
- t.Fatal("did not send local proof to peer")
- }
-
- // All channel and node announcements should be broadcast.
- for i := 0; i < 5; i++ {
- select {
- case <-ctx.broadcastedMessage:
- case <-time.After(time.Second):
- t.Fatal("announcement wasn't broadcast")
- }
- }
-
- number := 0
- if err := ctx.gossiper.cfg.WaitingProofStore.ForAll(
- func(*channeldb.WaitingProof) er.R {
- number++
- return nil
- },
- func() {
- number = 0
- },
- ); err != nil && !channeldb.ErrWaitingProofNotFound.Is(err) {
- t.Fatalf("unable to retrieve objects from store: %v", err)
- }
-
- if number != 0 {
- t.Fatal("waiting proof should be removed from storage")
- }
-
- // Now give the gossiper the remote proof yet again. This should
- // trigger a send of the full ChannelAnnouncement.
- select {
- case err = <-ctx.gossiper.ProcessRemoteAnnouncement(
- batch.remoteProofAnn, remotePeer,
- ):
- case <-time.After(2 * time.Second):
- t.Fatal("did not process local announcement")
- }
- if err != nil {
- t.Fatalf("unable to process remote proof: %v", err)
- }
-
- // We expect the gossiper to send this message to the remote peer.
- select {
- case msg := <-sentToPeer:
- _, ok := msg.(*lnwire.ChannelAnnouncement)
- if !ok {
- t.Fatalf("expected ChannelAnnouncement, instead got %T", msg)
- }
- case <-time.After(2 * time.Second):
- t.Fatal("did not send local proof to peer")
- }
-}
-
-// TestDeDuplicatedAnnouncements ensures that the deDupedAnnouncements struct
-// properly stores and delivers the set of de-duplicated announcements.
-func TestDeDuplicatedAnnouncements(t *testing.T) {
- t.Parallel()
-
- timestamp := testTimestamp
- announcements := deDupedAnnouncements{}
- announcements.Reset()
-
- // Ensure that after new deDupedAnnouncements struct is created and
- // reset that storage of each announcement type is empty.
- if len(announcements.channelAnnouncements) != 0 {
- t.Fatal("channel announcements map not empty after reset")
- }
- if len(announcements.channelUpdates) != 0 {
- t.Fatal("channel updates map not empty after reset")
- }
- if len(announcements.nodeAnnouncements) != 0 {
- t.Fatal("node announcements map not empty after reset")
- }
-
- // Ensure that remote channel announcements are properly stored
- // and de-duplicated.
- ca, err := createRemoteChannelAnnouncement(0)
- if err != nil {
- t.Fatalf("can't create remote channel announcement: %v", err)
- }
-
- nodePeer := &mockPeer{bitcoinKeyPub2, nil, nil}
- announcements.AddMsgs(networkMsg{
- msg: ca,
- peer: nodePeer,
- source: nodePeer.IdentityKey(),
- })
- if len(announcements.channelAnnouncements) != 1 {
- t.Fatal("new channel announcement not stored in batch")
- }
-
- // We'll create a second instance of the same announcement with the
- // same channel ID. Adding this shouldn't cause an increase in the
- // number of items as they should be de-duplicated.
- ca2, err := createRemoteChannelAnnouncement(0)
- if err != nil {
- t.Fatalf("can't create remote channel announcement: %v", err)
- }
- announcements.AddMsgs(networkMsg{
- msg: ca2,
- peer: nodePeer,
- source: nodePeer.IdentityKey(),
- })
- if len(announcements.channelAnnouncements) != 1 {
- t.Fatal("channel announcement not replaced in batch")
- }
-
- // Next, we'll ensure that channel update announcements are properly
- // stored and de-duplicated. We do this by creating two updates
- // announcements with the same short ID and flag.
- ua, err := createUpdateAnnouncement(0, 0, nodeKeyPriv1, timestamp)
- if err != nil {
- t.Fatalf("can't create update announcement: %v", err)
- }
- announcements.AddMsgs(networkMsg{
- msg: ua,
- peer: nodePeer,
- source: nodePeer.IdentityKey(),
- })
- if len(announcements.channelUpdates) != 1 {
- t.Fatal("new channel update not stored in batch")
- }
-
- // Adding the very same announcement shouldn't cause an increase in the
- // number of ChannelUpdate announcements stored.
- ua2, err := createUpdateAnnouncement(0, 0, nodeKeyPriv1, timestamp)
- if err != nil {
- t.Fatalf("can't create update announcement: %v", err)
- }
- announcements.AddMsgs(networkMsg{
- msg: ua2,
- peer: nodePeer,
- source: nodePeer.IdentityKey(),
- })
- if len(announcements.channelUpdates) != 1 {
- t.Fatal("channel update not replaced in batch")
- }
-
- // Adding an announcement with a later timestamp should replace the
- // stored one.
- ua3, err := createUpdateAnnouncement(0, 0, nodeKeyPriv1, timestamp+1)
- if err != nil {
- t.Fatalf("can't create update announcement: %v", err)
- }
- announcements.AddMsgs(networkMsg{
- msg: ua3,
- peer: nodePeer,
- source: nodePeer.IdentityKey(),
- })
- if len(announcements.channelUpdates) != 1 {
- t.Fatal("channel update not replaced in batch")
- }
-
- assertChannelUpdate := func(channelUpdate *lnwire.ChannelUpdate) {
- channelKey := channelUpdateID{
- ua3.ShortChannelID,
- ua3.ChannelFlags,
- }
-
- mws, ok := announcements.channelUpdates[channelKey]
- if !ok {
- t.Fatal("channel update not in batch")
- }
- if mws.msg != channelUpdate {
- t.Fatalf("expected channel update %v, got %v)",
- channelUpdate, mws.msg)
- }
- }
-
- // Check that ua3 is the currently stored channel update.
- assertChannelUpdate(ua3)
-
- // Adding a channel update with an earlier timestamp should NOT
- // replace the one stored.
- ua4, err := createUpdateAnnouncement(0, 0, nodeKeyPriv1, timestamp)
- if err != nil {
- t.Fatalf("can't create update announcement: %v", err)
- }
- announcements.AddMsgs(networkMsg{
- msg: ua4,
- peer: nodePeer,
- source: nodePeer.IdentityKey(),
- })
- if len(announcements.channelUpdates) != 1 {
- t.Fatal("channel update not in batch")
- }
- assertChannelUpdate(ua3)
-
- // Next well ensure that node announcements are properly de-duplicated.
- // We'll first add a single instance with a node's private key.
- na, err := createNodeAnnouncement(nodeKeyPriv1, timestamp)
- if err != nil {
- t.Fatalf("can't create node announcement: %v", err)
- }
- announcements.AddMsgs(networkMsg{
- msg: na,
- peer: nodePeer,
- source: nodePeer.IdentityKey(),
- })
- if len(announcements.nodeAnnouncements) != 1 {
- t.Fatal("new node announcement not stored in batch")
- }
-
- // We'll now add another node to the batch.
- na2, err := createNodeAnnouncement(nodeKeyPriv2, timestamp)
- if err != nil {
- t.Fatalf("can't create node announcement: %v", err)
- }
- announcements.AddMsgs(networkMsg{
- msg: na2,
- peer: nodePeer,
- source: nodePeer.IdentityKey(),
- })
- if len(announcements.nodeAnnouncements) != 2 {
- t.Fatal("second node announcement not stored in batch")
- }
-
- // Adding a new instance of the _same_ node shouldn't increase the size
- // of the node ann batch.
- na3, err := createNodeAnnouncement(nodeKeyPriv2, timestamp)
- if err != nil {
- t.Fatalf("can't create node announcement: %v", err)
- }
- announcements.AddMsgs(networkMsg{
- msg: na3,
- peer: nodePeer,
- source: nodePeer.IdentityKey(),
- })
- if len(announcements.nodeAnnouncements) != 2 {
- t.Fatal("second node announcement not replaced in batch")
- }
-
- // Ensure that node announcement with different pointer to same public
- // key is still de-duplicated.
- newNodeKeyPointer := nodeKeyPriv2
- na4, err := createNodeAnnouncement(newNodeKeyPointer, timestamp)
- if err != nil {
- t.Fatalf("can't create node announcement: %v", err)
- }
- announcements.AddMsgs(networkMsg{
- msg: na4,
- peer: nodePeer,
- source: nodePeer.IdentityKey(),
- })
- if len(announcements.nodeAnnouncements) != 2 {
- t.Fatal("second node announcement not replaced again in batch")
- }
-
- // Ensure that node announcement with increased timestamp replaces
- // what is currently stored.
- na5, err := createNodeAnnouncement(nodeKeyPriv2, timestamp+1)
- if err != nil {
- t.Fatalf("can't create node announcement: %v", err)
- }
- announcements.AddMsgs(networkMsg{
- msg: na5,
- peer: nodePeer,
- source: nodePeer.IdentityKey(),
- })
- if len(announcements.nodeAnnouncements) != 2 {
- t.Fatal("node announcement not replaced in batch")
- }
- nodeID := route.NewVertex(nodeKeyPriv2.PubKey())
- stored, ok := announcements.nodeAnnouncements[nodeID]
- if !ok {
- t.Fatalf("node announcement not found in batch")
- }
- if stored.msg != na5 {
- t.Fatalf("expected de-duped node announcement to be %v, got %v",
- na5, stored.msg)
- }
-
- // Ensure that announcement batch delivers channel announcements,
- // channel updates, and node announcements in proper order.
- batch := announcements.Emit()
- if len(batch) != 4 {
- t.Fatal("announcement batch incorrect length")
- }
-
- if !reflect.DeepEqual(batch[0].msg, ca2) {
- t.Fatalf("channel announcement not first in batch: got %v, "+
- "expected %v", spew.Sdump(batch[0].msg), spew.Sdump(ca2))
- }
-
- if !reflect.DeepEqual(batch[1].msg, ua3) {
- t.Fatalf("channel update not next in batch: got %v, "+
- "expected %v", spew.Sdump(batch[1].msg), spew.Sdump(ua2))
- }
-
- // We'll ensure that both node announcements are present. We check both
- // indexes as due to the randomized order of map iteration they may be
- // in either place.
- if !reflect.DeepEqual(batch[2].msg, na) && !reflect.DeepEqual(batch[3].msg, na) {
- t.Fatal("first node announcement not in last part of batch: "+
- "got %v, expected %v", batch[2].msg,
- na)
- }
- if !reflect.DeepEqual(batch[2].msg, na5) && !reflect.DeepEqual(batch[3].msg, na5) {
- t.Fatalf("second node announcement not in last part of batch: "+
- "got %v, expected %v", batch[3].msg,
- na5)
- }
-
- // Ensure that after reset, storage of each announcement type
- // in deDupedAnnouncements struct is empty again.
- announcements.Reset()
- if len(announcements.channelAnnouncements) != 0 {
- t.Fatal("channel announcements map not empty after reset")
- }
- if len(announcements.channelUpdates) != 0 {
- t.Fatal("channel updates map not empty after reset")
- }
- if len(announcements.nodeAnnouncements) != 0 {
- t.Fatal("node announcements map not empty after reset")
- }
-}
-
-// TestForwardPrivateNodeAnnouncement ensures that we do not forward node
-// announcements for nodes who do not intend to publicly advertise themselves.
-func TestForwardPrivateNodeAnnouncement(t *testing.T) {
- t.Parallel()
-
- const (
- startingHeight = 100
- timestamp = 123456
- )
-
- ctx, cleanup, err := createTestCtx(startingHeight)
- if err != nil {
- t.Fatalf("can't create context: %v", err)
- }
- defer cleanup()
-
- // We'll start off by processing a channel announcement without a proof
- // (i.e., an unadvertised channel), followed by a node announcement for
- // this same channel announcement.
- chanAnn := createAnnouncementWithoutProof(startingHeight - 2)
- pubKey := nodeKeyPriv1.PubKey()
-
- select {
- case err := <-ctx.gossiper.ProcessLocalAnnouncement(chanAnn, pubKey):
- if err != nil {
- t.Fatalf("unable to process local announcement: %v", err)
- }
- case <-time.After(2 * time.Second):
- t.Fatalf("local announcement not processed")
- }
-
- // The gossiper should not broadcast the announcement due to it not
- // having its announcement signatures.
- select {
- case <-ctx.broadcastedMessage:
- t.Fatal("gossiper should not have broadcast channel announcement")
- case <-time.After(2 * trickleDelay):
- }
-
- nodeAnn, err := createNodeAnnouncement(nodeKeyPriv1, timestamp)
- if err != nil {
- t.Fatalf("unable to create node announcement: %v", err)
- }
-
- select {
- case err := <-ctx.gossiper.ProcessLocalAnnouncement(nodeAnn, pubKey):
- if err != nil {
- t.Fatalf("unable to process remote announcement: %v", err)
- }
- case <-time.After(2 * time.Second):
- t.Fatal("remote announcement not processed")
- }
-
- // The gossiper should also not broadcast the node announcement due to
- // it not being part of any advertised channels.
- select {
- case <-ctx.broadcastedMessage:
- t.Fatal("gossiper should not have broadcast node announcement")
- case <-time.After(2 * trickleDelay):
- }
-
- // Now, we'll attempt to forward the NodeAnnouncement for the same node
- // by opening a public channel on the network. We'll create a
- // ChannelAnnouncement and hand it off to the gossiper in order to
- // process it.
- remoteChanAnn, err := createRemoteChannelAnnouncement(startingHeight - 1)
- if err != nil {
- t.Fatalf("unable to create remote channel announcement: %v", err)
- }
- peer := &mockPeer{pubKey, nil, nil}
-
- select {
- case err := <-ctx.gossiper.ProcessRemoteAnnouncement(remoteChanAnn, peer):
- if err != nil {
- t.Fatalf("unable to process remote announcement: %v", err)
- }
- case <-time.After(2 * time.Second):
- t.Fatal("remote announcement not processed")
- }
-
- select {
- case <-ctx.broadcastedMessage:
- case <-time.After(2 * trickleDelay):
- t.Fatal("gossiper should have broadcast the channel announcement")
- }
-
- // We'll recreate the NodeAnnouncement with an updated timestamp to
- // prevent a stale update. The NodeAnnouncement should now be forwarded.
- nodeAnn, err = createNodeAnnouncement(nodeKeyPriv1, timestamp+1)
- if err != nil {
- t.Fatalf("unable to create node announcement: %v", err)
- }
-
- select {
- case err := <-ctx.gossiper.ProcessRemoteAnnouncement(nodeAnn, peer):
- if err != nil {
- t.Fatalf("unable to process remote announcement: %v", err)
- }
- case <-time.After(2 * time.Second):
- t.Fatal("remote announcement not processed")
- }
-
- select {
- case <-ctx.broadcastedMessage:
- case <-time.After(2 * trickleDelay):
- t.Fatal("gossiper should have broadcast the node announcement")
- }
-}
-
-// TestRejectZombieEdge ensures that we properly reject any announcements for
-// zombie edges.
-func TestRejectZombieEdge(t *testing.T) {
- t.Parallel()
-
- // We'll start by creating our test context with a batch of
- // announcements.
- ctx, cleanup, err := createTestCtx(0)
- if err != nil {
- t.Fatalf("unable to create test context: %v", err)
- }
- defer cleanup()
-
- batch, err := createAnnouncements(0)
- if err != nil {
- t.Fatalf("unable to create announcements: %v", err)
- }
- remotePeer := &mockPeer{pk: nodeKeyPriv2.PubKey()}
-
- // processAnnouncements is a helper closure we'll use to test that we
- // properly process/reject announcements based on whether they're for a
- // zombie edge or not.
- processAnnouncements := func(isZombie bool) {
- t.Helper()
-
- errChan := ctx.gossiper.ProcessRemoteAnnouncement(
- batch.remoteChanAnn, remotePeer,
- )
- select {
- case err := <-errChan:
- if isZombie && err != nil {
- t.Fatalf("expected to reject live channel "+
- "announcement with nil error: %v", err)
- }
- if !isZombie && err != nil {
- t.Fatalf("expected to process live channel "+
- "announcement: %v", err)
- }
- case <-time.After(time.Second):
- t.Fatal("expected to process channel announcement")
- }
- select {
- case <-ctx.broadcastedMessage:
- if isZombie {
- t.Fatal("expected to not broadcast zombie " +
- "channel announcement")
- }
- case <-time.After(2 * trickleDelay):
- if !isZombie {
- t.Fatal("expected to broadcast live channel " +
- "announcement")
- }
- }
-
- errChan = ctx.gossiper.ProcessRemoteAnnouncement(
- batch.chanUpdAnn2, remotePeer,
- )
- select {
- case err := <-errChan:
- if isZombie && err != nil {
- t.Fatalf("expected to reject zombie channel "+
- "update with nil error: %v", err)
- }
- if !isZombie && err != nil {
- t.Fatalf("expected to process live channel "+
- "update: %v", err)
- }
- case <-time.After(time.Second):
- t.Fatal("expected to process channel update")
- }
- select {
- case <-ctx.broadcastedMessage:
- if isZombie {
- t.Fatal("expected to not broadcast zombie " +
- "channel update")
- }
- case <-time.After(2 * trickleDelay):
- if !isZombie {
- t.Fatal("expected to broadcast live channel " +
- "update")
- }
- }
- }
-
- // We'll mark the edge for which we'll process announcements for as a
- // zombie within the router. This should reject any announcements for
- // this edge while it remains as a zombie.
- chanID := batch.remoteChanAnn.ShortChannelID
- err = ctx.router.MarkEdgeZombie(
- chanID, batch.remoteChanAnn.NodeID1, batch.remoteChanAnn.NodeID2,
- )
- if err != nil {
- t.Fatalf("unable to mark channel %v as zombie: %v", chanID, err)
- }
-
- processAnnouncements(true)
-
- // If we then mark the edge as live, the edge's zombie status should be
- // overridden and the announcements should be processed.
- if err := ctx.router.MarkEdgeLive(chanID); err != nil {
- t.Fatalf("unable mark channel %v as zombie: %v", chanID, err)
- }
-
- processAnnouncements(false)
-}
-
-// TestProcessZombieEdgeNowLive ensures that we can detect when a zombie edge
-// becomes live by receiving a fresh update.
-func TestProcessZombieEdgeNowLive(t *testing.T) {
- t.Parallel()
-
- // We'll start by creating our test context with a batch of
- // announcements.
- ctx, cleanup, err := createTestCtx(0)
- if err != nil {
- t.Fatalf("unable to create test context: %v", err)
- }
- defer cleanup()
-
- batch, err := createAnnouncements(0)
- if err != nil {
- t.Fatalf("unable to create announcements: %v", err)
- }
-
- localPrivKey := nodeKeyPriv1
- remotePrivKey := nodeKeyPriv2
-
- remotePeer := &mockPeer{pk: remotePrivKey.PubKey()}
-
- // processAnnouncement is a helper closure we'll use to ensure an
- // announcement is properly processed/rejected based on whether the edge
- // is a zombie or not. The expectsErr boolean can be used to determine
- // whether we should expect an error when processing the message, while
- // the isZombie boolean can be used to determine whether the
- // announcement should be or not be broadcast.
- processAnnouncement := func(ann lnwire.Message, isZombie, expectsErr bool) {
- t.Helper()
-
- errChan := ctx.gossiper.ProcessRemoteAnnouncement(
- ann, remotePeer,
- )
-
- var err er.R
- select {
- case err = <-errChan:
- case <-time.After(time.Second):
- t.Fatal("expected to process announcement")
- }
- if expectsErr && err == nil {
- t.Fatal("expected error when processing announcement")
- }
- if !expectsErr && err != nil {
- t.Fatalf("received unexpected error when processing "+
- "announcement: %v", err)
- }
-
- select {
- case msgWithSenders := <-ctx.broadcastedMessage:
- if isZombie {
- t.Fatal("expected to not broadcast zombie " +
- "channel message")
- }
- assertMessage(t, ann, msgWithSenders.msg)
-
- case <-time.After(2 * trickleDelay):
- if !isZombie {
- t.Fatal("expected to broadcast live channel " +
- "message")
- }
- }
- }
-
- // We'll generate a channel update with a timestamp far enough in the
- // past to consider it a zombie.
- zombieTimestamp := time.Now().Add(-routing.DefaultChannelPruneExpiry)
- batch.chanUpdAnn2.Timestamp = uint32(zombieTimestamp.Unix())
- if err := signUpdate(remotePrivKey, batch.chanUpdAnn2); err != nil {
- t.Fatalf("unable to sign update with new timestamp: %v", err)
- }
-
- // We'll also add the edge to our zombie index.
- chanID := batch.remoteChanAnn.ShortChannelID
- err = ctx.router.MarkEdgeZombie(
- chanID, batch.remoteChanAnn.NodeID1, batch.remoteChanAnn.NodeID2,
- )
- if err != nil {
- t.Fatalf("unable mark channel %v as zombie: %v", chanID, err)
- }
-
- // Attempting to process the current channel update should fail due to
- // its edge being considered a zombie and its timestamp not being within
- // the live horizon. We should not expect an error here since it is just
- // a stale update.
- processAnnouncement(batch.chanUpdAnn2, true, false)
-
- // Now we'll generate a new update with a fresh timestamp. This should
- // allow the channel update to be processed even though it is still
- // marked as a zombie within the index, since it is a fresh new update.
- // This won't work however since we'll sign it with the wrong private
- // key (local rather than remote).
- batch.chanUpdAnn2.Timestamp = uint32(time.Now().Unix())
- if err := signUpdate(localPrivKey, batch.chanUpdAnn2); err != nil {
- t.Fatalf("unable to sign update with new timestamp: %v", err)
- }
-
- // We should expect an error due to the signature being invalid.
- processAnnouncement(batch.chanUpdAnn2, true, true)
-
- // Signing it with the correct private key should allow it to be
- // processed.
- if err := signUpdate(remotePrivKey, batch.chanUpdAnn2); err != nil {
- t.Fatalf("unable to sign update with new timestamp: %v", err)
- }
-
- // The channel update cannot be successfully processed and broadcast
- // until the channel announcement is. Since the channel update indicates
- // a fresh new update, the gossiper should stash it until it sees the
- // corresponding channel announcement.
- updateErrChan := ctx.gossiper.ProcessRemoteAnnouncement(
- batch.chanUpdAnn2, remotePeer,
- )
-
- select {
- case <-ctx.broadcastedMessage:
- t.Fatal("expected to not broadcast live channel update " +
- "without announcement")
- case <-time.After(2 * trickleDelay):
- }
-
- // We'll go ahead and process the channel announcement to ensure the
- // channel update is processed thereafter.
- processAnnouncement(batch.remoteChanAnn, false, false)
-
- // After successfully processing the announcement, the channel update
- // should have been processed and broadcast successfully as well.
- select {
- case err := <-updateErrChan:
- if err != nil {
- t.Fatalf("expected to process live channel update: %v",
- err)
- }
- case <-time.After(time.Second):
- t.Fatal("expected to process announcement")
- }
-
- select {
- case msgWithSenders := <-ctx.broadcastedMessage:
- assertMessage(t, batch.chanUpdAnn2, msgWithSenders.msg)
- case <-time.After(2 * trickleDelay):
- t.Fatal("expected to broadcast live channel update")
- }
-}
-
-// TestReceiveRemoteChannelUpdateFirst tests that if we receive a ChannelUpdate
-// from the remote before we have processed our own ChannelAnnouncement, it will
-// be reprocessed later, after our ChannelAnnouncement.
-func TestReceiveRemoteChannelUpdateFirst(t *testing.T) {
- t.Parallel()
-
- ctx, cleanup, err := createTestCtx(uint32(proofMatureDelta))
- if err != nil {
- t.Fatalf("can't create context: %v", err)
- }
- defer cleanup()
-
- batch, err := createAnnouncements(0)
- if err != nil {
- t.Fatalf("can't generate announcements: %v", err)
- }
-
- localKey, err := btcec.ParsePubKey(batch.nodeAnn1.NodeID[:], btcec.S256())
- if err != nil {
- t.Fatalf("unable to parse pubkey: %v", err)
- }
- remoteKey, err := btcec.ParsePubKey(batch.nodeAnn2.NodeID[:], btcec.S256())
- if err != nil {
- t.Fatalf("unable to parse pubkey: %v", err)
- }
-
- // Set up a channel that we can use to inspect the messages sent
- // directly from the gossiper.
- sentMsgs := make(chan lnwire.Message, 10)
- remotePeer := &mockPeer{remoteKey, sentMsgs, ctx.gossiper.quit}
-
- // Override NotifyWhenOnline to return the remote peer which we expect
- // meesages to be sent to.
- ctx.gossiper.reliableSender.cfg.NotifyWhenOnline = func(peer [33]byte,
- peerChan chan<- lnpeer.Peer) {
-
- peerChan <- remotePeer
- }
-
- // Recreate the case where the remote node is sending us its ChannelUpdate
- // before we have been able to process our own ChannelAnnouncement and
- // ChannelUpdate.
- errRemoteAnn := ctx.gossiper.ProcessRemoteAnnouncement(
- batch.chanUpdAnn2, remotePeer,
- )
- select {
- case <-ctx.broadcastedMessage:
- t.Fatal("channel update announcement was broadcast")
- case <-time.After(2 * trickleDelay):
- }
-
- err = <-ctx.gossiper.ProcessRemoteAnnouncement(batch.nodeAnn2, remotePeer)
- if err != nil {
- t.Fatalf("unable to process node ann: %v", err)
- }
- select {
- case <-ctx.broadcastedMessage:
- t.Fatal("node announcement was broadcast")
- case <-time.After(2 * trickleDelay):
- }
-
- // Since the remote ChannelUpdate was added for an edge that
- // we did not already know about, it should have been added
- // to the map of premature ChannelUpdates. Check that nothing
- // was added to the graph.
- chanInfo, e1, e2, err := ctx.router.GetChannelByID(batch.chanUpdAnn1.ShortChannelID)
- if !channeldb.ErrEdgeNotFound.Is(err) {
- t.Fatalf("Expected ErrEdgeNotFound, got: %v", err)
- }
- if chanInfo != nil {
- t.Fatalf("chanInfo was not nil")
- }
- if e1 != nil {
- t.Fatalf("e1 was not nil")
- }
- if e2 != nil {
- t.Fatalf("e2 was not nil")
- }
-
- // Recreate lightning network topology. Initialize router with channel
- // between two nodes.
- err = <-ctx.gossiper.ProcessLocalAnnouncement(batch.localChanAnn, localKey)
- if err != nil {
- t.Fatalf("unable to process :%v", err)
- }
- select {
- case <-ctx.broadcastedMessage:
- t.Fatal("channel announcement was broadcast")
- case <-time.After(2 * trickleDelay):
- }
-
- err = <-ctx.gossiper.ProcessLocalAnnouncement(batch.chanUpdAnn1, localKey)
- if err != nil {
- t.Fatalf("unable to process :%v", err)
- }
- select {
- case <-ctx.broadcastedMessage:
- t.Fatal("channel update announcement was broadcast")
- case <-time.After(2 * trickleDelay):
- }
-
- err = <-ctx.gossiper.ProcessLocalAnnouncement(batch.nodeAnn1, localKey)
- if err != nil {
- t.Fatalf("unable to process :%v", err)
- }
- select {
- case <-ctx.broadcastedMessage:
- t.Fatal("node announcement was broadcast")
- case <-time.After(2 * trickleDelay):
- }
-
- // The local ChannelUpdate should now be sent directly to the remote peer,
- // such that the edge can be used for routing, regardless if this channel
- // is announced or not (private channel).
- select {
- case msg := <-sentMsgs:
- assertMessage(t, batch.chanUpdAnn1, msg)
- case <-time.After(1 * time.Second):
- t.Fatal("gossiper did not send channel update to peer")
- }
-
- // At this point the remote ChannelUpdate we received earlier should
- // be reprocessed, as we now have the necessary edge entry in the graph.
- select {
- case err := <-errRemoteAnn:
- if err != nil {
- t.Fatalf("error re-processing remote update: %v", err)
- }
- case <-time.After(2 * trickleDelay):
- t.Fatalf("remote update was not processed")
- }
-
- // Check that the ChannelEdgePolicy was added to the graph.
- chanInfo, e1, e2, err = ctx.router.GetChannelByID(
- batch.chanUpdAnn1.ShortChannelID,
- )
- if err != nil {
- t.Fatalf("unable to get channel from router: %v", err)
- }
- if chanInfo == nil {
- t.Fatalf("chanInfo was nil")
- }
- if e1 == nil {
- t.Fatalf("e1 was nil")
- }
- if e2 == nil {
- t.Fatalf("e2 was nil")
- }
-
- // Pretending that we receive local channel announcement from funding
- // manager, thereby kick off the announcement exchange process.
- err = <-ctx.gossiper.ProcessLocalAnnouncement(
- batch.localProofAnn, localKey,
- )
- if err != nil {
- t.Fatalf("unable to process :%v", err)
- }
-
- select {
- case <-ctx.broadcastedMessage:
- t.Fatal("announcements were broadcast")
- case <-time.After(2 * trickleDelay):
- }
-
- number := 0
- if err := ctx.gossiper.cfg.WaitingProofStore.ForAll(
- func(*channeldb.WaitingProof) er.R {
- number++
- return nil
- },
- func() {
- number = 0
- },
- ); err != nil {
- t.Fatalf("unable to retrieve objects from store: %v", err)
- }
-
- if number != 1 {
- t.Fatal("wrong number of objects in storage")
- }
-
- err = <-ctx.gossiper.ProcessRemoteAnnouncement(
- batch.remoteProofAnn, remotePeer,
- )
- if err != nil {
- t.Fatalf("unable to process :%v", err)
- }
-
- for i := 0; i < 4; i++ {
- select {
- case <-ctx.broadcastedMessage:
- case <-time.After(time.Second):
- t.Fatal("announcement wasn't broadcast")
- }
- }
-
- number = 0
- if err := ctx.gossiper.cfg.WaitingProofStore.ForAll(
- func(*channeldb.WaitingProof) er.R {
- number++
- return nil
- },
- func() {
- number = 0
- },
- ); err != nil && !channeldb.ErrWaitingProofNotFound.Is(err) {
- t.Fatalf("unable to retrieve objects from store: %v", err)
- }
-
- if number != 0 {
- t.Fatal("waiting proof should be removed from storage")
- }
-}
-
-// TestExtraDataChannelAnnouncementValidation tests that we're able to properly
-// validate a ChannelAnnouncement that includes opaque bytes that we don't
-// currently know of.
-func TestExtraDataChannelAnnouncementValidation(t *testing.T) {
- t.Parallel()
-
- ctx, cleanup, err := createTestCtx(0)
- if err != nil {
- t.Fatalf("can't create context: %v", err)
- }
- defer cleanup()
-
- remotePeer := &mockPeer{nodeKeyPriv1.PubKey(), nil, nil}
-
- // We'll now create an announcement that contains an extra set of bytes
- // that we don't know of ourselves, but should still include in the
- // final signature check.
- extraBytes := []byte("gotta validate this stil!")
- ca, err := createRemoteChannelAnnouncement(0, extraBytes)
- if err != nil {
- t.Fatalf("can't create channel announcement: %v", err)
- }
-
- // We'll now send the announcement to the main gossiper. We should be
- // able to validate this announcement to problem.
- select {
- case err = <-ctx.gossiper.ProcessRemoteAnnouncement(ca, remotePeer):
- case <-time.After(2 * time.Second):
- t.Fatal("did not process remote announcement")
- }
- if err != nil {
- t.Fatalf("unable to process :%v", err)
- }
-}
-
-// TestExtraDataChannelUpdateValidation tests that we're able to properly
-// validate a ChannelUpdate that includes opaque bytes that we don't currently
-// know of.
-func TestExtraDataChannelUpdateValidation(t *testing.T) {
- t.Parallel()
-
- timestamp := testTimestamp
- ctx, cleanup, err := createTestCtx(0)
- if err != nil {
- t.Fatalf("can't create context: %v", err)
- }
- defer cleanup()
-
- remotePeer := &mockPeer{nodeKeyPriv1.PubKey(), nil, nil}
-
- // In this scenario, we'll create two announcements, one regular
- // channel announcement, and another channel update announcement, that
- // has additional data that we won't be interpreting.
- chanAnn, err := createRemoteChannelAnnouncement(0)
- if err != nil {
- t.Fatalf("unable to create chan ann: %v", err)
- }
- chanUpdAnn1, err := createUpdateAnnouncement(
- 0, 0, nodeKeyPriv1, timestamp,
- []byte("must also validate"),
- )
- if err != nil {
- t.Fatalf("unable to create chan up: %v", err)
- }
- chanUpdAnn2, err := createUpdateAnnouncement(
- 0, 1, nodeKeyPriv2, timestamp,
- []byte("must also validate"),
- )
- if err != nil {
- t.Fatalf("unable to create chan up: %v", err)
- }
-
- // We should be able to properly validate all three messages without
- // any issue.
- select {
- case err = <-ctx.gossiper.ProcessRemoteAnnouncement(chanAnn, remotePeer):
- case <-time.After(2 * time.Second):
- t.Fatal("did not process remote announcement")
- }
- if err != nil {
- t.Fatalf("unable to process announcement: %v", err)
- }
-
- select {
- case err = <-ctx.gossiper.ProcessRemoteAnnouncement(chanUpdAnn1, remotePeer):
- case <-time.After(2 * time.Second):
- t.Fatal("did not process remote announcement")
- }
- if err != nil {
- t.Fatalf("unable to process announcement: %v", err)
- }
-
- select {
- case err = <-ctx.gossiper.ProcessRemoteAnnouncement(chanUpdAnn2, remotePeer):
- case <-time.After(2 * time.Second):
- t.Fatal("did not process remote announcement")
- }
- if err != nil {
- t.Fatalf("unable to process announcement: %v", err)
- }
-}
-
-// TestExtraDataNodeAnnouncementValidation tests that we're able to properly
-// validate a NodeAnnouncement that includes opaque bytes that we don't
-// currently know of.
-func TestExtraDataNodeAnnouncementValidation(t *testing.T) {
- t.Parallel()
-
- ctx, cleanup, err := createTestCtx(0)
- if err != nil {
- t.Fatalf("can't create context: %v", err)
- }
- defer cleanup()
-
- remotePeer := &mockPeer{nodeKeyPriv1.PubKey(), nil, nil}
- timestamp := testTimestamp
-
- // We'll create a node announcement that includes a set of opaque data
- // which we don't know of, but will store anyway in order to ensure
- // upgrades can flow smoothly in the future.
- nodeAnn, err := createNodeAnnouncement(
- nodeKeyPriv1, timestamp, []byte("gotta validate"),
- )
- if err != nil {
- t.Fatalf("can't create node announcement: %v", err)
- }
-
- select {
- case err = <-ctx.gossiper.ProcessRemoteAnnouncement(nodeAnn, remotePeer):
- case <-time.After(2 * time.Second):
- t.Fatal("did not process remote announcement")
- }
- if err != nil {
- t.Fatalf("unable to process announcement: %v", err)
- }
-}
-
-// assertBroadcast checks that num messages are being broadcasted from the
-// gossiper. The broadcasted messages are returned.
-func assertBroadcast(t *testing.T, ctx *testCtx, num int) []lnwire.Message {
- t.Helper()
-
- var msgs []lnwire.Message
- for i := 0; i < num; i++ {
- select {
- case msg := <-ctx.broadcastedMessage:
- msgs = append(msgs, msg.msg)
- case <-time.After(time.Second):
- t.Fatalf("expected %d messages to be broadcast, only "+
- "got %d", num, i)
- }
- }
-
- // No more messages should be broadcast.
- select {
- case msg := <-ctx.broadcastedMessage:
- t.Fatalf("unexpected message was broadcast: %T", msg.msg)
- case <-time.After(2 * trickleDelay):
- }
-
- return msgs
-}
-
-// assertProcessAnnouncemnt is a helper method that checks that the result of
-// processing an announcement is successful.
-func assertProcessAnnouncement(t *testing.T, result chan er.R) {
- t.Helper()
-
- select {
- case err := <-result:
- if err != nil {
- t.Fatalf("unable to process :%v", err)
- }
- case <-time.After(2 * time.Second):
- t.Fatal("did not process announcement")
- }
-}
-
-// TestRetransmit checks that the expected announcements are retransmitted when
-// the retransmit ticker ticks.
-func TestRetransmit(t *testing.T) {
- t.Parallel()
-
- ctx, cleanup, err := createTestCtx(proofMatureDelta)
- if err != nil {
- t.Fatalf("can't create context: %v", err)
- }
- defer cleanup()
-
- batch, err := createAnnouncements(0)
- if err != nil {
- t.Fatalf("can't generate announcements: %v", err)
- }
-
- localKey, err := btcec.ParsePubKey(batch.nodeAnn1.NodeID[:], btcec.S256())
- if err != nil {
- t.Fatalf("unable to parse pubkey: %v", err)
- }
- remoteKey, err := btcec.ParsePubKey(batch.nodeAnn2.NodeID[:], btcec.S256())
- if err != nil {
- t.Fatalf("unable to parse pubkey: %v", err)
- }
- remotePeer := &mockPeer{remoteKey, nil, nil}
-
- // Process a local channel annoucement, channel update and node
- // announcement. No messages should be broadcasted yet, since no proof
- // has been exchanged.
- assertProcessAnnouncement(
- t, ctx.gossiper.ProcessLocalAnnouncement(
- batch.localChanAnn, localKey,
- ),
- )
- assertBroadcast(t, ctx, 0)
-
- assertProcessAnnouncement(
- t, ctx.gossiper.ProcessLocalAnnouncement(
- batch.chanUpdAnn1, localKey,
- ),
- )
- assertBroadcast(t, ctx, 0)
-
- assertProcessAnnouncement(
- t, ctx.gossiper.ProcessLocalAnnouncement(
- batch.nodeAnn1, localKey,
- ),
- )
- assertBroadcast(t, ctx, 0)
-
- // Add the remote channel update to the gossiper. Similarly, nothing
- // should be broadcasted.
- assertProcessAnnouncement(
- t, ctx.gossiper.ProcessRemoteAnnouncement(
- batch.chanUpdAnn2, remotePeer,
- ),
- )
- assertBroadcast(t, ctx, 0)
-
- // Now add the local and remote proof to the gossiper, which should
- // trigger a broadcast of the announcements.
- assertProcessAnnouncement(
- t, ctx.gossiper.ProcessLocalAnnouncement(
- batch.localProofAnn, localKey,
- ),
- )
- assertBroadcast(t, ctx, 0)
-
- assertProcessAnnouncement(
- t, ctx.gossiper.ProcessRemoteAnnouncement(
- batch.remoteProofAnn, remotePeer,
- ),
- )
-
- // checkAnncouncments make sure the expected number of channel
- // announcements + channel updates + node announcements are broadcast.
- checkAnnouncements := func(t *testing.T, chanAnns, chanUpds,
- nodeAnns int) {
-
- t.Helper()
-
- num := chanAnns + chanUpds + nodeAnns
- anns := assertBroadcast(t, ctx, num)
-
- // Count the received announcements.
- var chanAnn, chanUpd, nodeAnn int
- for _, msg := range anns {
- switch msg.(type) {
- case *lnwire.ChannelAnnouncement:
- chanAnn++
- case *lnwire.ChannelUpdate:
- chanUpd++
- case *lnwire.NodeAnnouncement:
- nodeAnn++
- }
- }
-
- if chanAnn != chanAnns || chanUpd != chanUpds ||
- nodeAnn != nodeAnns {
- t.Fatalf("unexpected number of announcements: "+
- "chanAnn=%d, chanUpd=%d, nodeAnn=%d",
- chanAnn, chanUpd, nodeAnn)
- }
- }
-
- // All announcements should be broadcast, including the remote channel
- // update.
- checkAnnouncements(t, 1, 2, 1)
-
- // Now let the retransmit ticker tick, which should trigger updates to
- // be rebroadcast.
- now := time.Unix(int64(testTimestamp), 0)
- future := now.Add(rebroadcastInterval + 10*time.Second)
- select {
- case ctx.gossiper.cfg.RetransmitTicker.(*ticker.Force).Force <- future:
- case <-time.After(2 * time.Second):
- t.Fatalf("unable to force tick")
- }
-
- // The channel announcement + local channel update + node announcement
- // should be re-broadcast.
- checkAnnouncements(t, 1, 1, 1)
-}
-
-// TestNodeAnnouncementNoChannels tests that NodeAnnouncements for nodes with
-// no existing channels in the graph do not get forwarded.
-func TestNodeAnnouncementNoChannels(t *testing.T) {
- t.Parallel()
-
- ctx, cleanup, err := createTestCtx(0)
- if err != nil {
- t.Fatalf("can't create context: %v", err)
- }
- defer cleanup()
-
- batch, err := createAnnouncements(0)
- if err != nil {
- t.Fatalf("can't generate announcements: %v", err)
- }
-
- remoteKey, err := btcec.ParsePubKey(batch.nodeAnn2.NodeID[:],
- btcec.S256())
- if err != nil {
- t.Fatalf("unable to parse pubkey: %v", err)
- }
- remotePeer := &mockPeer{remoteKey, nil, nil}
-
- // Process the remote node announcement.
- select {
- case err = <-ctx.gossiper.ProcessRemoteAnnouncement(batch.nodeAnn2,
- remotePeer):
- case <-time.After(2 * time.Second):
- t.Fatal("did not process remote announcement")
- }
- if err != nil {
- t.Fatalf("unable to process announcement: %v", err)
- }
-
- // Since no channels or node announcements were already in the graph,
- // the node announcement should be ignored, and not forwarded.
- select {
- case <-ctx.broadcastedMessage:
- t.Fatal("node announcement was broadcast")
- case <-time.After(2 * trickleDelay):
- }
-
- // Now add the node's channel to the graph by processing the channel
- // announement and channel update.
- select {
- case err = <-ctx.gossiper.ProcessRemoteAnnouncement(batch.remoteChanAnn,
- remotePeer):
- case <-time.After(2 * time.Second):
- t.Fatal("did not process remote announcement")
- }
- if err != nil {
- t.Fatalf("unable to process announcement: %v", err)
- }
-
- select {
- case err = <-ctx.gossiper.ProcessRemoteAnnouncement(batch.chanUpdAnn2,
- remotePeer):
- case <-time.After(2 * time.Second):
- t.Fatal("did not process remote announcement")
- }
- if err != nil {
- t.Fatalf("unable to process announcement: %v", err)
- }
-
- // Now process the node announcement again.
- select {
- case err = <-ctx.gossiper.ProcessRemoteAnnouncement(batch.nodeAnn2, remotePeer):
- case <-time.After(2 * time.Second):
- t.Fatal("did not process remote announcement")
- }
- if err != nil {
- t.Fatalf("unable to process announcement: %v", err)
- }
-
- // This time the node announcement should be forwarded. The same should
- // the channel announcement and update be.
- for i := 0; i < 3; i++ {
- select {
- case <-ctx.broadcastedMessage:
- case <-time.After(time.Second):
- t.Fatal("announcement wasn't broadcast")
- }
- }
-
- // Processing the same node announement again should be ignored, as it
- // is stale.
- select {
- case err = <-ctx.gossiper.ProcessRemoteAnnouncement(batch.nodeAnn2,
- remotePeer):
- case <-time.After(2 * time.Second):
- t.Fatal("did not process remote announcement")
- }
- if err != nil {
- t.Fatalf("unable to process announcement: %v", err)
- }
-
- select {
- case <-ctx.broadcastedMessage:
- t.Fatal("node announcement was broadcast")
- case <-time.After(2 * trickleDelay):
- }
-}
-
-// TestOptionalFieldsChannelUpdateValidation tests that we're able to properly
-// validate the msg flags and optional max HTLC field of a ChannelUpdate.
-func TestOptionalFieldsChannelUpdateValidation(t *testing.T) {
- t.Parallel()
-
- ctx, cleanup, err := createTestCtx(0)
- if err != nil {
- t.Fatalf("can't create context: %v", err)
- }
- defer cleanup()
-
- chanUpdateHeight := uint32(0)
- timestamp := uint32(123456)
- nodePeer := &mockPeer{nodeKeyPriv1.PubKey(), nil, nil}
-
- // In this scenario, we'll test whether the message flags field in a channel
- // update is properly handled.
- chanAnn, err := createRemoteChannelAnnouncement(chanUpdateHeight)
- if err != nil {
- t.Fatalf("can't create channel announcement: %v", err)
- }
-
- select {
- case err = <-ctx.gossiper.ProcessRemoteAnnouncement(chanAnn, nodePeer):
- case <-time.After(2 * time.Second):
- t.Fatal("did not process remote announcement")
- }
- if err != nil {
- t.Fatalf("unable to process announcement: %v", err)
- }
-
- // The first update should fail from an invalid max HTLC field, which is
- // less than the min HTLC.
- chanUpdAnn, err := createUpdateAnnouncement(0, 0, nodeKeyPriv1, timestamp)
- if err != nil {
- t.Fatalf("unable to create channel update: %v", err)
- }
-
- chanUpdAnn.HtlcMinimumMsat = 5000
- chanUpdAnn.HtlcMaximumMsat = 4000
- if err := signUpdate(nodeKeyPriv1, chanUpdAnn); err != nil {
- t.Fatalf("unable to sign channel update: %v", err)
- }
-
- select {
- case err = <-ctx.gossiper.ProcessRemoteAnnouncement(chanUpdAnn, nodePeer):
- case <-time.After(2 * time.Second):
- t.Fatal("did not process remote announcement")
- }
- if err == nil || !strings.Contains(err.String(), "invalid max htlc") {
- t.Fatalf("expected chan update to error, instead got %v", err)
- }
-
- // The second update should fail because the message flag is set but
- // the max HTLC field is 0.
- chanUpdAnn.HtlcMinimumMsat = 0
- chanUpdAnn.HtlcMaximumMsat = 0
- if err := signUpdate(nodeKeyPriv1, chanUpdAnn); err != nil {
- t.Fatalf("unable to sign channel update: %v", err)
- }
-
- select {
- case err = <-ctx.gossiper.ProcessRemoteAnnouncement(chanUpdAnn, nodePeer):
- case <-time.After(2 * time.Second):
- t.Fatal("did not process remote announcement")
- }
- if err == nil || !strings.Contains(err.String(), "invalid max htlc") {
- t.Fatalf("expected chan update to error, instead got %v", err)
- }
-
- // The final update should succeed, since setting the flag 0 means the
- // nonsense max_htlc field will just be ignored.
- chanUpdAnn.MessageFlags = 0
- if err := signUpdate(nodeKeyPriv1, chanUpdAnn); err != nil {
- t.Fatalf("unable to sign channel update: %v", err)
- }
-
- select {
- case err = <-ctx.gossiper.ProcessRemoteAnnouncement(chanUpdAnn, nodePeer):
- case <-time.After(2 * time.Second):
- t.Fatal("did not process remote announcement")
- }
- if err != nil {
- t.Fatalf("unable to process announcement: %v", err)
- }
-}
-
-// TestSendChannelUpdateReliably ensures that the latest channel update for a
-// channel is always sent upon the remote party reconnecting.
-func TestSendChannelUpdateReliably(t *testing.T) {
- t.Parallel()
-
- // We'll start by creating our test context and a batch of
- // announcements.
- ctx, cleanup, err := createTestCtx(uint32(proofMatureDelta))
- if err != nil {
- t.Fatalf("unable to create test context: %v", err)
- }
- defer cleanup()
-
- batch, err := createAnnouncements(0)
- if err != nil {
- t.Fatalf("can't generate announcements: %v", err)
- }
-
- // We'll also create two keys, one for ourselves and another for the
- // remote party.
- localKey, err := btcec.ParsePubKey(batch.nodeAnn1.NodeID[:], btcec.S256())
- if err != nil {
- t.Fatalf("unable to parse pubkey: %v", err)
- }
- remoteKey, err := btcec.ParsePubKey(batch.nodeAnn2.NodeID[:], btcec.S256())
- if err != nil {
- t.Fatalf("unable to parse pubkey: %v", err)
- }
-
- // Set up a channel we can use to inspect messages sent by the
- // gossiper to the remote peer.
- sentToPeer := make(chan lnwire.Message, 1)
- remotePeer := &mockPeer{remoteKey, sentToPeer, ctx.gossiper.quit}
-
- // Since we first wait to be notified of the peer before attempting to
- // send the message, we'll overwrite NotifyWhenOnline and
- // NotifyWhenOffline to instead give us access to the channel that will
- // receive the notification.
- notifyOnline := make(chan chan<- lnpeer.Peer, 1)
- ctx.gossiper.reliableSender.cfg.NotifyWhenOnline = func(_ [33]byte,
- peerChan chan<- lnpeer.Peer) {
-
- notifyOnline <- peerChan
- }
- notifyOffline := make(chan chan struct{}, 1)
- ctx.gossiper.reliableSender.cfg.NotifyWhenOffline = func(
- _ [33]byte) <-chan struct{} {
-
- c := make(chan struct{}, 1)
- notifyOffline <- c
- return c
- }
-
- // assertMsgSent is a helper closure we'll use to determine if the
- // correct gossip message was sent.
- assertMsgSent := func(msg lnwire.Message) {
- t.Helper()
-
- select {
- case msgSent := <-sentToPeer:
- assertMessage(t, msg, msgSent)
- case <-time.After(2 * time.Second):
- t.Fatalf("did not send %v message to peer",
- msg.MsgType())
- }
- }
-
- // Process the channel announcement for which we'll send a channel
- // update for.
- select {
- case err = <-ctx.gossiper.ProcessLocalAnnouncement(
- batch.localChanAnn, localKey,
- ):
- case <-time.After(2 * time.Second):
- t.Fatal("did not process local channel announcement")
- }
- if err != nil {
- t.Fatalf("unable to process local channel announcement: %v", err)
- }
-
- // It should not be broadcast due to not having an announcement proof.
- select {
- case <-ctx.broadcastedMessage:
- t.Fatal("channel announcement was broadcast")
- case <-time.After(2 * trickleDelay):
- }
-
- // Now, we'll process the channel update.
- select {
- case err = <-ctx.gossiper.ProcessLocalAnnouncement(
- batch.chanUpdAnn1, localKey,
- ):
- case <-time.After(2 * time.Second):
- t.Fatal("did not process local channel update")
- }
- if err != nil {
- t.Fatalf("unable to process local channel update: %v", err)
- }
-
- // It should also not be broadcast due to the announcement not having an
- // announcement proof.
- select {
- case <-ctx.broadcastedMessage:
- t.Fatal("channel announcement was broadcast")
- case <-time.After(2 * trickleDelay):
- }
-
- // It should however send it to the peer directly. In order to do so,
- // it'll request a notification for when the peer is online.
- var peerChan chan<- lnpeer.Peer
- select {
- case peerChan = <-notifyOnline:
- case <-time.After(2 * time.Second):
- t.Fatal("gossiper did not request notification upon peer " +
- "connection")
- }
-
- // We can go ahead and notify the peer, which should trigger the message
- // to be sent.
- peerChan <- remotePeer
- assertMsgSent(batch.chanUpdAnn1)
-
- // The gossiper should now request a notification for when the peer
- // disconnects. We'll also trigger this now.
- var offlineChan chan struct{}
- select {
- case offlineChan = <-notifyOffline:
- case <-time.After(2 * time.Second):
- t.Fatal("gossiper did not request notification upon peer " +
- "disconnection")
- }
-
- close(offlineChan)
-
- // Since it's offline, the gossiper should request another notification
- // for when it comes back online.
- select {
- case peerChan = <-notifyOnline:
- case <-time.After(2 * time.Second):
- t.Fatal("gossiper did not request notification upon peer " +
- "connection")
- }
-
- // Now that the remote peer is offline, we'll send a new channel update.
- batch.chanUpdAnn1.Timestamp++
- if err := signUpdate(nodeKeyPriv1, batch.chanUpdAnn1); err != nil {
- t.Fatalf("unable to sign new channel update: %v", err)
- }
-
- // With the new update created, we'll go ahead and process it.
- select {
- case err = <-ctx.gossiper.ProcessLocalAnnouncement(
- batch.chanUpdAnn1, localKey,
- ):
- case <-time.After(2 * time.Second):
- t.Fatal("did not process local channel update")
- }
- if err != nil {
- t.Fatalf("unable to process local channel update: %v", err)
- }
-
- // It should also not be broadcast due to the announcement not having an
- // announcement proof.
- select {
- case <-ctx.broadcastedMessage:
- t.Fatal("channel announcement was broadcast")
- case <-time.After(2 * trickleDelay):
- }
-
- // The message should not be sent since the peer remains offline.
- select {
- case msg := <-sentToPeer:
- t.Fatalf("received unexpected message: %v", spew.Sdump(msg))
- case <-time.After(time.Second):
- }
-
- // Once again, we'll notify the peer is online and ensure the new
- // channel update is received. This will also cause an offline
- // notification to be requested again.
- peerChan <- remotePeer
- assertMsgSent(batch.chanUpdAnn1)
-
- select {
- case offlineChan = <-notifyOffline:
- case <-time.After(2 * time.Second):
- t.Fatal("gossiper did not request notification upon peer " +
- "disconnection")
- }
-
- // We'll then exchange proofs with the remote peer in order to announce
- // the channel.
- select {
- case err = <-ctx.gossiper.ProcessLocalAnnouncement(
- batch.localProofAnn, localKey,
- ):
- case <-time.After(2 * time.Second):
- t.Fatal("did not process local channel proof")
- }
- if err != nil {
- t.Fatalf("unable to process local channel proof: %v", err)
- }
-
- // No messages should be broadcast as we don't have the full proof yet.
- select {
- case <-ctx.broadcastedMessage:
- t.Fatal("channel announcement was broadcast")
- case <-time.After(2 * trickleDelay):
- }
-
- // Our proof should be sent to the remote peer however.
- assertMsgSent(batch.localProofAnn)
-
- select {
- case err = <-ctx.gossiper.ProcessRemoteAnnouncement(
- batch.remoteProofAnn, remotePeer,
- ):
- case <-time.After(2 * time.Second):
- t.Fatal("did not process remote channel proof")
- }
- if err != nil {
- t.Fatalf("unable to process remote channel proof: %v", err)
- }
-
- // Now that we've constructed our full proof, we can assert that the
- // channel has been announced.
- for i := 0; i < 2; i++ {
- select {
- case <-ctx.broadcastedMessage:
- case <-time.After(2 * trickleDelay):
- t.Fatal("expected channel to be announced")
- }
- }
-
- // With the channel announced, we'll generate a new channel update. This
- // one won't take the path of the reliable sender, as the channel has
- // already been announced. We'll keep track of the old message that is
- // now stale to use later on.
- staleChannelUpdate := batch.chanUpdAnn1
- newChannelUpdate := &lnwire.ChannelUpdate{}
- *newChannelUpdate = *staleChannelUpdate
- newChannelUpdate.Timestamp++
- if err := signUpdate(nodeKeyPriv1, newChannelUpdate); err != nil {
- t.Fatalf("unable to sign new channel update: %v", err)
- }
-
- // Process the new channel update. It should not be sent to the peer
- // directly since the reliable sender only applies when the channel is
- // not announced.
- select {
- case err = <-ctx.gossiper.ProcessLocalAnnouncement(
- newChannelUpdate, localKey,
- ):
- case <-time.After(2 * time.Second):
- t.Fatal("did not process local channel update")
- }
- if err != nil {
- t.Fatalf("unable to process local channel update: %v", err)
- }
- select {
- case <-ctx.broadcastedMessage:
- case <-time.After(2 * trickleDelay):
- t.Fatal("channel update was not broadcast")
- }
- select {
- case msg := <-sentToPeer:
- t.Fatalf("received unexpected message: %v", spew.Sdump(msg))
- case <-time.After(time.Second):
- }
-
- // Then, we'll trigger the reliable sender to send its pending messages
- // by triggering an offline notification for the peer, followed by an
- // online one.
- close(offlineChan)
-
- select {
- case peerChan = <-notifyOnline:
- case <-time.After(2 * time.Second):
- t.Fatal("gossiper did not request notification upon peer " +
- "connection")
- }
-
- peerChan <- remotePeer
-
- // At this point, we should have sent both the AnnounceSignatures and
- // stale ChannelUpdate.
- for i := 0; i < 2; i++ {
- var msg lnwire.Message
- select {
- case msg = <-sentToPeer:
- case <-time.After(time.Second):
- t.Fatal("expected to send message")
- }
-
- switch msg := msg.(type) {
- case *lnwire.ChannelUpdate:
- assertMessage(t, staleChannelUpdate, msg)
- case *lnwire.AnnounceSignatures:
- assertMessage(t, batch.localProofAnn, msg)
- default:
- t.Fatalf("send unexpected %v message", msg.MsgType())
- }
- }
-
- // Since the messages above are now deemed as stale, they should be
- // removed from the message store.
- err = wait.NoError(func() er.R {
- msgs, err := ctx.gossiper.cfg.MessageStore.Messages()
- if err != nil {
- return er.Errorf("unable to retrieve pending "+
- "messages: %v", err)
- }
- if len(msgs) != 0 {
- return er.Errorf("expected no messages left, found %d",
- len(msgs))
- }
- return nil
- }, time.Second)
- if err != nil {
- t.Fatal(err)
- }
-}
-
-func sendLocalMsg(t *testing.T, ctx *testCtx, msg lnwire.Message,
- localPub *btcec.PublicKey, optionalMsgFields ...OptionalMsgField) {
-
- t.Helper()
-
- var err er.R
- select {
- case err = <-ctx.gossiper.ProcessLocalAnnouncement(
- msg, localPub, optionalMsgFields...,
- ):
- case <-time.After(2 * time.Second):
- t.Fatal("did not process local announcement")
- }
- if err != nil {
- t.Fatalf("unable to process channel msg: %v", err)
- }
-}
-
-func sendRemoteMsg(t *testing.T, ctx *testCtx, msg lnwire.Message,
- remotePeer lnpeer.Peer) {
-
- t.Helper()
-
- select {
- case err := <-ctx.gossiper.ProcessRemoteAnnouncement(msg, remotePeer):
- if err != nil {
- t.Fatalf("unable to process channel msg: %v", err)
- }
- case <-time.After(2 * time.Second):
- t.Fatal("did not process local announcement")
- }
-}
-
-func assertBroadcastMsg(t *testing.T, ctx *testCtx,
- predicate func(lnwire.Message) er.R) {
-
- t.Helper()
-
- // We don't care about the order of the broadcast, only that our target
- // predicate returns true for any of the messages, so we'll continue to
- // retry until either we hit our timeout, or it returns with no error
- // (message found).
- err := wait.NoError(func() er.R {
- select {
- case msg := <-ctx.broadcastedMessage:
- return predicate(msg.msg)
- case <-time.After(2 * trickleDelay):
- return er.Errorf("no message broadcast")
- }
- }, time.Second*5)
- if err != nil {
- t.Fatal(err)
- }
-}
-
-// TestPropagateChanPolicyUpdate tests that we're able to issue requests to
-// update policies for all channels and also select target channels.
-// Additionally, we ensure that we don't propagate updates for any private
-// channels.
-func TestPropagateChanPolicyUpdate(t *testing.T) {
- t.Parallel()
-
- // First, we'll make out test context and add 3 random channels to the
- // graph.
- startingHeight := uint32(10)
- ctx, cleanup, err := createTestCtx(startingHeight)
- if err != nil {
- t.Fatalf("unable to create test context: %v", err)
- }
- defer cleanup()
-
- const numChannels = 3
- channelsToAnnounce := make([]*annBatch, 0, numChannels)
- for i := 0; i < numChannels; i++ {
- newChan, err := createAnnouncements(uint32(i + 1))
- if err != nil {
- t.Fatalf("unable to make new channel ann: %v", err)
- }
-
- channelsToAnnounce = append(channelsToAnnounce, newChan)
- }
-
- localKey := nodeKeyPriv1.PubKey()
- remoteKey := nodeKeyPriv2.PubKey()
-
- sentMsgs := make(chan lnwire.Message, 10)
- remotePeer := &mockPeer{remoteKey, sentMsgs, ctx.gossiper.quit}
-
- // The forced code path for sending the private ChannelUpdate to the
- // remote peer will be hit, forcing it to request a notification that
- // the remote peer is active. We'll ensure that it targets the proper
- // pubkey, and hand it our mock peer above.
- notifyErr := make(chan er.R, 1)
- ctx.gossiper.reliableSender.cfg.NotifyWhenOnline = func(
- targetPub [33]byte, peerChan chan<- lnpeer.Peer) {
-
- if !bytes.Equal(targetPub[:], remoteKey.SerializeCompressed()) {
- notifyErr <- er.Errorf("reliableSender attempted to send the "+
- "message to the wrong peer: expected %x got %x",
- remoteKey.SerializeCompressed(),
- targetPub)
- }
-
- peerChan <- remotePeer
- }
-
- // With our channel announcements created, we'll now send them all to
- // the gossiper in order for it to process. However, we'll hold back
- // the channel ann proof from the first channel in order to have it be
- // marked as private channel.
- firstChanID := channelsToAnnounce[0].localChanAnn.ShortChannelID
- for i, batch := range channelsToAnnounce {
- // channelPoint ensures that each channel policy in the map
- // returned by PropagateChanPolicyUpdate has a unique key. Since
- // the map is keyed by wire.OutPoint, we want to ensure that
- // each channel has a unique channel point.
- channelPoint := ChannelPoint(wire.OutPoint{Index: uint32(i)})
-
- sendLocalMsg(t, ctx, batch.localChanAnn, localKey, channelPoint)
- sendLocalMsg(t, ctx, batch.chanUpdAnn1, localKey)
- sendLocalMsg(t, ctx, batch.nodeAnn1, localKey)
-
- sendRemoteMsg(t, ctx, batch.chanUpdAnn2, remotePeer)
- sendRemoteMsg(t, ctx, batch.nodeAnn2, remotePeer)
-
- // We'll skip sending the auth proofs from the first channel to
- // ensure that it's seen as a private channel.
- if batch.localChanAnn.ShortChannelID == firstChanID {
- continue
- }
-
- sendLocalMsg(t, ctx, batch.localProofAnn, localKey)
- sendRemoteMsg(t, ctx, batch.remoteProofAnn, remotePeer)
- }
-
- // Drain out any broadcast or direct messages we might not have read up
- // to this point. We'll also check out notifyErr to detect if the
- // reliable sender had an issue sending to the remote peer.
-out:
- for {
- select {
- case <-ctx.broadcastedMessage:
- case <-sentMsgs:
- case err := <-notifyErr:
- t.Fatal(err)
- default:
- break out
- }
- }
-
- // Now that all of our channels are loaded, we'll attempt to update the
- // policy of all of them.
- const newTimeLockDelta = 100
- var edgesToUpdate []EdgeWithInfo
- err = ctx.router.ForAllOutgoingChannels(func(
- info *channeldb.ChannelEdgeInfo,
- edge *channeldb.ChannelEdgePolicy) er.R {
-
- edge.TimeLockDelta = uint16(newTimeLockDelta)
- edgesToUpdate = append(edgesToUpdate, EdgeWithInfo{
- Info: info,
- Edge: edge,
- })
-
- return nil
- })
- if err != nil {
- t.Fatal(err)
- }
-
- err = ctx.gossiper.PropagateChanPolicyUpdate(edgesToUpdate)
- if err != nil {
- t.Fatalf("unable to chan policies: %v", err)
- }
-
- // Two channel updates should now be broadcast, with neither of them
- // being the channel our first private channel.
- for i := 0; i < numChannels-1; i++ {
- assertBroadcastMsg(t, ctx, func(msg lnwire.Message) er.R {
- upd, ok := msg.(*lnwire.ChannelUpdate)
- if !ok {
- return er.Errorf("channel update not "+
- "broadcast, instead %T was", msg)
- }
-
- if upd.ShortChannelID == firstChanID {
- return er.Errorf("private channel upd " +
- "broadcast")
- }
- if upd.TimeLockDelta != newTimeLockDelta {
- return er.Errorf("wrong delta: expected %v, "+
- "got %v", newTimeLockDelta,
- upd.TimeLockDelta)
- }
-
- return nil
- })
- }
-
- // Finally the ChannelUpdate should have been sent directly to the
- // remote peer via the reliable sender.
- select {
- case msg := <-sentMsgs:
- upd, ok := msg.(*lnwire.ChannelUpdate)
- if !ok {
- t.Fatalf("channel update not "+
- "broadcast, instead %T was", msg)
- }
- if upd.TimeLockDelta != newTimeLockDelta {
- t.Fatalf("wrong delta: expected %v, "+
- "got %v", newTimeLockDelta,
- upd.TimeLockDelta)
- }
- if upd.ShortChannelID != firstChanID {
- t.Fatalf("private channel upd " +
- "broadcast")
- }
- case <-time.After(time.Second * 5):
- t.Fatalf("message not sent directly to peer")
- }
-
- // At this point, no other ChannelUpdate messages should be broadcast
- // as we sent the two public ones to the network, and the private one
- // was sent directly to the peer.
- for {
- select {
- case msg := <-ctx.broadcastedMessage:
- if upd, ok := msg.msg.(*lnwire.ChannelUpdate); ok {
- if upd.ShortChannelID == firstChanID {
- t.Fatalf("chan update msg received: %v",
- spew.Sdump(msg))
- }
- }
- default:
- return
- }
- }
-}
-
-// TestProcessChannelAnnouncementOptionalMsgFields ensures that the gossiper can
-// properly handled optional message fields provided by the caller when
-// processing a channel announcement.
-func TestProcessChannelAnnouncementOptionalMsgFields(t *testing.T) {
- t.Parallel()
-
- // We'll start by creating our test context and a set of test channel
- // announcements.
- ctx, cleanup, err := createTestCtx(0)
- if err != nil {
- t.Fatalf("unable to create test context: %v", err)
- }
- defer cleanup()
-
- chanAnn1 := createAnnouncementWithoutProof(100)
- chanAnn2 := createAnnouncementWithoutProof(101)
- localKey := nodeKeyPriv1.PubKey()
-
- // assertOptionalMsgFields is a helper closure that ensures the optional
- // message fields were set as intended.
- assertOptionalMsgFields := func(chanID lnwire.ShortChannelID,
- capacity btcutil.Amount, channelPoint wire.OutPoint) {
-
- t.Helper()
-
- edge, _, _, err := ctx.router.GetChannelByID(chanID)
- if err != nil {
- t.Fatalf("unable to get channel by id: %v", err)
- }
- if edge.Capacity != capacity {
- t.Fatalf("expected capacity %v, got %v", capacity,
- edge.Capacity)
- }
- if edge.ChannelPoint != channelPoint {
- t.Fatalf("expected channel point %v, got %v",
- channelPoint, edge.ChannelPoint)
- }
- }
-
- // We'll process the first announcement without any optional fields. We
- // should see the channel's capacity and outpoint have a zero value.
- sendLocalMsg(t, ctx, chanAnn1, localKey)
- assertOptionalMsgFields(chanAnn1.ShortChannelID, 0, wire.OutPoint{})
-
- // Providing the capacity and channel point as optional fields should
- // propagate them all the way down to the router.
- capacity := btcutil.Amount(1000)
- channelPoint := wire.OutPoint{Index: 1}
- sendLocalMsg(
- t, ctx, chanAnn2, localKey, ChannelCapacity(capacity),
- ChannelPoint(channelPoint),
- )
- assertOptionalMsgFields(chanAnn2.ShortChannelID, capacity, channelPoint)
-}
-
-func assertMessage(t *testing.T, expected, got lnwire.Message) {
- t.Helper()
-
- if !reflect.DeepEqual(expected, got) {
- t.Fatalf("expected: %v\ngot: %v", spew.Sdump(expected),
- spew.Sdump(got))
- }
-}
-
-// TestSplitAnnouncementsCorrectSubBatches checks that we split a given
-// sizes of announcement list into the correct number of batches.
-func TestSplitAnnouncementsCorrectSubBatches(t *testing.T) {
- t.Parallel()
-
- const subBatchSize = 10
-
- announcementBatchSizes := []int{2, 5, 20, 45, 80, 100, 1005}
- expectedNumberMiniBatches := []int{1, 1, 2, 5, 8, 10, 101}
-
- lengthAnnouncementBatchSizes := len(announcementBatchSizes)
- lengthExpectedNumberMiniBatches := len(expectedNumberMiniBatches)
-
- if lengthAnnouncementBatchSizes != lengthExpectedNumberMiniBatches {
- t.Fatal("Length of announcementBatchSizes and " +
- "expectedNumberMiniBatches should be equal")
- }
-
- for testIndex := range announcementBatchSizes {
- var batchSize = announcementBatchSizes[testIndex]
- announcementBatch := make([]msgWithSenders, batchSize)
-
- splitAnnouncementBatch := splitAnnouncementBatches(
- subBatchSize, announcementBatch,
- )
-
- lengthMiniBatches := len(splitAnnouncementBatch)
-
- if lengthMiniBatches != expectedNumberMiniBatches[testIndex] {
- t.Fatalf("Expecting %d mini batches, actual %d",
- expectedNumberMiniBatches[testIndex], lengthMiniBatches)
- }
- }
-}
-
-func assertCorrectSubBatchSize(t *testing.T, expectedSubBatchSize,
- actualSubBatchSize int) {
-
- t.Helper()
-
- if actualSubBatchSize != expectedSubBatchSize {
- t.Fatalf("Expecting subBatch size of %d, actual %d",
- expectedSubBatchSize, actualSubBatchSize)
- }
-}
-
-// TestCalculateCorrectSubBatchSize checks that we check the correct
-// sub batch size for each of the input vectors of batch sizes.
-func TestCalculateCorrectSubBatchSizes(t *testing.T) {
- t.Parallel()
-
- const minimumSubBatchSize = 10
- const batchDelay = time.Duration(100)
- const subBatchDelay = time.Duration(10)
-
- batchSizes := []int{2, 200, 250, 305, 352, 10010, 1000001}
- expectedSubBatchSize := []int{10, 20, 25, 31, 36, 1001, 100001}
-
- for testIndex := range batchSizes {
- batchSize := batchSizes[testIndex]
- expectedBatchSize := expectedSubBatchSize[testIndex]
-
- actualSubBatchSize := calculateSubBatchSize(
- batchDelay, subBatchDelay, minimumSubBatchSize, batchSize,
- )
-
- assertCorrectSubBatchSize(t, expectedBatchSize, actualSubBatchSize)
- }
-}
-
-// TestCalculateCorrectSubBatchSizesDifferentDelay checks that we check the
-// correct sub batch size for each of different delay.
-func TestCalculateCorrectSubBatchSizesDifferentDelay(t *testing.T) {
- t.Parallel()
-
- const batchSize = 100
- const minimumSubBatchSize = 10
-
- batchDelays := []time.Duration{100, 50, 20, 25, 5, 0}
- const subBatchDelay = 10
-
- expectedSubBatchSize := []int{10, 20, 50, 40, 100, 100}
-
- for testIndex := range batchDelays {
- batchDelay := batchDelays[testIndex]
- expectedBatchSize := expectedSubBatchSize[testIndex]
-
- actualSubBatchSize := calculateSubBatchSize(
- batchDelay, subBatchDelay, minimumSubBatchSize, batchSize,
- )
-
- assertCorrectSubBatchSize(t, expectedBatchSize, actualSubBatchSize)
- }
-}
-
-// TestBroadcastAnnsAfterGraphSynced ensures that we only broadcast
-// announcements after the graph has been considered as synced, i.e., after our
-// initial historical sync has completed.
-func TestBroadcastAnnsAfterGraphSynced(t *testing.T) {
- t.Parallel()
-
- ctx, cleanup, err := createTestCtx(10)
- if err != nil {
- t.Fatalf("can't create context: %v", err)
- }
- defer cleanup()
-
- // We'll mark the graph as not synced. This should prevent us from
- // broadcasting any messages we've received as part of our initial
- // historical sync.
- ctx.gossiper.syncMgr.markGraphSyncing()
-
- assertBroadcast := func(msg lnwire.Message, isRemote bool,
- shouldBroadcast bool) {
-
- t.Helper()
-
- nodePeer := &mockPeer{nodeKeyPriv1.PubKey(), nil, nil}
- var errChan chan er.R
- if isRemote {
- errChan = ctx.gossiper.ProcessRemoteAnnouncement(
- msg, nodePeer,
- )
- } else {
- errChan = ctx.gossiper.ProcessLocalAnnouncement(
- msg, nodePeer.pk,
- )
- }
-
- select {
- case err := <-errChan:
- if err != nil {
- t.Fatalf("unable to process gossip message: %v",
- err)
- }
- case <-time.After(2 * time.Second):
- t.Fatal("gossip message not processed")
- }
-
- select {
- case <-ctx.broadcastedMessage:
- if !shouldBroadcast {
- t.Fatal("gossip message was broadcast")
- }
- case <-time.After(2 * trickleDelay):
- if shouldBroadcast {
- t.Fatal("gossip message wasn't broadcast")
- }
- }
- }
-
- // A remote channel announcement should not be broadcast since the graph
- // has not yet been synced.
- chanAnn1, err := createRemoteChannelAnnouncement(0)
- if err != nil {
- t.Fatalf("unable to create channel announcement: %v", err)
- }
- assertBroadcast(chanAnn1, true, false)
-
- // A local channel announcement should be broadcast though, regardless
- // of whether we've synced our graph or not.
- chanUpd, err := createUpdateAnnouncement(0, 0, nodeKeyPriv1, 1)
- if err != nil {
- t.Fatalf("unable to create channel announcement: %v", err)
- }
- assertBroadcast(chanUpd, false, true)
-
- // Mark the graph as synced, which should allow the channel announcement
- // should to be broadcast.
- ctx.gossiper.syncMgr.markGraphSynced()
-
- chanAnn2, err := createRemoteChannelAnnouncement(1)
- if err != nil {
- t.Fatalf("unable to create channel announcement: %v", err)
- }
- assertBroadcast(chanAnn2, true, true)
-}
diff --git a/lnd/discovery/message_store.go b/lnd/discovery/message_store.go
deleted file mode 100644
index 0c497731..00000000
--- a/lnd/discovery/message_store.go
+++ /dev/null
@@ -1,299 +0,0 @@
-package discovery
-
-import (
- "bytes"
- "encoding/binary"
-
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/lnd/channeldb"
- "github.com/pkt-cash/pktd/lnd/channeldb/kvdb"
- "github.com/pkt-cash/pktd/lnd/lnwire"
-)
-
-var (
- // messageStoreBucket is a key used to create a top level bucket in the
- // gossiper database, used for storing messages that are to be sent to
- // peers. Upon restarts, these messages will be read and resent to their
- // respective peers.
- //
- // maps:
- // pubKey (33 bytes) + msgShortChanID (8 bytes) + msgType (2 bytes) -> msg
- messageStoreBucket = []byte("message-store")
-
- // ErrUnsupportedMessage is an error returned when we attempt to add a
- // message to the store that is not supported.
- ErrUnsupportedMessage = Err.CodeWithDetail("ErrUnsupportedMessage", "unsupported message type")
-
- // ErrCorruptedMessageStore indicates that the on-disk bucketing
- // structure has altered since the gossip message store instance was
- // initialized.
- ErrCorruptedMessageStore = Err.CodeWithDetail("ErrCorruptedMessageStore", "gossip message store has been "+
- "corrupted")
-)
-
-// GossipMessageStore is a store responsible for storing gossip messages which
-// we should reliably send to our peers.
-type GossipMessageStore interface {
- // AddMessage adds a message to the store for this peer.
- AddMessage(lnwire.Message, [33]byte) er.R
-
- // DeleteMessage deletes a message from the store for this peer.
- DeleteMessage(lnwire.Message, [33]byte) er.R
-
- // Messages returns the total set of messages that exist within the
- // store for all peers.
- Messages() (map[[33]byte][]lnwire.Message, er.R)
-
- // Peers returns the public key of all peers with messages within the
- // store.
- Peers() (map[[33]byte]struct{}, er.R)
-
- // MessagesForPeer returns the set of messages that exists within the
- // store for the given peer.
- MessagesForPeer([33]byte) ([]lnwire.Message, er.R)
-}
-
-// MessageStore is an implementation of the GossipMessageStore interface backed
-// by a channeldb instance. By design, this store will only keep the latest
-// version of a message (like in the case of multiple ChannelUpdate's) for a
-// channel with a peer.
-type MessageStore struct {
- db *channeldb.DB
-}
-
-// A compile-time assertion to ensure messageStore implements the
-// GossipMessageStore interface.
-var _ GossipMessageStore = (*MessageStore)(nil)
-
-// NewMessageStore creates a new message store backed by a channeldb instance.
-func NewMessageStore(db *channeldb.DB) (*MessageStore, er.R) {
- err := kvdb.Batch(db.Backend, func(tx kvdb.RwTx) er.R {
- _, err := tx.CreateTopLevelBucket(messageStoreBucket)
- return err
- })
- if err != nil {
- return nil, er.Errorf("unable to create required buckets: %v",
- err)
- }
-
- return &MessageStore{db}, nil
-}
-
-// msgShortChanID retrieves the short channel ID of the message.
-func msgShortChanID(msg lnwire.Message) (lnwire.ShortChannelID, er.R) {
- var shortChanID lnwire.ShortChannelID
- switch msg := msg.(type) {
- case *lnwire.AnnounceSignatures:
- shortChanID = msg.ShortChannelID
- case *lnwire.ChannelUpdate:
- shortChanID = msg.ShortChannelID
- default:
- return shortChanID, ErrUnsupportedMessage.Default()
- }
-
- return shortChanID, nil
-}
-
-// messageStoreKey constructs the database key for the message to be stored.
-func messageStoreKey(msg lnwire.Message, peerPubKey [33]byte) ([]byte, er.R) {
- shortChanID, err := msgShortChanID(msg)
- if err != nil {
- return nil, err
- }
-
- var k [33 + 8 + 2]byte
- copy(k[:33], peerPubKey[:])
- binary.BigEndian.PutUint64(k[33:41], shortChanID.ToUint64())
- binary.BigEndian.PutUint16(k[41:43], uint16(msg.MsgType()))
-
- return k[:], nil
-}
-
-// AddMessage adds a message to the store for this peer.
-func (s *MessageStore) AddMessage(msg lnwire.Message, peerPubKey [33]byte) er.R {
- // Construct the key for which we'll find this message with in the store.
- msgKey, err := messageStoreKey(msg, peerPubKey)
- if err != nil {
- return err
- }
-
- // Serialize the message with its wire encoding.
- var b bytes.Buffer
- if _, err := lnwire.WriteMessage(&b, msg, 0); err != nil {
- return err
- }
-
- return kvdb.Batch(s.db.Backend, func(tx kvdb.RwTx) er.R {
- messageStore := tx.ReadWriteBucket(messageStoreBucket)
- if messageStore == nil {
- return ErrCorruptedMessageStore.Default()
- }
-
- return messageStore.Put(msgKey, b.Bytes())
- })
-}
-
-// DeleteMessage deletes a message from the store for this peer.
-func (s *MessageStore) DeleteMessage(msg lnwire.Message,
- peerPubKey [33]byte) er.R {
-
- // Construct the key for which we'll find this message with in the
- // store.
- msgKey, err := messageStoreKey(msg, peerPubKey)
- if err != nil {
- return err
- }
-
- return kvdb.Batch(s.db.Backend, func(tx kvdb.RwTx) er.R {
- messageStore := tx.ReadWriteBucket(messageStoreBucket)
- if messageStore == nil {
- return ErrCorruptedMessageStore.Default()
- }
-
- // In the event that we're attempting to delete a ChannelUpdate
- // from the store, we'll make sure that we're actually deleting
- // the correct one as it can be overwritten.
- if msg, ok := msg.(*lnwire.ChannelUpdate); ok {
- // Deleting a value from a bucket that doesn't exist
- // acts as a NOP, so we'll return if a message doesn't
- // exist under this key.
- v := messageStore.Get(msgKey)
- if v == nil {
- return nil
- }
-
- dbMsg, err := lnwire.ReadMessage(bytes.NewReader(v), 0)
- if err != nil {
- return err
- }
-
- // If the timestamps don't match, then the update stored
- // should be the latest one, so we'll avoid deleting it.
- if msg.Timestamp != dbMsg.(*lnwire.ChannelUpdate).Timestamp {
- return nil
- }
- }
-
- return messageStore.Delete(msgKey)
- })
-}
-
-// readMessage reads a message from its serialized form and ensures its
-// supported by the current version of the message store.
-func readMessage(msgBytes []byte) (lnwire.Message, er.R) {
- msg, err := lnwire.ReadMessage(bytes.NewReader(msgBytes), 0)
- if err != nil {
- return nil, err
- }
-
- // Check if the message is supported by the store. We can reuse the
- // check for ShortChannelID as its a dependency on messages stored.
- if _, err := msgShortChanID(msg); err != nil {
- return nil, err
- }
-
- return msg, nil
-}
-
-// Messages returns the total set of messages that exist within the store for
-// all peers.
-func (s *MessageStore) Messages() (map[[33]byte][]lnwire.Message, er.R) {
- var msgs map[[33]byte][]lnwire.Message
- err := kvdb.View(s.db, func(tx kvdb.RTx) er.R {
- messageStore := tx.ReadBucket(messageStoreBucket)
- if messageStore == nil {
- return ErrCorruptedMessageStore.Default()
- }
-
- return messageStore.ForEach(func(k, v []byte) er.R {
- var pubKey [33]byte
- copy(pubKey[:], k[:33])
-
- // Deserialize the message from its raw bytes and filter
- // out any which are not currently supported by the
- // store.
- msg, err := readMessage(v)
- if ErrUnsupportedMessage.Is(err) {
- return nil
- }
- if err != nil {
- return err
- }
-
- msgs[pubKey] = append(msgs[pubKey], msg)
- return nil
- })
- }, func() {
- msgs = make(map[[33]byte][]lnwire.Message)
- })
- if err != nil {
- return nil, err
- }
-
- return msgs, nil
-}
-
-// MessagesForPeer returns the set of messages that exists within the store for
-// the given peer.
-func (s *MessageStore) MessagesForPeer(
- peerPubKey [33]byte) ([]lnwire.Message, er.R) {
-
- var msgs []lnwire.Message
- err := kvdb.View(s.db, func(tx kvdb.RTx) er.R {
- messageStore := tx.ReadBucket(messageStoreBucket)
- if messageStore == nil {
- return ErrCorruptedMessageStore.Default()
- }
-
- c := messageStore.ReadCursor()
- k, v := c.Seek(peerPubKey[:])
- for ; bytes.HasPrefix(k, peerPubKey[:]); k, v = c.Next() {
- // Deserialize the message from its raw bytes and filter
- // out any which are not currently supported by the
- // store.
- msg, err := readMessage(v)
- if ErrUnsupportedMessage.Is(err) {
- continue
- }
- if err != nil {
- return err
- }
-
- msgs = append(msgs, msg)
- }
-
- return nil
- }, func() {
- msgs = nil
- })
- if err != nil {
- return nil, err
- }
-
- return msgs, nil
-}
-
-// Peers returns the public key of all peers with messages within the store.
-func (s *MessageStore) Peers() (map[[33]byte]struct{}, er.R) {
- var peers map[[33]byte]struct{}
- err := kvdb.View(s.db, func(tx kvdb.RTx) er.R {
- messageStore := tx.ReadBucket(messageStoreBucket)
- if messageStore == nil {
- return ErrCorruptedMessageStore.Default()
- }
-
- return messageStore.ForEach(func(k, _ []byte) er.R {
- var pubKey [33]byte
- copy(pubKey[:], k[:33])
- peers[pubKey] = struct{}{}
- return nil
- })
- }, func() {
- peers = make(map[[33]byte]struct{})
- })
- if err != nil {
- return nil, err
- }
-
- return peers, nil
-}
diff --git a/lnd/discovery/message_store_test.go b/lnd/discovery/message_store_test.go
deleted file mode 100644
index 2df02803..00000000
--- a/lnd/discovery/message_store_test.go
+++ /dev/null
@@ -1,352 +0,0 @@
-package discovery
-
-import (
- "bytes"
- "io/ioutil"
- "math/rand"
- "os"
- "reflect"
- "testing"
-
- "github.com/davecgh/go-spew/spew"
- "github.com/pkt-cash/pktd/btcec"
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/lnd/channeldb"
- "github.com/pkt-cash/pktd/lnd/channeldb/kvdb"
- "github.com/pkt-cash/pktd/lnd/lnwire"
-)
-
-func createTestMessageStore(t *testing.T) (*MessageStore, func()) {
- t.Helper()
-
- tempDir, errr := ioutil.TempDir("", "channeldb")
- if errr != nil {
- t.Fatalf("unable to create temp dir: %v", errr)
- }
- db, err := channeldb.Open(tempDir)
- if err != nil {
- os.RemoveAll(tempDir)
- t.Fatalf("unable to open db: %v", err)
- }
-
- cleanUp := func() {
- db.Close()
- os.RemoveAll(tempDir)
- }
-
- store, err := NewMessageStore(db)
- if err != nil {
- cleanUp()
- t.Fatalf("unable to initialize message store: %v", err)
- }
-
- return store, cleanUp
-}
-
-func randPubKey(t *testing.T) *btcec.PublicKey {
- priv, err := btcec.NewPrivateKey(btcec.S256())
- if err != nil {
- t.Fatalf("unable to create private key: %v", err)
- }
-
- return priv.PubKey()
-}
-
-func randCompressedPubKey(t *testing.T) [33]byte {
- t.Helper()
-
- pubKey := randPubKey(t)
-
- var compressedPubKey [33]byte
- copy(compressedPubKey[:], pubKey.SerializeCompressed())
-
- return compressedPubKey
-}
-
-func randAnnounceSignatures() *lnwire.AnnounceSignatures {
- return &lnwire.AnnounceSignatures{
- ShortChannelID: lnwire.NewShortChanIDFromInt(rand.Uint64()),
- }
-}
-
-func randChannelUpdate() *lnwire.ChannelUpdate {
- return &lnwire.ChannelUpdate{
- ShortChannelID: lnwire.NewShortChanIDFromInt(rand.Uint64()),
- }
-}
-
-// TestMessageStoreMessages ensures that messages can be properly queried from
-// the store.
-func TestMessageStoreMessages(t *testing.T) {
- t.Parallel()
-
- // We'll start by creating our test message store.
- msgStore, cleanUp := createTestMessageStore(t)
- defer cleanUp()
-
- // We'll then create some test messages for two test peers, and none for
- // an additional test peer.
- channelUpdate1 := randChannelUpdate()
- announceSignatures1 := randAnnounceSignatures()
- peer1 := randCompressedPubKey(t)
- if err := msgStore.AddMessage(channelUpdate1, peer1); err != nil {
- t.Fatalf("unable to add message: %v", err)
- }
- if err := msgStore.AddMessage(announceSignatures1, peer1); err != nil {
- t.Fatalf("unable to add message: %v", err)
- }
- expectedPeerMsgs1 := map[uint64]lnwire.MessageType{
- channelUpdate1.ShortChannelID.ToUint64(): channelUpdate1.MsgType(),
- announceSignatures1.ShortChannelID.ToUint64(): announceSignatures1.MsgType(),
- }
-
- channelUpdate2 := randChannelUpdate()
- peer2 := randCompressedPubKey(t)
- if err := msgStore.AddMessage(channelUpdate2, peer2); err != nil {
- t.Fatalf("unable to add message: %v", err)
- }
- expectedPeerMsgs2 := map[uint64]lnwire.MessageType{
- channelUpdate2.ShortChannelID.ToUint64(): channelUpdate2.MsgType(),
- }
-
- peer3 := randCompressedPubKey(t)
- expectedPeerMsgs3 := map[uint64]lnwire.MessageType{}
-
- // assertPeerMsgs is a helper closure that we'll use to ensure we
- // retrieve the correct set of messages for a given peer.
- assertPeerMsgs := func(peerMsgs []lnwire.Message,
- expected map[uint64]lnwire.MessageType) {
-
- t.Helper()
-
- if len(peerMsgs) != len(expected) {
- t.Fatalf("expected %d pending messages, got %d",
- len(expected), len(peerMsgs))
- }
- for _, msg := range peerMsgs {
- var shortChanID uint64
- switch msg := msg.(type) {
- case *lnwire.AnnounceSignatures:
- shortChanID = msg.ShortChannelID.ToUint64()
- case *lnwire.ChannelUpdate:
- shortChanID = msg.ShortChannelID.ToUint64()
- default:
- t.Fatalf("found unexpected message type %T", msg)
- }
-
- msgType, ok := expected[shortChanID]
- if !ok {
- t.Fatalf("retrieved message with unexpected ID "+
- "%d from store", shortChanID)
- }
- if msgType != msg.MsgType() {
- t.Fatalf("expected message of type %v, got %v",
- msg.MsgType(), msgType)
- }
- }
- }
-
- // Then, we'll query the store for the set of messages for each peer and
- // ensure it matches what we expect.
- peers := [][33]byte{peer1, peer2, peer3}
- expectedPeerMsgs := []map[uint64]lnwire.MessageType{
- expectedPeerMsgs1, expectedPeerMsgs2, expectedPeerMsgs3,
- }
- for i, peer := range peers {
- peerMsgs, err := msgStore.MessagesForPeer(peer)
- if err != nil {
- t.Fatalf("unable to retrieve messages: %v", err)
- }
- assertPeerMsgs(peerMsgs, expectedPeerMsgs[i])
- }
-
- // Finally, we'll query the store for all of its messages of every peer.
- // Again, each peer should have a set of messages that match what we
- // expect.
- //
- // We'll construct the expected response. Only the first two peers will
- // have messages.
- totalPeerMsgs := make(map[[33]byte]map[uint64]lnwire.MessageType, 2)
- for i := 0; i < 2; i++ {
- totalPeerMsgs[peers[i]] = expectedPeerMsgs[i]
- }
-
- msgs, err := msgStore.Messages()
- if err != nil {
- t.Fatalf("unable to retrieve all peers with pending messages: "+
- "%v", err)
- }
- if len(msgs) != len(totalPeerMsgs) {
- t.Fatalf("expected %d peers with messages, got %d",
- len(totalPeerMsgs), len(msgs))
- }
- for peer, peerMsgs := range msgs {
- expected, ok := totalPeerMsgs[peer]
- if !ok {
- t.Fatalf("expected to find pending messages for peer %x",
- peer)
- }
-
- assertPeerMsgs(peerMsgs, expected)
- }
-
- peerPubKeys, err := msgStore.Peers()
- if err != nil {
- t.Fatalf("unable to retrieve all peers with pending messages: "+
- "%v", err)
- }
- if len(peerPubKeys) != len(totalPeerMsgs) {
- t.Fatalf("expected %d peers with messages, got %d",
- len(totalPeerMsgs), len(peerPubKeys))
- }
- for peerPubKey := range peerPubKeys {
- if _, ok := totalPeerMsgs[peerPubKey]; !ok {
- t.Fatalf("expected to find peer %x", peerPubKey)
- }
- }
-}
-
-// TestMessageStoreUnsupportedMessage ensures that we are not able to add a
-// message which is unsupported, and if a message is found to be unsupported by
-// the current version of the store, that it is properly filtered out from the
-// response.
-func TestMessageStoreUnsupportedMessage(t *testing.T) {
- t.Parallel()
-
- // We'll start by creating our test message store.
- msgStore, cleanUp := createTestMessageStore(t)
- defer cleanUp()
-
- // Create a message that is known to not be supported by the store.
- peer := randCompressedPubKey(t)
- unsupportedMsg := &lnwire.Error{}
-
- // Attempting to add it to the store should result in
- // ErrUnsupportedMessage.
- err := msgStore.AddMessage(unsupportedMsg, peer)
- if !ErrUnsupportedMessage.Is(err) {
- t.Fatalf("expected ErrUnsupportedMessage, got %v", err)
- }
-
- // We'll now pretend that the message is actually supported in a future
- // version of the store, so it's able to be added successfully. To
- // replicate this, we'll add the message manually rather than through
- // the existing AddMessage method.
- msgKey := peer[:]
- var rawMsg bytes.Buffer
- if _, err := lnwire.WriteMessage(&rawMsg, unsupportedMsg, 0); err != nil {
- t.Fatalf("unable to serialize message: %v", err)
- }
- err = kvdb.Update(msgStore.db, func(tx kvdb.RwTx) er.R {
- messageStore := tx.ReadWriteBucket(messageStoreBucket)
- return messageStore.Put(msgKey, rawMsg.Bytes())
- }, func() {})
- if err != nil {
- t.Fatalf("unable to add unsupported message to store: %v", err)
- }
-
- // Finally, we'll check that the store can properly filter out messages
- // that are currently unknown to it. We'll make sure this is done for
- // both Messages and MessagesForPeer.
- totalMsgs, err := msgStore.Messages()
- if err != nil {
- t.Fatalf("unable to retrieve messages: %v", err)
- }
- if len(totalMsgs) != 0 {
- t.Fatalf("expected to filter out unsupported message")
- }
- peerMsgs, err := msgStore.MessagesForPeer(peer)
- if err != nil {
- t.Fatalf("unable to retrieve peer messages: %v", err)
- }
- if len(peerMsgs) != 0 {
- t.Fatalf("expected to filter out unsupported message")
- }
-}
-
-// TestMessageStoreDeleteMessage ensures that we can properly delete messages
-// from the store.
-func TestMessageStoreDeleteMessage(t *testing.T) {
- t.Parallel()
-
- msgStore, cleanUp := createTestMessageStore(t)
- defer cleanUp()
-
- // assertMsg is a helper closure we'll use to ensure a message
- // does/doesn't exist within the store.
- assertMsg := func(msg lnwire.Message, peer [33]byte, exists bool) {
- t.Helper()
-
- storeMsgs, err := msgStore.MessagesForPeer(peer)
- if err != nil {
- t.Fatalf("unable to retrieve messages: %v", err)
- }
-
- found := false
- for _, storeMsg := range storeMsgs {
- if reflect.DeepEqual(msg, storeMsg) {
- found = true
- }
- }
-
- if found != exists {
- str := "find"
- if !exists {
- str = "not find"
- }
- t.Fatalf("expected to %v message %v", str,
- spew.Sdump(msg))
- }
- }
-
- // An AnnounceSignatures message should exist within the store after
- // adding it, and should no longer exists after deleting it.
- peer := randCompressedPubKey(t)
- annSig := randAnnounceSignatures()
- if err := msgStore.AddMessage(annSig, peer); err != nil {
- t.Fatalf("unable to add message: %v", err)
- }
- assertMsg(annSig, peer, true)
- if err := msgStore.DeleteMessage(annSig, peer); err != nil {
- t.Fatalf("unable to delete message: %v", err)
- }
- assertMsg(annSig, peer, false)
-
- // The store allows overwriting ChannelUpdates, since there can be
- // multiple versions, so we'll test things slightly different.
- //
- // The ChannelUpdate message should exist within the store after adding
- // it.
- chanUpdate := randChannelUpdate()
- if err := msgStore.AddMessage(chanUpdate, peer); err != nil {
- t.Fatalf("unable to add message: %v", err)
- }
- assertMsg(chanUpdate, peer, true)
-
- // Now, we'll create a new version for the same ChannelUpdate message.
- // Adding this one to the store will overwrite the previous one, so only
- // the new one should exist.
- newChanUpdate := randChannelUpdate()
- newChanUpdate.ShortChannelID = chanUpdate.ShortChannelID
- newChanUpdate.Timestamp = chanUpdate.Timestamp + 1
- if err := msgStore.AddMessage(newChanUpdate, peer); err != nil {
- t.Fatalf("unable to add message: %v", err)
- }
- assertMsg(chanUpdate, peer, false)
- assertMsg(newChanUpdate, peer, true)
-
- // Deleting the older message should act as a NOP and should NOT delete
- // the newer version as the older no longer exists.
- if err := msgStore.DeleteMessage(chanUpdate, peer); err != nil {
- t.Fatalf("unable to delete message: %v", err)
- }
- assertMsg(chanUpdate, peer, false)
- assertMsg(newChanUpdate, peer, true)
-
- // The newer version should no longer exist within the store after
- // deleting it.
- if err := msgStore.DeleteMessage(newChanUpdate, peer); err != nil {
- t.Fatalf("unable to delete message: %v", err)
- }
- assertMsg(newChanUpdate, peer, false)
-}
diff --git a/lnd/discovery/mock_test.go b/lnd/discovery/mock_test.go
deleted file mode 100644
index 5297a3e5..00000000
--- a/lnd/discovery/mock_test.go
+++ /dev/null
@@ -1,146 +0,0 @@
-package discovery
-
-import (
- "net"
- "sync"
-
- "github.com/pkt-cash/pktd/btcec"
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/lnd/channeldb"
- "github.com/pkt-cash/pktd/lnd/lnpeer"
- "github.com/pkt-cash/pktd/lnd/lnwire"
- "github.com/pkt-cash/pktd/wire"
-)
-
-// mockPeer implements the lnpeer.Peer interface and is used to test the
-// gossiper's interaction with peers.
-type mockPeer struct {
- pk *btcec.PublicKey
- sentMsgs chan lnwire.Message
- quit chan struct{}
-}
-
-var _ lnpeer.Peer = (*mockPeer)(nil)
-
-func (p *mockPeer) SendMessage(_ bool, msgs ...lnwire.Message) er.R {
- if p.sentMsgs == nil && p.quit == nil {
- return nil
- }
-
- for _, msg := range msgs {
- select {
- case p.sentMsgs <- msg:
- case <-p.quit:
- return er.New("peer disconnected")
- }
- }
-
- return nil
-}
-
-func (p *mockPeer) SendMessageLazy(sync bool, msgs ...lnwire.Message) er.R {
- return p.SendMessage(sync, msgs...)
-}
-
-func (p *mockPeer) AddNewChannel(_ *channeldb.OpenChannel, _ <-chan struct{}) er.R {
- return nil
-}
-func (p *mockPeer) WipeChannel(_ *wire.OutPoint) {}
-func (p *mockPeer) IdentityKey() *btcec.PublicKey { return p.pk }
-func (p *mockPeer) PubKey() [33]byte {
- var pubkey [33]byte
- copy(pubkey[:], p.pk.SerializeCompressed())
- return pubkey
-}
-func (p *mockPeer) Address() net.Addr { return nil }
-func (p *mockPeer) QuitSignal() <-chan struct{} {
- return p.quit
-}
-func (p *mockPeer) LocalFeatures() *lnwire.FeatureVector {
- return nil
-}
-func (p *mockPeer) RemoteFeatures() *lnwire.FeatureVector {
- return nil
-}
-
-// mockMessageStore is an in-memory implementation of the MessageStore interface
-// used for the gossiper's unit tests.
-type mockMessageStore struct {
- sync.Mutex
- messages map[[33]byte]map[lnwire.Message]struct{}
-}
-
-func newMockMessageStore() *mockMessageStore {
- return &mockMessageStore{
- messages: make(map[[33]byte]map[lnwire.Message]struct{}),
- }
-}
-
-var _ GossipMessageStore = (*mockMessageStore)(nil)
-
-func (s *mockMessageStore) AddMessage(msg lnwire.Message, pubKey [33]byte) er.R {
- s.Lock()
- defer s.Unlock()
-
- if _, ok := s.messages[pubKey]; !ok {
- s.messages[pubKey] = make(map[lnwire.Message]struct{})
- }
-
- s.messages[pubKey][msg] = struct{}{}
-
- return nil
-}
-
-func (s *mockMessageStore) DeleteMessage(msg lnwire.Message, pubKey [33]byte) er.R {
- s.Lock()
- defer s.Unlock()
-
- peerMsgs, ok := s.messages[pubKey]
- if !ok {
- return nil
- }
-
- delete(peerMsgs, msg)
- return nil
-}
-
-func (s *mockMessageStore) Messages() (map[[33]byte][]lnwire.Message, er.R) {
- s.Lock()
- defer s.Unlock()
-
- msgs := make(map[[33]byte][]lnwire.Message, len(s.messages))
- for peer, peerMsgs := range s.messages {
- for msg := range peerMsgs {
- msgs[peer] = append(msgs[peer], msg)
- }
- }
- return msgs, nil
-}
-
-func (s *mockMessageStore) Peers() (map[[33]byte]struct{}, er.R) {
- s.Lock()
- defer s.Unlock()
-
- peers := make(map[[33]byte]struct{}, len(s.messages))
- for peer := range s.messages {
- peers[peer] = struct{}{}
- }
- return peers, nil
-}
-
-func (s *mockMessageStore) MessagesForPeer(pubKey [33]byte) ([]lnwire.Message, er.R) {
- s.Lock()
- defer s.Unlock()
-
- peerMsgs, ok := s.messages[pubKey]
- if !ok {
- return nil, nil
- }
-
- msgs := make([]lnwire.Message, 0, len(peerMsgs))
- for msg := range peerMsgs {
- msgs = append(msgs, msg)
- }
-
- return msgs, nil
-}
diff --git a/lnd/discovery/reliable_sender.go b/lnd/discovery/reliable_sender.go
deleted file mode 100644
index c08fcf10..00000000
--- a/lnd/discovery/reliable_sender.go
+++ /dev/null
@@ -1,332 +0,0 @@
-package discovery
-
-import (
- "sync"
-
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/lnd/lnpeer"
- "github.com/pkt-cash/pktd/lnd/lnwire"
- "github.com/pkt-cash/pktd/pktlog/log"
-)
-
-// reliableSenderCfg contains all of necessary items for the reliableSender to
-// carry out its duties.
-type reliableSenderCfg struct {
- // NotifyWhenOnline is a function that allows the gossiper to be
- // notified when a certain peer comes online, allowing it to
- // retry sending a peer message.
- //
- // NOTE: The peerChan channel must be buffered.
- NotifyWhenOnline func(peerPubKey [33]byte, peerChan chan<- lnpeer.Peer)
-
- // NotifyWhenOffline is a function that allows the gossiper to be
- // notified when a certain peer disconnects, allowing it to request a
- // notification for when it reconnects.
- NotifyWhenOffline func(peerPubKey [33]byte) <-chan struct{}
-
- // MessageStore is a persistent storage of gossip messages which we will
- // use to determine which messages need to be resent for a given peer.
- MessageStore GossipMessageStore
-
- // IsMsgStale determines whether a message retrieved from the backing
- // MessageStore is seen as stale by the current graph.
- IsMsgStale func(lnwire.Message) bool
-}
-
-// peerManager contains the set of channels required for the peerHandler to
-// properly carry out its duties.
-type peerManager struct {
- // msgs is the channel through which messages will be streamed to the
- // handler in order to send the message to the peer while they're
- // online.
- msgs chan lnwire.Message
-
- // done is a channel that will be closed to signal that the handler for
- // the given peer has been torn down for whatever reason.
- done chan struct{}
-}
-
-// reliableSender is a small subsystem of the gossiper used to reliably send
-// gossip messages to peers.
-type reliableSender struct {
- start sync.Once
- stop sync.Once
-
- cfg reliableSenderCfg
-
- // activePeers keeps track of whether a peerHandler exists for a given
- // peer. A peerHandler is tasked with handling requests for messages
- // that should be reliably sent to peers while also taking into account
- // the peer's connection lifecycle.
- activePeers map[[33]byte]peerManager
- activePeersMtx sync.Mutex
-
- wg sync.WaitGroup
- quit chan struct{}
-}
-
-// newReliableSender returns a new reliableSender backed by the given config.
-func newReliableSender(cfg *reliableSenderCfg) *reliableSender {
- return &reliableSender{
- cfg: *cfg,
- activePeers: make(map[[33]byte]peerManager),
- quit: make(chan struct{}),
- }
-}
-
-// Start spawns message handlers for any peers with pending messages.
-func (s *reliableSender) Start() er.R {
- var err er.R
- s.start.Do(func() {
- err = s.resendPendingMsgs()
- })
- return err
-}
-
-// Stop halts the reliable sender from sending messages to peers.
-func (s *reliableSender) Stop() {
- s.stop.Do(func() {
- close(s.quit)
- s.wg.Wait()
- })
-}
-
-// sendMessage constructs a request to send a message reliably to a peer. In the
-// event that the peer is currently offline, this will only write the message to
-// disk. Once the peer reconnects, this message, along with any others pending,
-// will be sent to the peer.
-func (s *reliableSender) sendMessage(msg lnwire.Message, peerPubKey [33]byte) er.R {
- // We'll start by persisting the message to disk. This allows us to
- // resend the message upon restarts and peer reconnections.
- if err := s.cfg.MessageStore.AddMessage(msg, peerPubKey); err != nil {
- return err
- }
-
- // Then, we'll spawn a peerHandler for this peer to handle resending its
- // pending messages while taking into account its connection lifecycle.
-spawnHandler:
- msgHandler, ok := s.spawnPeerHandler(peerPubKey)
-
- // If the handler wasn't previously active, we can exit now as we know
- // that the message will be sent once the peer online notification is
- // received. This prevents us from potentially sending the message
- // twice.
- if !ok {
- return nil
- }
-
- // Otherwise, we'll attempt to stream the message to the handler.
- // There's a subtle race condition where the handler can be torn down
- // due to all of the messages sent being stale, so we'll handle this
- // gracefully by spawning another one to prevent blocking.
- select {
- case msgHandler.msgs <- msg:
- case <-msgHandler.done:
- goto spawnHandler
- case <-s.quit:
- return ErrGossiperShuttingDown.Default()
- }
-
- return nil
-}
-
-// spawnPeerMsgHandler spawns a peerHandler for the given peer if there isn't
-// one already active. The boolean returned signals whether there was already
-// one active or not.
-func (s *reliableSender) spawnPeerHandler(peerPubKey [33]byte) (peerManager, bool) {
- s.activePeersMtx.Lock()
- defer s.activePeersMtx.Unlock()
-
- msgHandler, ok := s.activePeers[peerPubKey]
- if !ok {
- msgHandler = peerManager{
- msgs: make(chan lnwire.Message),
- done: make(chan struct{}),
- }
- s.activePeers[peerPubKey] = msgHandler
-
- s.wg.Add(1)
- go s.peerHandler(msgHandler, peerPubKey)
- }
-
- return msgHandler, ok
-}
-
-// peerHandler is responsible for handling our reliable message send requests
-// for a given peer while also taking into account the peer's connection
-// lifecycle. Any messages that are attempted to be sent while the peer is
-// offline will be queued and sent once the peer reconnects.
-//
-// NOTE: This must be run as a goroutine.
-func (s *reliableSender) peerHandler(peerMgr peerManager, peerPubKey [33]byte) {
- defer s.wg.Done()
-
- // We'll start by requesting a notification for when the peer
- // reconnects.
- peerChan := make(chan lnpeer.Peer, 1)
-
-waitUntilOnline:
- log.Debugf("Requesting online notification for peer=%x", peerPubKey)
-
- s.cfg.NotifyWhenOnline(peerPubKey, peerChan)
-
- var peer lnpeer.Peer
-out:
- for {
- select {
- // While we're waiting, we'll also consume any messages that
- // must be sent to prevent blocking the caller. These can be
- // ignored for now since the peer is currently offline. Once
- // they reconnect, the messages will be sent since they should
- // have been persisted to disk.
- case msg := <-peerMgr.msgs:
- // Retrieve the short channel ID for which this message
- // applies for logging purposes. The error can be
- // ignored as the store can only contain messages which
- // have a ShortChannelID field.
- shortChanID, _ := msgShortChanID(msg)
- log.Debugf("Received request to send %v message for "+
- "channel=%v while peer=%x is offline",
- msg.MsgType(), shortChanID, peerPubKey)
-
- case peer = <-peerChan:
- break out
-
- case <-s.quit:
- return
- }
- }
-
- log.Debugf("Peer=%x is now online, proceeding to send pending messages",
- peerPubKey)
-
- // Once we detect the peer has reconnected, we'll also request a
- // notification for when they disconnect. We'll use this to make sure
- // they haven't disconnected (in the case of a flappy peer, etc.) by the
- // time we attempt to send them the pending messages.
- log.Debugf("Requesting offline notification for peer=%x", peerPubKey)
-
- offlineChan := s.cfg.NotifyWhenOffline(peerPubKey)
-
- pendingMsgs, err := s.cfg.MessageStore.MessagesForPeer(peerPubKey)
- if err != nil {
- log.Errorf("Unable to retrieve pending messages for peer %x: %v",
- peerPubKey, err)
- return
- }
-
- // With the peer online, we can now proceed to send our pending messages
- // for them.
- for _, msg := range pendingMsgs {
- // Retrieve the short channel ID for which this message applies
- // for logging purposes. The error can be ignored as the store
- // can only contain messages which have a ShortChannelID field.
- shortChanID, _ := msgShortChanID(msg)
-
- // Ensure the peer is still online right before sending the
- // message.
- select {
- case <-offlineChan:
- goto waitUntilOnline
- default:
- }
-
- if err := peer.SendMessage(false, msg); err != nil {
- log.Errorf("Unable to send %v message for channel=%v "+
- "to %x: %v", msg.MsgType(), shortChanID,
- peerPubKey, err)
- goto waitUntilOnline
- }
-
- log.Debugf("Successfully sent %v message for channel=%v with "+
- "peer=%x upon reconnection", msg.MsgType(), shortChanID,
- peerPubKey)
-
- // Now that the message has at least been sent once, we can
- // check whether it's stale. This guarantees that
- // AnnounceSignatures are sent at least once if we happen to
- // already have signatures for both parties.
- if s.cfg.IsMsgStale(msg) {
- err := s.cfg.MessageStore.DeleteMessage(msg, peerPubKey)
- if err != nil {
- log.Errorf("Unable to remove stale %v message "+
- "for channel=%v with peer %x: %v",
- msg.MsgType(), shortChanID, peerPubKey,
- err)
- continue
- }
-
- log.Debugf("Removed stale %v message for channel=%v "+
- "with peer=%x", msg.MsgType(), shortChanID,
- peerPubKey)
- }
- }
-
- // If all of our messages were stale, then there's no need for this
- // handler to continue running, so we can exit now.
- pendingMsgs, err = s.cfg.MessageStore.MessagesForPeer(peerPubKey)
- if err != nil {
- log.Errorf("Unable to retrieve pending messages for peer %x: %v",
- peerPubKey, err)
- return
- }
-
- if len(pendingMsgs) == 0 {
- log.Debugf("No pending messages left for peer=%x", peerPubKey)
-
- s.activePeersMtx.Lock()
- delete(s.activePeers, peerPubKey)
- s.activePeersMtx.Unlock()
-
- close(peerMgr.done)
-
- return
- }
-
- // Once the pending messages are sent, we can continue to send any
- // future messages while the peer remains connected.
- for {
- select {
- case msg := <-peerMgr.msgs:
- // Retrieve the short channel ID for which this message
- // applies for logging purposes. The error can be
- // ignored as the store can only contain messages which
- // have a ShortChannelID field.
- shortChanID, _ := msgShortChanID(msg)
-
- if err := peer.SendMessage(false, msg); err != nil {
- log.Errorf("Unable to send %v message for "+
- "channel=%v to %x: %v", msg.MsgType(),
- shortChanID, peerPubKey, err)
- }
-
- log.Debugf("Successfully sent %v message for "+
- "channel=%v with peer=%x", msg.MsgType(),
- shortChanID, peerPubKey)
-
- case <-offlineChan:
- goto waitUntilOnline
-
- case <-s.quit:
- return
- }
- }
-}
-
-// resendPendingMsgs retrieves and sends all of the messages within the message
-// store that should be reliably sent to their respective peers.
-func (s *reliableSender) resendPendingMsgs() er.R {
- // Fetch all of the peers for which we have pending messages for and
- // spawn a peerMsgHandler for each. Once the peer is seen as online, all
- // of the pending messages will be sent.
- peers, err := s.cfg.MessageStore.Peers()
- if err != nil {
- return err
- }
-
- for peer := range peers {
- s.spawnPeerHandler(peer)
- }
-
- return nil
-}
diff --git a/lnd/discovery/reliable_sender_test.go b/lnd/discovery/reliable_sender_test.go
deleted file mode 100644
index 36032775..00000000
--- a/lnd/discovery/reliable_sender_test.go
+++ /dev/null
@@ -1,290 +0,0 @@
-package discovery
-
-import (
- "testing"
- "time"
-
- "github.com/davecgh/go-spew/spew"
- "github.com/pkt-cash/pktd/btcec"
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/lnd/lnpeer"
- "github.com/pkt-cash/pktd/lnd/lntest/wait"
- "github.com/pkt-cash/pktd/lnd/lnwire"
-)
-
-// newTestReliableSender creates a new reliable sender instance used for
-// testing.
-func newTestReliableSender(t *testing.T) *reliableSender {
- t.Helper()
-
- cfg := &reliableSenderCfg{
- NotifyWhenOnline: func(pubKey [33]byte,
- peerChan chan<- lnpeer.Peer) {
- pk, err := btcec.ParsePubKey(pubKey[:], btcec.S256())
- if err != nil {
- t.Fatalf("unable to parse pubkey: %v", err)
- }
- peerChan <- &mockPeer{pk: pk}
- },
- NotifyWhenOffline: func(_ [33]byte) <-chan struct{} {
- c := make(chan struct{}, 1)
- return c
- },
- MessageStore: newMockMessageStore(),
- IsMsgStale: func(lnwire.Message) bool {
- return false
- },
- }
-
- return newReliableSender(cfg)
-}
-
-// assertMsgsSent ensures that the given messages can be read from a mock peer's
-// msgChan.
-func assertMsgsSent(t *testing.T, msgChan chan lnwire.Message,
- msgs ...lnwire.Message) {
-
- t.Helper()
-
- m := make(map[lnwire.Message]struct{}, len(msgs))
- for _, msg := range msgs {
- m[msg] = struct{}{}
- }
-
- for i := 0; i < len(msgs); i++ {
- select {
- case msg := <-msgChan:
- if _, ok := m[msg]; !ok {
- t.Fatalf("found unexpected message sent: %v",
- spew.Sdump(msg))
- }
- case <-time.After(time.Second):
- t.Fatal("reliable sender did not send message to peer")
- }
- }
-}
-
-// TestReliableSenderFlow ensures that the flow for sending messages reliably to
-// a peer while taking into account its connection lifecycle works as expected.
-func TestReliableSenderFlow(t *testing.T) {
- t.Parallel()
-
- reliableSender := newTestReliableSender(t)
-
- // Create a mock peer to send the messages to.
- pubKey := randPubKey(t)
- msgsSent := make(chan lnwire.Message)
- peer := &mockPeer{pubKey, msgsSent, reliableSender.quit}
-
- // Override NotifyWhenOnline and NotifyWhenOffline to provide the
- // notification channels so that we can control when notifications get
- // dispatched.
- notifyOnline := make(chan chan<- lnpeer.Peer, 2)
- notifyOffline := make(chan chan struct{}, 1)
-
- reliableSender.cfg.NotifyWhenOnline = func(_ [33]byte,
- peerChan chan<- lnpeer.Peer) {
- notifyOnline <- peerChan
- }
- reliableSender.cfg.NotifyWhenOffline = func(_ [33]byte) <-chan struct{} {
- c := make(chan struct{}, 1)
- notifyOffline <- c
- return c
- }
-
- // We'll start by creating our first message which we should reliably
- // send to our peer.
- msg1 := randChannelUpdate()
- var peerPubKey [33]byte
- copy(peerPubKey[:], pubKey.SerializeCompressed())
- if err := reliableSender.sendMessage(msg1, peerPubKey); err != nil {
- t.Fatalf("unable to reliably send message: %v", err)
- }
-
- // Since there isn't a peerHandler for this peer currently active due to
- // this being the first message being sent reliably, we should expect to
- // see a notification request for when the peer is online.
- var peerChan chan<- lnpeer.Peer
- select {
- case peerChan = <-notifyOnline:
- case <-time.After(time.Second):
- t.Fatal("reliable sender did not request online notification")
- }
-
- // We'll then attempt to send another additional message reliably.
- msg2 := randAnnounceSignatures()
- if err := reliableSender.sendMessage(msg2, peerPubKey); err != nil {
- t.Fatalf("unable to reliably send message: %v", err)
- }
-
- // This should not however request another peer online notification as
- // the peerHandler has already been started and is waiting for the
- // notification to be dispatched.
- select {
- case <-notifyOnline:
- t.Fatal("reliable sender should not request online notification")
- case <-time.After(time.Second):
- }
-
- // We'll go ahead and notify the peer.
- peerChan <- peer
-
- // By doing so, we should expect to see a notification request for when
- // the peer is offline.
- var offlineChan chan struct{}
- select {
- case offlineChan = <-notifyOffline:
- case <-time.After(time.Second):
- t.Fatal("reliable sender did not request offline notification")
- }
-
- // We should also see the messages arrive at the peer since they are now
- // seen as online.
- assertMsgsSent(t, peer.sentMsgs, msg1, msg2)
-
- // Then, we'll send one more message reliably.
- msg3 := randChannelUpdate()
- if err := reliableSender.sendMessage(msg3, peerPubKey); err != nil {
- t.Fatalf("unable to reliably send message: %v", err)
- }
-
- // Again, this should not request another peer online notification
- // request since we are currently waiting for the peer to be offline.
- select {
- case <-notifyOnline:
- t.Fatal("reliable sender should not request online notification")
- case <-time.After(time.Second):
- }
-
- // The expected message should be sent to the peer.
- assertMsgsSent(t, peer.sentMsgs, msg3)
-
- // We'll then notify that the peer is offline.
- close(offlineChan)
-
- // This should cause an online notification to be requested.
- select {
- case peerChan = <-notifyOnline:
- case <-time.After(time.Second):
- t.Fatal("reliable sender did not request online notification")
- }
-
- // Once we dispatch it, we should expect to see the messages be resent
- // to the peer as they are not stale.
- peerChan <- peer
-
- select {
- case <-notifyOffline:
- case <-time.After(5 * time.Second):
- t.Fatal("reliable sender did not request offline notification")
- }
-
- assertMsgsSent(t, peer.sentMsgs, msg1, msg2, msg3)
-}
-
-// TestReliableSenderStaleMessages ensures that the reliable sender is no longer
-// active for a peer which has successfully sent all of its messages and deemed
-// them as stale.
-func TestReliableSenderStaleMessages(t *testing.T) {
- t.Parallel()
-
- reliableSender := newTestReliableSender(t)
-
- // Create a mock peer to send the messages to.
- pubKey := randPubKey(t)
- msgsSent := make(chan lnwire.Message)
- peer := &mockPeer{pubKey, msgsSent, reliableSender.quit}
-
- // Override NotifyWhenOnline to provide the notification channel so that
- // we can control when notifications get dispatched.
- notifyOnline := make(chan chan<- lnpeer.Peer, 1)
- reliableSender.cfg.NotifyWhenOnline = func(_ [33]byte,
- peerChan chan<- lnpeer.Peer) {
- notifyOnline <- peerChan
- }
-
- // We'll also override IsMsgStale to mark all messages as stale as we're
- // interested in testing the stale message behavior.
- reliableSender.cfg.IsMsgStale = func(_ lnwire.Message) bool {
- return true
- }
-
- // We'll start by creating our first message which we should reliably
- // send to our peer, but will be seen as stale.
- msg1 := randAnnounceSignatures()
- var peerPubKey [33]byte
- copy(peerPubKey[:], pubKey.SerializeCompressed())
- if err := reliableSender.sendMessage(msg1, peerPubKey); err != nil {
- t.Fatalf("unable to reliably send message: %v", err)
- }
-
- // Since there isn't a peerHandler for this peer currently active due to
- // this being the first message being sent reliably, we should expect to
- // see a notification request for when the peer is online.
- var peerChan chan<- lnpeer.Peer
- select {
- case peerChan = <-notifyOnline:
- case <-time.After(time.Second):
- t.Fatal("reliable sender did not request online notification")
- }
-
- // We'll go ahead and notify the peer.
- peerChan <- peer
-
- // This should cause the message to be sent to the peer since they are
- // now seen as online. The message will be sent at least once to ensure
- // they can propagate before deciding whether they are stale or not.
- assertMsgsSent(t, peer.sentMsgs, msg1)
-
- // We'll create another message which we'll send reliably. This one
- // won't be seen as stale.
- msg2 := randChannelUpdate()
-
- // We'll then wait for the message to be removed from the backing
- // message store since it is seen as stale and has been sent at least
- // once. Once the message is removed, the peerHandler should be torn
- // down as there are no longer any pending messages within the store.
- err := wait.NoError(func() er.R {
- msgs, err := reliableSender.cfg.MessageStore.MessagesForPeer(
- peerPubKey,
- )
- if err != nil {
- return er.Errorf("unable to retrieve messages for "+
- "peer: %v", err)
- }
- if len(msgs) != 0 {
- return er.Errorf("expected to not find any "+
- "messages for peer, found %d", len(msgs))
- }
-
- return nil
- }, time.Second)
- if err != nil {
- t.Fatal(err)
- }
-
- // Override IsMsgStale to no longer mark messages as stale.
- reliableSender.cfg.IsMsgStale = func(_ lnwire.Message) bool {
- return false
- }
-
- // We'll request the message to be sent reliably.
- if err := reliableSender.sendMessage(msg2, peerPubKey); err != nil {
- t.Fatalf("unable to reliably send message: %v", err)
- }
-
- // We should see an online notification request indicating that a new
- // peerHandler has been spawned since it was previously torn down.
- select {
- case peerChan = <-notifyOnline:
- case <-time.After(time.Second):
- t.Fatal("reliable sender did not request online notification")
- }
-
- // Finally, notifying the peer is online should prompt the message to be
- // sent. Only the ChannelUpdate will be sent in this case since the
- // AnnounceSignatures message above was seen as stale.
- peerChan <- peer
-
- assertMsgsSent(t, peer.sentMsgs, msg2)
-}
diff --git a/lnd/discovery/sync_manager.go b/lnd/discovery/sync_manager.go
deleted file mode 100644
index 68de9266..00000000
--- a/lnd/discovery/sync_manager.go
+++ /dev/null
@@ -1,702 +0,0 @@
-package discovery
-
-import (
- "sync"
- "sync/atomic"
- "time"
-
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/chaincfg/chainhash"
- "github.com/pkt-cash/pktd/lnd/lnpeer"
- "github.com/pkt-cash/pktd/lnd/lnwire"
- "github.com/pkt-cash/pktd/lnd/routing/route"
- "github.com/pkt-cash/pktd/lnd/ticker"
- "github.com/pkt-cash/pktd/pktlog/log"
-)
-
-const (
- // DefaultSyncerRotationInterval is the default interval in which we'll
- // rotate a single active syncer.
- DefaultSyncerRotationInterval = 20 * time.Minute
-
- // DefaultHistoricalSyncInterval is the default interval in which we'll
- // force a historical sync to ensure we have as much of the public
- // network as possible.
- DefaultHistoricalSyncInterval = time.Hour
-)
-
-var (
- // ErrSyncManagerExiting is an error returned when we attempt to
- // start/stop a gossip syncer for a connected/disconnected peer, but the
- // SyncManager has already been stopped.
- ErrSyncManagerExiting = Err.CodeWithDetail("ErrSyncManagerExiting", "sync manager exiting")
-)
-
-// newSyncer in an internal message we'll use within the SyncManager to signal
-// that we should create a GossipSyncer for a newly connected peer.
-type newSyncer struct {
- // peer is the newly connected peer.
- peer lnpeer.Peer
-
- // doneChan serves as a signal to the caller that the SyncManager's
- // internal state correctly reflects the stale active syncer.
- doneChan chan struct{}
-}
-
-// staleSyncer is an internal message we'll use within the SyncManager to signal
-// that a peer has disconnected and its GossipSyncer should be removed.
-type staleSyncer struct {
- // peer is the peer that has disconnected.
- peer route.Vertex
-
- // doneChan serves as a signal to the caller that the SyncManager's
- // internal state correctly reflects the stale active syncer. This is
- // needed to ensure we always create a new syncer for a flappy peer
- // after they disconnect if they happened to be an active syncer.
- doneChan chan struct{}
-}
-
-// SyncManagerCfg contains all of the dependencies required for the SyncManager
-// to carry out its duties.
-type SyncManagerCfg struct {
- // ChainHash is a hash that indicates the specific network of the active
- // chain.
- ChainHash chainhash.Hash
-
- // ChanSeries is an interface that provides access to a time series view
- // of the current known channel graph. Each GossipSyncer enabled peer
- // will utilize this in order to create and respond to channel graph
- // time series queries.
- ChanSeries ChannelGraphTimeSeries
-
- // NumActiveSyncers is the number of peers for which we should have
- // active syncers with. After reaching NumActiveSyncers, any future
- // gossip syncers will be passive.
- NumActiveSyncers int
-
- // RotateTicker is a ticker responsible for notifying the SyncManager
- // when it should rotate its active syncers. A single active syncer with
- // a chansSynced state will be exchanged for a passive syncer in order
- // to ensure we don't keep syncing with the same peers.
- RotateTicker ticker.Ticker
-
- // HistoricalSyncTicker is a ticker responsible for notifying the
- // SyncManager when it should attempt a historical sync with a gossip
- // sync peer.
- HistoricalSyncTicker ticker.Ticker
-
- // IgnoreHistoricalFilters will prevent syncers from replying with
- // historical data when the remote peer sets a gossip_timestamp_range.
- // This prevents ranges with old start times from causing us to dump the
- // graph on connect.
- IgnoreHistoricalFilters bool
-}
-
-// SyncManager is a subsystem of the gossiper that manages the gossip syncers
-// for peers currently connected. When a new peer is connected, the manager will
-// create its accompanying gossip syncer and determine whether it should have an
-// ActiveSync or PassiveSync sync type based on how many other gossip syncers
-// are currently active. Any ActiveSync gossip syncers are started in a
-// round-robin manner to ensure we're not syncing with multiple peers at the
-// same time. The first GossipSyncer registered with the SyncManager will
-// attempt a historical sync to ensure we have as much of the public channel
-// graph as possible.
-type SyncManager struct {
- // initialHistoricalSyncCompleted serves as a barrier when initializing
- // new active GossipSyncers. If 0, the initial historical sync has not
- // completed, so we'll defer initializing any active GossipSyncers. If
- // 1, then we can transition the GossipSyncer immediately. We set up
- // this barrier to ensure we have most of the graph before attempting to
- // accept new updates at tip.
- //
- // NOTE: This must be used atomically.
- initialHistoricalSyncCompleted int32
-
- start sync.Once
- stop sync.Once
-
- cfg SyncManagerCfg
-
- // newSyncers is a channel we'll use to process requests to create
- // GossipSyncers for newly connected peers.
- newSyncers chan *newSyncer
-
- // staleSyncers is a channel we'll use to process requests to tear down
- // GossipSyncers for disconnected peers.
- staleSyncers chan *staleSyncer
-
- // syncersMu guards the read and write access to the activeSyncers and
- // inactiveSyncers maps below.
- syncersMu sync.Mutex
-
- // activeSyncers is the set of all syncers for which we are currently
- // receiving graph updates from. The number of possible active syncers
- // is bounded by NumActiveSyncers.
- activeSyncers map[route.Vertex]*GossipSyncer
-
- // inactiveSyncers is the set of all syncers for which we are not
- // currently receiving new graph updates from.
- inactiveSyncers map[route.Vertex]*GossipSyncer
-
- wg sync.WaitGroup
- quit chan struct{}
-}
-
-// newSyncManager constructs a new SyncManager backed by the given config.
-func newSyncManager(cfg *SyncManagerCfg) *SyncManager {
- return &SyncManager{
- cfg: *cfg,
- newSyncers: make(chan *newSyncer),
- staleSyncers: make(chan *staleSyncer),
- activeSyncers: make(
- map[route.Vertex]*GossipSyncer, cfg.NumActiveSyncers,
- ),
- inactiveSyncers: make(map[route.Vertex]*GossipSyncer),
- quit: make(chan struct{}),
- }
-}
-
-// Start starts the SyncManager in order to properly carry out its duties.
-func (m *SyncManager) Start() {
- m.start.Do(func() {
- m.wg.Add(1)
- go m.syncerHandler()
- })
-}
-
-// Stop stops the SyncManager from performing its duties.
-func (m *SyncManager) Stop() {
- m.stop.Do(func() {
- close(m.quit)
- m.wg.Wait()
-
- for _, syncer := range m.inactiveSyncers {
- syncer.Stop()
- }
- for _, syncer := range m.activeSyncers {
- syncer.Stop()
- }
- })
-}
-
-// syncerHandler is the SyncManager's main event loop responsible for:
-//
-// 1. Creating and tearing down GossipSyncers for connected/disconnected peers.
-
-// 2. Finding new peers to receive graph updates from to ensure we don't only
-// receive them from the same set of peers.
-
-// 3. Finding new peers to force a historical sync with to ensure we have as
-// much of the public network as possible.
-//
-// NOTE: This must be run as a goroutine.
-func (m *SyncManager) syncerHandler() {
- defer m.wg.Done()
-
- m.cfg.RotateTicker.Resume()
- defer m.cfg.RotateTicker.Stop()
-
- m.cfg.HistoricalSyncTicker.Resume()
- defer m.cfg.HistoricalSyncTicker.Stop()
-
- var (
- // initialHistoricalSyncer is the syncer we are currently
- // performing an initial historical sync with.
- initialHistoricalSyncer *GossipSyncer
-
- // initialHistoricalSyncSignal is a signal that will fire once
- // the intiial historical sync has been completed. This is
- // crucial to ensure that another historical sync isn't
- // attempted just because the initialHistoricalSyncer was
- // disconnected.
- initialHistoricalSyncSignal chan struct{}
- )
-
- for {
- select {
- // A new peer has been connected, so we'll create its
- // accompanying GossipSyncer.
- case newSyncer := <-m.newSyncers:
- // If we already have a syncer, then we'll exit early as
- // we don't want to override it.
- if _, ok := m.GossipSyncer(newSyncer.peer.PubKey()); ok {
- close(newSyncer.doneChan)
- continue
- }
-
- s := m.createGossipSyncer(newSyncer.peer)
-
- // attemptHistoricalSync determines whether we should
- // attempt an initial historical sync when a new peer
- // connects.
- attemptHistoricalSync := false
-
- m.syncersMu.Lock()
- switch {
- // Regardless of whether the initial historical sync
- // has completed, we'll re-trigger a historical sync if
- // we no longer have any syncers. This might be
- // necessary if we lost all our peers at one point, and
- // now we finally have one again.
- case len(m.activeSyncers) == 0 &&
- len(m.inactiveSyncers) == 0:
-
- attemptHistoricalSync = true
- fallthrough
-
- // If we've exceeded our total number of active syncers,
- // we'll initialize this GossipSyncer as passive.
- case len(m.activeSyncers) >= m.cfg.NumActiveSyncers:
- fallthrough
-
- // If the initial historical sync has yet to complete,
- // then we'll declare it as passive and attempt to
- // transition it when the initial historical sync
- // completes.
- case !m.IsGraphSynced():
- s.setSyncType(PassiveSync)
- m.inactiveSyncers[s.cfg.peerPub] = s
-
- // The initial historical sync has completed, so we can
- // immediately start the GossipSyncer as active.
- default:
- s.setSyncType(ActiveSync)
- m.activeSyncers[s.cfg.peerPub] = s
- }
- m.syncersMu.Unlock()
-
- s.Start()
-
- // Once we create the GossipSyncer, we'll signal to the
- // caller that they can proceed since the SyncManager's
- // internal state has been updated.
- close(newSyncer.doneChan)
-
- // We'll force a historical sync with the first peer we
- // connect to, to ensure we get as much of the graph as
- // possible.
- if !attemptHistoricalSync {
- continue
- }
- m.markGraphSyncing()
-
- log.Debugf("Attempting initial historical sync with "+
- "GossipSyncer(%x)", s.cfg.peerPub)
-
- if err := s.historicalSync(); err != nil {
- log.Errorf("Unable to attempt initial "+
- "historical sync with "+
- "GossipSyncer(%x): %v", s.cfg.peerPub,
- err)
- continue
- }
-
- // Once the historical sync has started, we'll get a
- // keep track of the corresponding syncer to properly
- // handle disconnects. We'll also use a signal to know
- // when the historical sync completed.
- initialHistoricalSyncer = s
- initialHistoricalSyncSignal = s.ResetSyncedSignal()
-
- // An existing peer has disconnected, so we'll tear down its
- // corresponding GossipSyncer.
- case staleSyncer := <-m.staleSyncers:
- // Once the corresponding GossipSyncer has been stopped
- // and removed, we'll signal to the caller that they can
- // proceed since the SyncManager's internal state has
- // been updated.
- m.removeGossipSyncer(staleSyncer.peer)
- close(staleSyncer.doneChan)
-
- // If we don't have an initialHistoricalSyncer, or we do
- // but it is not the peer being disconnected, then we
- // have nothing left to do and can proceed.
- switch {
- case initialHistoricalSyncer == nil:
- fallthrough
- case staleSyncer.peer != initialHistoricalSyncer.cfg.peerPub:
- continue
- }
-
- // Otherwise, our initialHistoricalSyncer corresponds to
- // the peer being disconnected, so we'll have to find a
- // replacement.
- log.Debug("Finding replacement for intitial " +
- "historical sync")
-
- s := m.forceHistoricalSync()
- if s == nil {
- log.Debug("No eligible replacement found " +
- "for initial historical sync")
- continue
- }
-
- log.Debugf("Replaced initial historical "+
- "GossipSyncer(%v) with GossipSyncer(%x)",
- staleSyncer.peer, s.cfg.peerPub)
-
- initialHistoricalSyncer = s
- initialHistoricalSyncSignal = s.ResetSyncedSignal()
-
- // Our initial historical sync signal has completed, so we'll
- // nil all of the relevant fields as they're no longer needed.
- case <-initialHistoricalSyncSignal:
- initialHistoricalSyncer = nil
- initialHistoricalSyncSignal = nil
- m.markGraphSynced()
-
- log.Debug("Initial historical sync completed")
-
- // With the initial historical sync complete, we can
- // begin receiving new graph updates at tip. We'll
- // determine whether we can have any more active
- // GossipSyncers. If we do, we'll randomly select some
- // that are currently passive to transition.
- m.syncersMu.Lock()
- numActiveLeft := m.cfg.NumActiveSyncers - len(m.activeSyncers)
- if numActiveLeft <= 0 {
- m.syncersMu.Unlock()
- continue
- }
-
- log.Debugf("Attempting to transition %v passive "+
- "GossipSyncers to active", numActiveLeft)
-
- for i := 0; i < numActiveLeft; i++ {
- chooseRandomSyncer(
- m.inactiveSyncers, m.transitionPassiveSyncer,
- )
- }
-
- m.syncersMu.Unlock()
-
- // Our RotateTicker has ticked, so we'll attempt to rotate a
- // single active syncer with a passive one.
- case <-m.cfg.RotateTicker.Ticks():
- m.rotateActiveSyncerCandidate()
-
- // Our HistoricalSyncTicker has ticked, so we'll randomly select
- // a peer and force a historical sync with them.
- case <-m.cfg.HistoricalSyncTicker.Ticks():
- s := m.forceHistoricalSync()
-
- // If we don't have a syncer available or we've already
- // performed our initial historical sync, then we have
- // nothing left to do.
- if s == nil || m.IsGraphSynced() {
- continue
- }
-
- // Otherwise, we'll track the peer we've performed a
- // historical sync with in order to handle the case
- // where our previous historical sync peer did not
- // respond to our queries and we haven't ingested as
- // much of the graph as we should.
- initialHistoricalSyncer = s
- initialHistoricalSyncSignal = s.ResetSyncedSignal()
-
- case <-m.quit:
- return
- }
- }
-}
-
-// createGossipSyncer creates the GossipSyncer for a newly connected peer.
-func (m *SyncManager) createGossipSyncer(peer lnpeer.Peer) *GossipSyncer {
- nodeID := route.Vertex(peer.PubKey())
- log.Infof("Creating new GossipSyncer for peer=%x", nodeID[:])
-
- encoding := lnwire.EncodingSortedPlain
- s := newGossipSyncer(gossipSyncerCfg{
- chainHash: m.cfg.ChainHash,
- peerPub: nodeID,
- channelSeries: m.cfg.ChanSeries,
- encodingType: encoding,
- chunkSize: encodingTypeToChunkSize[encoding],
- batchSize: requestBatchSize,
- sendToPeer: func(msgs ...lnwire.Message) er.R {
- return peer.SendMessageLazy(false, msgs...)
- },
- sendToPeerSync: func(msgs ...lnwire.Message) er.R {
- return peer.SendMessageLazy(true, msgs...)
- },
- ignoreHistoricalFilters: m.cfg.IgnoreHistoricalFilters,
- })
-
- // Gossip syncers are initialized by default in a PassiveSync type
- // and chansSynced state so that they can reply to any peer queries or
- // handle any sync transitions.
- s.setSyncState(chansSynced)
- s.setSyncType(PassiveSync)
- return s
-}
-
-// removeGossipSyncer removes all internal references to the disconnected peer's
-// GossipSyncer and stops it. In the event of an active GossipSyncer being
-// disconnected, a passive GossipSyncer, if any, will take its place.
-func (m *SyncManager) removeGossipSyncer(peer route.Vertex) {
- m.syncersMu.Lock()
- defer m.syncersMu.Unlock()
-
- s, ok := m.gossipSyncer(peer)
- if !ok {
- return
- }
-
- log.Infof("Removing GossipSyncer for peer=%v", peer)
-
- // We'll stop the GossipSyncer for the disconnected peer in a goroutine
- // to prevent blocking the SyncManager.
- go s.Stop()
-
- // If it's a non-active syncer, then we can just exit now.
- if _, ok := m.inactiveSyncers[peer]; ok {
- delete(m.inactiveSyncers, peer)
- return
- }
-
- // Otherwise, we'll need find a new one to replace it, if any.
- delete(m.activeSyncers, peer)
- newActiveSyncer := chooseRandomSyncer(
- m.inactiveSyncers, m.transitionPassiveSyncer,
- )
- if newActiveSyncer == nil {
- return
- }
-
- log.Debugf("Replaced active GossipSyncer(%x) with GossipSyncer(%x)",
- peer, newActiveSyncer.cfg.peerPub)
-}
-
-// rotateActiveSyncerCandidate rotates a single active syncer. In order to
-// achieve this, the active syncer must be in a chansSynced state in order to
-// process the sync transition.
-func (m *SyncManager) rotateActiveSyncerCandidate() {
- m.syncersMu.Lock()
- defer m.syncersMu.Unlock()
-
- // If we couldn't find an eligible active syncer to rotate, we can
- // return early.
- activeSyncer := chooseRandomSyncer(m.activeSyncers, nil)
- if activeSyncer == nil {
- log.Debug("No eligible active syncer to rotate")
- return
- }
-
- // Similarly, if we don't have a candidate to rotate with, we can return
- // early as well.
- candidate := chooseRandomSyncer(m.inactiveSyncers, nil)
- if candidate == nil {
- log.Debug("No eligible candidate to rotate active syncer")
- return
- }
-
- // Otherwise, we'll attempt to transition each syncer to their
- // respective new sync type.
- log.Debugf("Rotating active GossipSyncer(%x) with GossipSyncer(%x)",
- activeSyncer.cfg.peerPub, candidate.cfg.peerPub)
-
- if err := m.transitionActiveSyncer(activeSyncer); err != nil {
- log.Errorf("Unable to transition active GossipSyncer(%x): %v",
- activeSyncer.cfg.peerPub, err)
- return
- }
-
- if err := m.transitionPassiveSyncer(candidate); err != nil {
- log.Errorf("Unable to transition passive GossipSyncer(%x): %v",
- activeSyncer.cfg.peerPub, err)
- return
- }
-}
-
-// transitionActiveSyncer transitions an active syncer to a passive one.
-//
-// NOTE: This must be called with the syncersMu lock held.
-func (m *SyncManager) transitionActiveSyncer(s *GossipSyncer) er.R {
- log.Debugf("Transitioning active GossipSyncer(%x) to passive",
- s.cfg.peerPub)
-
- if err := s.ProcessSyncTransition(PassiveSync); err != nil {
- return err
- }
-
- delete(m.activeSyncers, s.cfg.peerPub)
- m.inactiveSyncers[s.cfg.peerPub] = s
-
- return nil
-}
-
-// transitionPassiveSyncer transitions a passive syncer to an active one.
-//
-// NOTE: This must be called with the syncersMu lock held.
-func (m *SyncManager) transitionPassiveSyncer(s *GossipSyncer) er.R {
- log.Debugf("Transitioning passive GossipSyncer(%x) to active",
- s.cfg.peerPub)
-
- if err := s.ProcessSyncTransition(ActiveSync); err != nil {
- return err
- }
-
- delete(m.inactiveSyncers, s.cfg.peerPub)
- m.activeSyncers[s.cfg.peerPub] = s
-
- return nil
-}
-
-// forceHistoricalSync chooses a syncer with a remote peer at random and forces
-// a historical sync with it.
-func (m *SyncManager) forceHistoricalSync() *GossipSyncer {
- m.syncersMu.Lock()
- defer m.syncersMu.Unlock()
-
- // We'll sample from both sets of active and inactive syncers in the
- // event that we don't have any inactive syncers.
- return chooseRandomSyncer(m.gossipSyncers(), func(s *GossipSyncer) er.R {
- return s.historicalSync()
- })
-}
-
-// chooseRandomSyncer iterates through the set of syncers given and returns the
-// first one which was able to successfully perform the action enclosed in the
-// function closure.
-//
-// NOTE: It's possible for a nil value to be returned if there are no eligible
-// candidate syncers.
-func chooseRandomSyncer(syncers map[route.Vertex]*GossipSyncer,
- action func(*GossipSyncer) er.R) *GossipSyncer {
-
- for _, s := range syncers {
- // Only syncers in a chansSynced state are viable for sync
- // transitions, so skip any that aren't.
- if s.syncState() != chansSynced {
- continue
- }
-
- if action != nil {
- if err := action(s); err != nil {
- log.Debugf("Skipping eligible candidate "+
- "GossipSyncer(%x): %v", s.cfg.peerPub,
- err)
- continue
- }
- }
-
- return s
- }
-
- return nil
-}
-
-// InitSyncState is called by outside sub-systems when a connection is
-// established to a new peer that understands how to perform channel range
-// queries. We'll allocate a new GossipSyncer for it, and start any goroutines
-// needed to handle new queries. The first GossipSyncer registered with the
-// SyncManager will attempt a historical sync to ensure we have as much of the
-// public channel graph as possible.
-//
-// TODO(wilmer): Only mark as ActiveSync if this isn't a channel peer.
-func (m *SyncManager) InitSyncState(peer lnpeer.Peer) er.R {
- done := make(chan struct{})
-
- select {
- case m.newSyncers <- &newSyncer{
- peer: peer,
- doneChan: done,
- }:
- case <-m.quit:
- return ErrSyncManagerExiting.Default()
- }
-
- select {
- case <-done:
- return nil
- case <-m.quit:
- return ErrSyncManagerExiting.Default()
- }
-}
-
-// PruneSyncState is called by outside sub-systems once a peer that we were
-// previously connected to has been disconnected. In this case we can stop the
-// existing GossipSyncer assigned to the peer and free up resources.
-func (m *SyncManager) PruneSyncState(peer route.Vertex) {
- done := make(chan struct{})
-
- // We avoid returning an error when the SyncManager is stopped since the
- // GossipSyncer will be stopped then anyway.
- select {
- case m.staleSyncers <- &staleSyncer{
- peer: peer,
- doneChan: done,
- }:
- case <-m.quit:
- return
- }
-
- select {
- case <-done:
- case <-m.quit:
- }
-}
-
-// GossipSyncer returns the associated gossip syncer of a peer. The boolean
-// returned signals whether there exists a gossip syncer for the peer.
-func (m *SyncManager) GossipSyncer(peer route.Vertex) (*GossipSyncer, bool) {
- m.syncersMu.Lock()
- defer m.syncersMu.Unlock()
- return m.gossipSyncer(peer)
-}
-
-// gossipSyncer returns the associated gossip syncer of a peer. The boolean
-// returned signals whether there exists a gossip syncer for the peer.
-func (m *SyncManager) gossipSyncer(peer route.Vertex) (*GossipSyncer, bool) {
- syncer, ok := m.inactiveSyncers[peer]
- if ok {
- return syncer, true
- }
- syncer, ok = m.activeSyncers[peer]
- if ok {
- return syncer, true
- }
- return nil, false
-}
-
-// GossipSyncers returns all of the currently initialized gossip syncers.
-func (m *SyncManager) GossipSyncers() map[route.Vertex]*GossipSyncer {
- m.syncersMu.Lock()
- defer m.syncersMu.Unlock()
- return m.gossipSyncers()
-}
-
-// gossipSyncers returns all of the currently initialized gossip syncers.
-func (m *SyncManager) gossipSyncers() map[route.Vertex]*GossipSyncer {
- numSyncers := len(m.inactiveSyncers) + len(m.activeSyncers)
- syncers := make(map[route.Vertex]*GossipSyncer, numSyncers)
-
- for _, syncer := range m.inactiveSyncers {
- syncers[syncer.cfg.peerPub] = syncer
- }
- for _, syncer := range m.activeSyncers {
- syncers[syncer.cfg.peerPub] = syncer
- }
-
- return syncers
-}
-
-// markGraphSynced allows us to report that the initial historical sync has
-// completed.
-func (m *SyncManager) markGraphSynced() {
- atomic.StoreInt32(&m.initialHistoricalSyncCompleted, 1)
-}
-
-// markGraphSyncing allows us to report that the initial historical sync is
-// still undergoing.
-func (m *SyncManager) markGraphSyncing() {
- atomic.StoreInt32(&m.initialHistoricalSyncCompleted, 0)
-}
-
-// IsGraphSynced determines whether we've completed our initial historical sync.
-// The initial historical sync is done to ensure we've ingested as much of the
-// public graph as possible.
-func (m *SyncManager) IsGraphSynced() bool {
- return atomic.LoadInt32(&m.initialHistoricalSyncCompleted) == 1
-}
diff --git a/lnd/discovery/sync_manager_test.go b/lnd/discovery/sync_manager_test.go
deleted file mode 100644
index df29c738..00000000
--- a/lnd/discovery/sync_manager_test.go
+++ /dev/null
@@ -1,586 +0,0 @@
-package discovery
-
-import (
- "math"
- "reflect"
- "sync/atomic"
- "testing"
- "time"
-
- "github.com/davecgh/go-spew/spew"
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/lnd/lntest/wait"
- "github.com/pkt-cash/pktd/lnd/lnwire"
- "github.com/pkt-cash/pktd/lnd/ticker"
-)
-
-// randPeer creates a random peer.
-func randPeer(t *testing.T, quit chan struct{}) *mockPeer {
- t.Helper()
-
- return &mockPeer{
- pk: randPubKey(t),
- sentMsgs: make(chan lnwire.Message),
- quit: quit,
- }
-}
-
-// newTestSyncManager creates a new test SyncManager using mock implementations
-// of its dependencies.
-func newTestSyncManager(numActiveSyncers int) *SyncManager {
- hID := lnwire.ShortChannelID{BlockHeight: latestKnownHeight}
- return newSyncManager(&SyncManagerCfg{
- ChanSeries: newMockChannelGraphTimeSeries(hID),
- RotateTicker: ticker.NewForce(DefaultSyncerRotationInterval),
- HistoricalSyncTicker: ticker.NewForce(DefaultHistoricalSyncInterval),
- NumActiveSyncers: numActiveSyncers,
- })
-}
-
-// TestSyncManagerNumActiveSyncers ensures that we are unable to have more than
-// NumActiveSyncers active syncers.
-func TestSyncManagerNumActiveSyncers(t *testing.T) {
- t.Parallel()
-
- // We'll start by creating our test sync manager which will hold up to
- // 3 active syncers.
- const numActiveSyncers = 3
- const numSyncers = numActiveSyncers + 1
-
- syncMgr := newTestSyncManager(numActiveSyncers)
- syncMgr.Start()
- defer syncMgr.Stop()
-
- // We'll go ahead and create our syncers. We'll gather the ones which
- // should be active and passive to check them later on.
- for i := 0; i < numActiveSyncers; i++ {
- peer := randPeer(t, syncMgr.quit)
- syncMgr.InitSyncState(peer)
- s := assertSyncerExistence(t, syncMgr, peer)
-
- // The first syncer registered always attempts a historical
- // sync.
- if i == 0 {
- assertTransitionToChansSynced(t, s, peer)
- }
- assertActiveGossipTimestampRange(t, peer)
- assertSyncerStatus(t, s, chansSynced, ActiveSync)
- }
-
- for i := 0; i < numSyncers-numActiveSyncers; i++ {
- peer := randPeer(t, syncMgr.quit)
- syncMgr.InitSyncState(peer)
- s := assertSyncerExistence(t, syncMgr, peer)
- assertSyncerStatus(t, s, chansSynced, PassiveSync)
- }
-}
-
-// TestSyncManagerNewActiveSyncerAfterDisconnect ensures that we can regain an
-// active syncer after losing one due to the peer disconnecting.
-func TestSyncManagerNewActiveSyncerAfterDisconnect(t *testing.T) {
- t.Parallel()
-
- // We'll create our test sync manager to have two active syncers.
- syncMgr := newTestSyncManager(2)
- syncMgr.Start()
- defer syncMgr.Stop()
-
- // The first will be an active syncer that performs a historical sync
- // since it is the first one registered with the SyncManager.
- historicalSyncPeer := randPeer(t, syncMgr.quit)
- syncMgr.InitSyncState(historicalSyncPeer)
- historicalSyncer := assertSyncerExistence(t, syncMgr, historicalSyncPeer)
- assertTransitionToChansSynced(t, historicalSyncer, historicalSyncPeer)
- assertActiveGossipTimestampRange(t, historicalSyncPeer)
- assertSyncerStatus(t, historicalSyncer, chansSynced, ActiveSync)
-
- // Then, we'll create the second active syncer, which is the one we'll
- // disconnect.
- activeSyncPeer := randPeer(t, syncMgr.quit)
- syncMgr.InitSyncState(activeSyncPeer)
- activeSyncer := assertSyncerExistence(t, syncMgr, activeSyncPeer)
- assertActiveGossipTimestampRange(t, activeSyncPeer)
- assertSyncerStatus(t, activeSyncer, chansSynced, ActiveSync)
-
- // It will then be torn down to simulate a disconnection. Since there
- // are no other candidate syncers available, the active syncer won't be
- // replaced.
- syncMgr.PruneSyncState(activeSyncPeer.PubKey())
-
- // Then, we'll start our active syncer again, but this time we'll also
- // have a passive syncer available to replace the active syncer after
- // the peer disconnects.
- syncMgr.InitSyncState(activeSyncPeer)
- activeSyncer = assertSyncerExistence(t, syncMgr, activeSyncPeer)
- assertActiveGossipTimestampRange(t, activeSyncPeer)
- assertSyncerStatus(t, activeSyncer, chansSynced, ActiveSync)
-
- // Create our second peer, which should be initialized as a passive
- // syncer.
- newActiveSyncPeer := randPeer(t, syncMgr.quit)
- syncMgr.InitSyncState(newActiveSyncPeer)
- newActiveSyncer := assertSyncerExistence(t, syncMgr, newActiveSyncPeer)
- assertSyncerStatus(t, newActiveSyncer, chansSynced, PassiveSync)
-
- // Disconnect our active syncer, which should trigger the SyncManager to
- // replace it with our passive syncer.
- go syncMgr.PruneSyncState(activeSyncPeer.PubKey())
- assertPassiveSyncerTransition(t, newActiveSyncer, newActiveSyncPeer)
-}
-
-// TestSyncManagerRotateActiveSyncerCandidate tests that we can successfully
-// rotate our active syncers after a certain interval.
-func TestSyncManagerRotateActiveSyncerCandidate(t *testing.T) {
- t.Parallel()
-
- // We'll create our sync manager with three active syncers.
- syncMgr := newTestSyncManager(1)
- syncMgr.Start()
- defer syncMgr.Stop()
-
- // The first syncer registered always performs a historical sync.
- activeSyncPeer := randPeer(t, syncMgr.quit)
- syncMgr.InitSyncState(activeSyncPeer)
- activeSyncer := assertSyncerExistence(t, syncMgr, activeSyncPeer)
- assertTransitionToChansSynced(t, activeSyncer, activeSyncPeer)
- assertActiveGossipTimestampRange(t, activeSyncPeer)
- assertSyncerStatus(t, activeSyncer, chansSynced, ActiveSync)
-
- // We'll send a tick to force a rotation. Since there aren't any
- // candidates, none of the active syncers will be rotated.
- syncMgr.cfg.RotateTicker.(*ticker.Force).Force <- time.Time{}
- assertNoMsgSent(t, activeSyncPeer)
- assertSyncerStatus(t, activeSyncer, chansSynced, ActiveSync)
-
- // We'll then go ahead and add a passive syncer.
- passiveSyncPeer := randPeer(t, syncMgr.quit)
- syncMgr.InitSyncState(passiveSyncPeer)
- passiveSyncer := assertSyncerExistence(t, syncMgr, passiveSyncPeer)
- assertSyncerStatus(t, passiveSyncer, chansSynced, PassiveSync)
-
- // We'll force another rotation - this time, since we have a passive
- // syncer available, they should be rotated.
- syncMgr.cfg.RotateTicker.(*ticker.Force).Force <- time.Time{}
-
- // The transition from an active syncer to a passive syncer causes the
- // peer to send out a new GossipTimestampRange in the past so that they
- // don't receive new graph updates.
- assertActiveSyncerTransition(t, activeSyncer, activeSyncPeer)
-
- // The transition from a passive syncer to an active syncer causes the
- // peer to send a new GossipTimestampRange with the current timestamp to
- // signal that they would like to receive new graph updates from their
- // peers. This will also cause the gossip syncer to redo its state
- // machine, starting from its initial syncingChans state. We'll then
- // need to transition it to its final chansSynced state to ensure the
- // next syncer is properly started in the round-robin.
- assertPassiveSyncerTransition(t, passiveSyncer, passiveSyncPeer)
-}
-
-// TestSyncManagerInitialHistoricalSync ensures that we only attempt a single
-// historical sync during the SyncManager's startup. If the peer corresponding
-// to the initial historical syncer disconnects, we should attempt to find a
-// replacement.
-func TestSyncManagerInitialHistoricalSync(t *testing.T) {
- t.Parallel()
-
- syncMgr := newTestSyncManager(0)
-
- // The graph should not be considered as synced since the sync manager
- // has yet to start.
- if syncMgr.IsGraphSynced() {
- t.Fatal("expected graph to not be considered as synced")
- }
-
- syncMgr.Start()
- defer syncMgr.Stop()
-
- // We should expect to see a QueryChannelRange message with a
- // FirstBlockHeight of the genesis block, signaling that an initial
- // historical sync is being attempted.
- peer := randPeer(t, syncMgr.quit)
- syncMgr.InitSyncState(peer)
- assertMsgSent(t, peer, &lnwire.QueryChannelRange{
- FirstBlockHeight: 0,
- NumBlocks: math.MaxUint32,
- })
-
- // The graph should not be considered as synced since the initial
- // historical sync has not finished.
- if syncMgr.IsGraphSynced() {
- t.Fatal("expected graph to not be considered as synced")
- }
-
- // If an additional peer connects, then another historical sync should
- // not be attempted.
- finalHistoricalPeer := randPeer(t, syncMgr.quit)
- syncMgr.InitSyncState(finalHistoricalPeer)
- finalHistoricalSyncer := assertSyncerExistence(t, syncMgr, finalHistoricalPeer)
- assertNoMsgSent(t, finalHistoricalPeer)
-
- // If we disconnect the peer performing the initial historical sync, a
- // new one should be chosen.
- syncMgr.PruneSyncState(peer.PubKey())
-
- // Complete the initial historical sync by transitionining the syncer to
- // its final chansSynced state. The graph should be considered as synced
- // after the fact.
- assertTransitionToChansSynced(t, finalHistoricalSyncer, finalHistoricalPeer)
- if !syncMgr.IsGraphSynced() {
- t.Fatal("expected graph to be considered as synced")
- }
-
- // Once the initial historical sync has succeeded, another one should
- // not be attempted by disconnecting the peer who performed it.
- extraPeer := randPeer(t, syncMgr.quit)
- syncMgr.InitSyncState(extraPeer)
- assertNoMsgSent(t, extraPeer)
- syncMgr.PruneSyncState(finalHistoricalPeer.PubKey())
- assertNoMsgSent(t, extraPeer)
-}
-
-// TestSyncManagerHistoricalSyncOnReconnect tests that the sync manager will
-// re-trigger a historical sync when a new peer connects after a historical
-// sync has completed, but we have lost all peers.
-func TestSyncManagerHistoricalSyncOnReconnect(t *testing.T) {
- t.Parallel()
-
- syncMgr := newTestSyncManager(2)
- syncMgr.Start()
- defer syncMgr.Stop()
-
- // We should expect to see a QueryChannelRange message with a
- // FirstBlockHeight of the genesis block, signaling that an initial
- // historical sync is being attempted.
- peer := randPeer(t, syncMgr.quit)
- syncMgr.InitSyncState(peer)
- s := assertSyncerExistence(t, syncMgr, peer)
- assertTransitionToChansSynced(t, s, peer)
- assertActiveGossipTimestampRange(t, peer)
- assertSyncerStatus(t, s, chansSynced, ActiveSync)
-
- // Now that the historical sync is completed, we prune the syncer,
- // simulating all peers having disconnected.
- syncMgr.PruneSyncState(peer.PubKey())
-
- // If a new peer now connects, then another historical sync should
- // be attempted. This is to ensure we get an up-to-date graph if we
- // haven't had any peers for a time.
- nextPeer := randPeer(t, syncMgr.quit)
- syncMgr.InitSyncState(nextPeer)
- s1 := assertSyncerExistence(t, syncMgr, nextPeer)
- assertTransitionToChansSynced(t, s1, nextPeer)
- assertActiveGossipTimestampRange(t, nextPeer)
- assertSyncerStatus(t, s1, chansSynced, ActiveSync)
-}
-
-// TestSyncManagerForceHistoricalSync ensures that we can perform routine
-// historical syncs whenever the HistoricalSyncTicker fires.
-func TestSyncManagerForceHistoricalSync(t *testing.T) {
- t.Parallel()
-
- syncMgr := newTestSyncManager(0)
- syncMgr.Start()
- defer syncMgr.Stop()
-
- // We should expect to see a QueryChannelRange message with a
- // FirstBlockHeight of the genesis block, signaling that a historical
- // sync is being attempted.
- peer := randPeer(t, syncMgr.quit)
- syncMgr.InitSyncState(peer)
- assertMsgSent(t, peer, &lnwire.QueryChannelRange{
- FirstBlockHeight: 0,
- NumBlocks: math.MaxUint32,
- })
-
- // If an additional peer connects, then a historical sync should not be
- // attempted again.
- extraPeer := randPeer(t, syncMgr.quit)
- syncMgr.InitSyncState(extraPeer)
- assertNoMsgSent(t, extraPeer)
-
- // Then, we'll send a tick to force a historical sync. This should
- // trigger the extra peer to also perform a historical sync since the
- // first peer is not eligible due to not being in a chansSynced state.
- syncMgr.cfg.HistoricalSyncTicker.(*ticker.Force).Force <- time.Time{}
- assertMsgSent(t, extraPeer, &lnwire.QueryChannelRange{
- FirstBlockHeight: 0,
- NumBlocks: math.MaxUint32,
- })
-}
-
-// TestSyncManagerGraphSyncedAfterHistoricalSyncReplacement ensures that the
-// sync manager properly marks the graph as synced given that our initial
-// historical sync has stalled, but a replacement has fully completed.
-func TestSyncManagerGraphSyncedAfterHistoricalSyncReplacement(t *testing.T) {
- t.Parallel()
-
- syncMgr := newTestSyncManager(0)
- syncMgr.Start()
- defer syncMgr.Stop()
-
- // We should expect to see a QueryChannelRange message with a
- // FirstBlockHeight of the genesis block, signaling that an initial
- // historical sync is being attempted.
- peer := randPeer(t, syncMgr.quit)
- syncMgr.InitSyncState(peer)
- assertMsgSent(t, peer, &lnwire.QueryChannelRange{
- FirstBlockHeight: 0,
- NumBlocks: math.MaxUint32,
- })
-
- // The graph should not be considered as synced since the initial
- // historical sync has not finished.
- if syncMgr.IsGraphSynced() {
- t.Fatal("expected graph to not be considered as synced")
- }
-
- // If an additional peer connects, then another historical sync should
- // not be attempted.
- finalHistoricalPeer := randPeer(t, syncMgr.quit)
- syncMgr.InitSyncState(finalHistoricalPeer)
- finalHistoricalSyncer := assertSyncerExistence(t, syncMgr, finalHistoricalPeer)
- assertNoMsgSent(t, finalHistoricalPeer)
-
- // To simulate that our initial historical sync has stalled, we'll force
- // a historical sync with the new peer to ensure it is replaced.
- syncMgr.cfg.HistoricalSyncTicker.(*ticker.Force).Force <- time.Time{}
-
- // The graph should still not be considered as synced since the
- // replacement historical sync has not finished.
- if syncMgr.IsGraphSynced() {
- t.Fatal("expected graph to not be considered as synced")
- }
-
- // Complete the replacement historical sync by transitioning the syncer
- // to its final chansSynced state. The graph should be considered as
- // synced after the fact.
- assertTransitionToChansSynced(t, finalHistoricalSyncer, finalHistoricalPeer)
- if !syncMgr.IsGraphSynced() {
- t.Fatal("expected graph to be considered as synced")
- }
-}
-
-// TestSyncManagerWaitUntilInitialHistoricalSync ensures that no GossipSyncers
-// are initialized as ActiveSync until the initial historical sync has been
-// completed. Once it does, the pending GossipSyncers should be transitioned to
-// ActiveSync.
-func TestSyncManagerWaitUntilInitialHistoricalSync(t *testing.T) {
- t.Parallel()
-
- const numActiveSyncers = 2
-
- // We'll start by creating our test sync manager which will hold up to
- // 2 active syncers.
- syncMgr := newTestSyncManager(numActiveSyncers)
- syncMgr.Start()
- defer syncMgr.Stop()
-
- // We'll go ahead and create our syncers.
- peers := make([]*mockPeer, 0, numActiveSyncers)
- syncers := make([]*GossipSyncer, 0, numActiveSyncers)
- for i := 0; i < numActiveSyncers; i++ {
- peer := randPeer(t, syncMgr.quit)
- peers = append(peers, peer)
-
- syncMgr.InitSyncState(peer)
- s := assertSyncerExistence(t, syncMgr, peer)
- syncers = append(syncers, s)
-
- // The first one always attempts a historical sync. We won't
- // transition it to chansSynced to ensure the remaining syncers
- // aren't started as active.
- if i == 0 {
- assertSyncerStatus(t, s, syncingChans, PassiveSync)
- continue
- }
-
- // The rest should remain in a passive and chansSynced state,
- // and they should be queued to transition to active once the
- // initial historical sync is completed.
- assertNoMsgSent(t, peer)
- assertSyncerStatus(t, s, chansSynced, PassiveSync)
- }
-
- // To ensure we don't transition any pending active syncers that have
- // previously disconnected, we'll disconnect the last one.
- stalePeer := peers[numActiveSyncers-1]
- syncMgr.PruneSyncState(stalePeer.PubKey())
-
- // Then, we'll complete the initial historical sync by transitioning the
- // historical syncer to its final chansSynced state. This should trigger
- // all of the pending active syncers to transition, except for the one
- // we disconnected.
- assertTransitionToChansSynced(t, syncers[0], peers[0])
- for i, s := range syncers {
- if i == numActiveSyncers-1 {
- assertNoMsgSent(t, peers[i])
- continue
- }
- assertPassiveSyncerTransition(t, s, peers[i])
- }
-}
-
-// assertNoMsgSent is a helper function that ensures a peer hasn't sent any
-// messages.
-func assertNoMsgSent(t *testing.T, peer *mockPeer) {
- t.Helper()
-
- select {
- case msg := <-peer.sentMsgs:
- t.Fatalf("peer %x sent unexpected message %v", peer.PubKey(),
- spew.Sdump(msg))
- case <-time.After(time.Second):
- }
-}
-
-// assertMsgSent asserts that the peer has sent the given message.
-func assertMsgSent(t *testing.T, peer *mockPeer, msg lnwire.Message) {
- t.Helper()
-
- var msgSent lnwire.Message
- select {
- case msgSent = <-peer.sentMsgs:
- case <-time.After(time.Second):
- t.Fatalf("expected peer %x to send %T message", peer.PubKey(),
- msg)
- }
-
- if !reflect.DeepEqual(msgSent, msg) {
- t.Fatalf("expected peer %x to send message: %v\ngot: %v",
- peer.PubKey(), spew.Sdump(msg), spew.Sdump(msgSent))
- }
-}
-
-// assertActiveGossipTimestampRange is a helper function that ensures a peer has
-// sent a lnwire.GossipTimestampRange message indicating that it would like to
-// receive new graph updates.
-func assertActiveGossipTimestampRange(t *testing.T, peer *mockPeer) {
- t.Helper()
-
- var msgSent lnwire.Message
- select {
- case msgSent = <-peer.sentMsgs:
- case <-time.After(2 * time.Second):
- t.Fatalf("expected peer %x to send lnwire.GossipTimestampRange "+
- "message", peer.PubKey())
- }
-
- msg, ok := msgSent.(*lnwire.GossipTimestampRange)
- if !ok {
- t.Fatalf("expected peer %x to send %T message", peer.PubKey(),
- msg)
- }
- if msg.FirstTimestamp == 0 {
- t.Fatalf("expected *lnwire.GossipTimestampRange message with " +
- "non-zero FirstTimestamp")
- }
- if msg.TimestampRange == 0 {
- t.Fatalf("expected *lnwire.GossipTimestampRange message with " +
- "non-zero TimestampRange")
- }
-}
-
-// assertSyncerExistence asserts that a GossipSyncer exists for the given peer.
-func assertSyncerExistence(t *testing.T, syncMgr *SyncManager,
- peer *mockPeer) *GossipSyncer {
-
- t.Helper()
-
- s, ok := syncMgr.GossipSyncer(peer.PubKey())
- if !ok {
- t.Fatalf("gossip syncer for peer %x not found", peer.PubKey())
- }
-
- return s
-}
-
-// assertSyncerStatus asserts that the gossip syncer for the given peer matches
-// the expected sync state and type.
-func assertSyncerStatus(t *testing.T, s *GossipSyncer, syncState syncerState,
- syncType SyncerType) {
-
- t.Helper()
-
- // We'll check the status of our syncer within a WaitPredicate as some
- // sync transitions might cause this to be racy.
- err := wait.NoError(func() er.R {
- state := s.syncState()
- if s.syncState() != syncState {
- return er.Errorf("expected syncState %v for peer "+
- "%x, got %v", syncState, s.cfg.peerPub, state)
- }
-
- typ := s.SyncType()
- if s.SyncType() != syncType {
- return er.Errorf("expected syncType %v for peer "+
- "%x, got %v", syncType, s.cfg.peerPub, typ)
- }
-
- return nil
- }, time.Second)
- if err != nil {
- t.Fatal(err)
- }
-}
-
-// assertTransitionToChansSynced asserts the transition of an ActiveSync
-// GossipSyncer to its final chansSynced state.
-func assertTransitionToChansSynced(t *testing.T, s *GossipSyncer, peer *mockPeer) {
- t.Helper()
-
- query := &lnwire.QueryChannelRange{
- FirstBlockHeight: 0,
- NumBlocks: math.MaxUint32,
- }
- assertMsgSent(t, peer, query)
-
- s.ProcessQueryMsg(&lnwire.ReplyChannelRange{
- QueryChannelRange: *query,
- Complete: 1,
- }, nil)
-
- chanSeries := s.cfg.channelSeries.(*mockChannelGraphTimeSeries)
-
- select {
- case <-chanSeries.filterReq:
- chanSeries.filterResp <- nil
- case <-time.After(2 * time.Second):
- t.Fatal("expected to receive FilterKnownChanIDs request")
- }
-
- err := wait.NoError(func() er.R {
- state := syncerState(atomic.LoadUint32(&s.state))
- if state != chansSynced {
- return er.Errorf("expected syncerState %v, got %v",
- chansSynced, state)
- }
-
- return nil
- }, time.Second)
- if err != nil {
- t.Fatal(err)
- }
-}
-
-// assertPassiveSyncerTransition asserts that a gossip syncer goes through all
-// of its expected steps when transitioning from passive to active.
-func assertPassiveSyncerTransition(t *testing.T, s *GossipSyncer, peer *mockPeer) {
-
- t.Helper()
-
- assertActiveGossipTimestampRange(t, peer)
- assertSyncerStatus(t, s, chansSynced, ActiveSync)
-}
-
-// assertActiveSyncerTransition asserts that a gossip syncer goes through all of
-// its expected steps when transitioning from active to passive.
-func assertActiveSyncerTransition(t *testing.T, s *GossipSyncer, peer *mockPeer) {
- t.Helper()
-
- assertMsgSent(t, peer, &lnwire.GossipTimestampRange{
- FirstTimestamp: uint32(zeroTimestamp.Unix()),
- TimestampRange: 0,
- })
- assertSyncerStatus(t, s, chansSynced, PassiveSync)
-}
diff --git a/lnd/discovery/syncer.go b/lnd/discovery/syncer.go
deleted file mode 100644
index 104108a9..00000000
--- a/lnd/discovery/syncer.go
+++ /dev/null
@@ -1,1442 +0,0 @@
-package discovery
-
-import (
- "fmt"
- "math"
- "sync"
- "sync/atomic"
- "time"
-
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/chaincfg/chainhash"
- "github.com/pkt-cash/pktd/lnd/lnpeer"
- "github.com/pkt-cash/pktd/lnd/lnwire"
- "github.com/pkt-cash/pktd/pktlog/log"
- "golang.org/x/time/rate"
-)
-
-// SyncerType encapsulates the different types of syncing mechanisms for a
-// gossip syncer.
-type SyncerType uint8
-
-const (
- // ActiveSync denotes that a gossip syncer:
- //
- // 1. Should not attempt to synchronize with the remote peer for
- // missing channels.
- // 2. Should respond to queries from the remote peer.
- // 3. Should receive new updates from the remote peer.
- //
- // They are started in a chansSynced state in order to accomplish their
- // responsibilities above.
- ActiveSync SyncerType = iota
-
- // PassiveSync denotes that a gossip syncer:
- //
- // 1. Should not attempt to synchronize with the remote peer for
- // missing channels.
- // 2. Should respond to queries from the remote peer.
- // 3. Should not receive new updates from the remote peer.
- //
- // They are started in a chansSynced state in order to accomplish their
- // responsibilities above.
- PassiveSync
-)
-
-// String returns a human readable string describing the target SyncerType.
-func (t SyncerType) String() string {
- switch t {
- case ActiveSync:
- return "ActiveSync"
- case PassiveSync:
- return "PassiveSync"
- default:
- return fmt.Sprintf("unknown sync type %d", t)
- }
-}
-
-// syncerState is an enum that represents the current state of the GossipSyncer.
-// As the syncer is a state machine, we'll gate our actions based off of the
-// current state and the next incoming message.
-type syncerState uint32
-
-const (
- // syncingChans is the default state of the GossipSyncer. We start in
- // this state when a new peer first connects and we don't yet know if
- // we're fully synchronized.
- syncingChans syncerState = iota
-
- // waitingQueryRangeReply is the second main phase of the GossipSyncer.
- // We enter this state after we send out our first QueryChannelRange
- // reply. We'll stay in this state until the remote party sends us a
- // ReplyShortChanIDsEnd message that indicates they've responded to our
- // query entirely. After this state, we'll transition to
- // waitingQueryChanReply after we send out requests for all the new
- // chan ID's to us.
- waitingQueryRangeReply
-
- // queryNewChannels is the third main phase of the GossipSyncer. In
- // this phase we'll send out all of our QueryShortChanIDs messages in
- // response to the new channels that we don't yet know about.
- queryNewChannels
-
- // waitingQueryChanReply is the fourth main phase of the GossipSyncer.
- // We enter this phase once we've sent off a query chink to the remote
- // peer. We'll stay in this phase until we receive a
- // ReplyShortChanIDsEnd message which indicates that the remote party
- // has responded to all of our requests.
- waitingQueryChanReply
-
- // chansSynced is the terminal stage of the GossipSyncer. Once we enter
- // this phase, we'll send out our update horizon, which filters out the
- // set of channel updates that we're interested in. In this state,
- // we'll be able to accept any outgoing messages from the
- // AuthenticatedGossiper, and decide if we should forward them to our
- // target peer based on its update horizon.
- chansSynced
-)
-
-// String returns a human readable string describing the target syncerState.
-func (s syncerState) String() string {
- switch s {
- case syncingChans:
- return "syncingChans"
-
- case waitingQueryRangeReply:
- return "waitingQueryRangeReply"
-
- case queryNewChannels:
- return "queryNewChannels"
-
- case waitingQueryChanReply:
- return "waitingQueryChanReply"
-
- case chansSynced:
- return "chansSynced"
-
- default:
- return "UNKNOWN STATE"
- }
-}
-
-const (
- // DefaultMaxUndelayedQueryReplies specifies how many gossip queries we
- // will respond to immediately before starting to delay responses.
- DefaultMaxUndelayedQueryReplies = 10
-
- // DefaultDelayedQueryReplyInterval is the length of time we will wait
- // before responding to gossip queries after replying to
- // maxUndelayedQueryReplies queries.
- DefaultDelayedQueryReplyInterval = 5 * time.Second
-
- // chanRangeQueryBuffer is the number of blocks back that we'll go when
- // asking the remote peer for their any channels they know of beyond
- // our highest known channel ID.
- chanRangeQueryBuffer = 144
-
- // syncTransitionTimeout is the default timeout in which we'll wait up
- // to when attempting to perform a sync transition.
- syncTransitionTimeout = 5 * time.Second
-
- // requestBatchSize is the maximum number of channels we will query the
- // remote peer for in a QueryShortChanIDs message.
- requestBatchSize = 500
-)
-
-var (
- // encodingTypeToChunkSize maps an encoding type, to the max number of
- // short chan ID's using the encoding type that we can fit into a
- // single message safely.
- encodingTypeToChunkSize = map[lnwire.ShortChanIDEncoding]int32{
- lnwire.EncodingSortedPlain: 8000,
- }
-
- // ErrGossipSyncerExiting signals that the syncer has been killed.
- ErrGossipSyncerExiting = Err.CodeWithDetail("ErrGossipSyncerExiting", "gossip syncer exiting")
-
- // ErrSyncTransitionTimeout is an error returned when we've timed out
- // attempting to perform a sync transition.
- ErrSyncTransitionTimeout = Err.CodeWithDetail("ErrSyncTransitionTimeout", "timed out attempting to "+
- "transition sync type")
-
- // zeroTimestamp is the timestamp we'll use when we want to indicate to
- // peers that we do not want to receive any new graph updates.
- zeroTimestamp time.Time
-)
-
-// syncTransitionReq encapsulates a request for a gossip syncer sync transition.
-type syncTransitionReq struct {
- newSyncType SyncerType
- errChan chan er.R
-}
-
-// historicalSyncReq encapsulates a request for a gossip syncer to perform a
-// historical sync.
-type historicalSyncReq struct {
- // doneChan is a channel that serves as a signal and is closed to ensure
- // the historical sync is attempted by the time we return to the caller.
- doneChan chan struct{}
-}
-
-// gossipSyncerCfg is a struct that packages all the information a GossipSyncer
-// needs to carry out its duties.
-type gossipSyncerCfg struct {
- // chainHash is the chain that this syncer is responsible for.
- chainHash chainhash.Hash
-
- // peerPub is the public key of the peer we're syncing with, serialized
- // in compressed format.
- peerPub [33]byte
-
- // channelSeries is the primary interface that we'll use to generate
- // our queries and respond to the queries of the remote peer.
- channelSeries ChannelGraphTimeSeries
-
- // encodingType is the current encoding type we're aware of. Requests
- // with different encoding types will be rejected.
- encodingType lnwire.ShortChanIDEncoding
-
- // chunkSize is the max number of short chan IDs using the syncer's
- // encoding type that we can fit into a single message safely.
- chunkSize int32
-
- // batchSize is the max number of channels the syncer will query from
- // the remote node in a single QueryShortChanIDs request.
- batchSize int32
-
- // sendToPeer sends a variadic number of messages to the remote peer.
- // This method should not block while waiting for sends to be written
- // to the wire.
- sendToPeer func(...lnwire.Message) er.R
-
- // sendToPeerSync sends a variadic number of messages to the remote
- // peer, blocking until all messages have been sent successfully or a
- // write error is encountered.
- sendToPeerSync func(...lnwire.Message) er.R
-
- // maxUndelayedQueryReplies specifies how many gossip queries we will
- // respond to immediately before starting to delay responses.
- maxUndelayedQueryReplies int
-
- // delayedQueryReplyInterval is the length of time we will wait before
- // responding to gossip queries after replying to
- // maxUndelayedQueryReplies queries.
- delayedQueryReplyInterval time.Duration
-
- // noSyncChannels will prevent the GossipSyncer from spawning a
- // channelGraphSyncer, meaning we will not try to reconcile unknown
- // channels with the remote peer.
- noSyncChannels bool
-
- // noReplyQueries will prevent the GossipSyncer from spawning a
- // replyHandler, meaning we will not reply to queries from our remote
- // peer.
- noReplyQueries bool
-
- // ignoreHistoricalFilters will prevent syncers from replying with
- // historical data when the remote peer sets a gossip_timestamp_range.
- // This prevents ranges with old start times from causing us to dump the
- // graph on connect.
- ignoreHistoricalFilters bool
-}
-
-// GossipSyncer is a struct that handles synchronizing the channel graph state
-// with a remote peer. The GossipSyncer implements a state machine that will
-// progressively ensure we're synchronized with the channel state of the remote
-// node. Once both nodes have been synchronized, we'll use an update filter to
-// filter out which messages should be sent to a remote peer based on their
-// update horizon. If the update horizon isn't specified, then we won't send
-// them any channel updates at all.
-type GossipSyncer struct {
- started sync.Once
- stopped sync.Once
-
- // state is the current state of the GossipSyncer.
- //
- // NOTE: This variable MUST be used atomically.
- state uint32
-
- // syncType denotes the SyncerType the gossip syncer is currently
- // exercising.
- //
- // NOTE: This variable MUST be used atomically.
- syncType uint32
-
- // remoteUpdateHorizon is the update horizon of the remote peer. We'll
- // use this to properly filter out any messages.
- remoteUpdateHorizon *lnwire.GossipTimestampRange
-
- // localUpdateHorizon is our local update horizon, we'll use this to
- // determine if we've already sent out our update.
- localUpdateHorizon *lnwire.GossipTimestampRange
-
- // syncTransitions is a channel through which new sync type transition
- // requests will be sent through. These requests should only be handled
- // when the gossip syncer is in a chansSynced state to ensure its state
- // machine behaves as expected.
- syncTransitionReqs chan *syncTransitionReq
-
- // historicalSyncReqs is a channel that serves as a signal for the
- // gossip syncer to perform a historical sync. These can only be done
- // once the gossip syncer is in a chansSynced state to ensure its state
- // machine behaves as expected.
- historicalSyncReqs chan *historicalSyncReq
-
- // genHistoricalChanRangeQuery when true signals to the gossip syncer
- // that it should request the remote peer for all of its known channel
- // IDs starting from the genesis block of the chain. This can only
- // happen if the gossip syncer receives a request to attempt a
- // historical sync. It can be unset if the syncer ever transitions from
- // PassiveSync to ActiveSync.
- genHistoricalChanRangeQuery bool
-
- // gossipMsgs is a channel that all responses to our queries from the
- // target peer will be sent over, these will be read by the
- // channelGraphSyncer.
- gossipMsgs chan lnwire.Message
-
- // queryMsgs is a channel that all queries from the remote peer will be
- // received over, these will be read by the replyHandler.
- queryMsgs chan lnwire.Message
-
- // curQueryRangeMsg keeps track of the latest QueryChannelRange message
- // we've sent to a peer to ensure we've consumed all expected replies.
- // This field is primarily used within the waitingQueryChanReply state.
- curQueryRangeMsg *lnwire.QueryChannelRange
-
- // prevReplyChannelRange keeps track of the previous ReplyChannelRange
- // message we've received from a peer to ensure they've fully replied to
- // our query by ensuring they covered our requested block range. This
- // field is primarily used within the waitingQueryChanReply state.
- prevReplyChannelRange *lnwire.ReplyChannelRange
-
- // bufferedChanRangeReplies is used in the waitingQueryChanReply to
- // buffer all the chunked response to our query.
- bufferedChanRangeReplies []lnwire.ShortChannelID
-
- // newChansToQuery is used to pass the set of channels we should query
- // for from the waitingQueryChanReply state to the queryNewChannels
- // state.
- newChansToQuery []lnwire.ShortChannelID
-
- cfg gossipSyncerCfg
-
- // rateLimiter dictates the frequency with which we will reply to gossip
- // queries from a peer. This is used to delay responses to peers to
- // prevent DOS vulnerabilities if they are spamming with an unreasonable
- // number of queries.
- rateLimiter *rate.Limiter
-
- // syncedSignal is a channel that, if set, will be closed when the
- // GossipSyncer reaches its terminal chansSynced state.
- syncedSignal chan struct{}
-
- sync.Mutex
-
- quit chan struct{}
- wg sync.WaitGroup
-}
-
-// newGossipSyncer returns a new instance of the GossipSyncer populated using
-// the passed config.
-func newGossipSyncer(cfg gossipSyncerCfg) *GossipSyncer {
- // If no parameter was specified for max undelayed query replies, set it
- // to the default of 5 queries.
- if cfg.maxUndelayedQueryReplies <= 0 {
- cfg.maxUndelayedQueryReplies = DefaultMaxUndelayedQueryReplies
- }
-
- // If no parameter was specified for delayed query reply interval, set
- // to the default of 5 seconds.
- if cfg.delayedQueryReplyInterval <= 0 {
- cfg.delayedQueryReplyInterval = DefaultDelayedQueryReplyInterval
- }
-
- // Construct a rate limiter that will govern how frequently we reply to
- // gossip queries from this peer. The limiter will automatically adjust
- // during periods of quiescence, and increase the reply interval under
- // load.
- interval := rate.Every(cfg.delayedQueryReplyInterval)
- rateLimiter := rate.NewLimiter(
- interval, cfg.maxUndelayedQueryReplies,
- )
-
- return &GossipSyncer{
- cfg: cfg,
- rateLimiter: rateLimiter,
- syncTransitionReqs: make(chan *syncTransitionReq),
- historicalSyncReqs: make(chan *historicalSyncReq),
- gossipMsgs: make(chan lnwire.Message, 100),
- queryMsgs: make(chan lnwire.Message, 100),
- quit: make(chan struct{}),
- }
-}
-
-// Start starts the GossipSyncer and any goroutines that it needs to carry out
-// its duties.
-func (g *GossipSyncer) Start() {
- g.started.Do(func() {
- log.Debugf("Starting GossipSyncer(%x)", g.cfg.peerPub[:])
-
- // TODO(conner): only spawn channelGraphSyncer if remote
- // supports gossip queries, and only spawn replyHandler if we
- // advertise support
- if !g.cfg.noSyncChannels {
- g.wg.Add(1)
- go g.channelGraphSyncer()
- }
- if !g.cfg.noReplyQueries {
- g.wg.Add(1)
- go g.replyHandler()
- }
- })
-}
-
-// Stop signals the GossipSyncer for a graceful exit, then waits until it has
-// exited.
-func (g *GossipSyncer) Stop() {
- g.stopped.Do(func() {
- close(g.quit)
- g.wg.Wait()
- })
-}
-
-// channelGraphSyncer is the main goroutine responsible for ensuring that we
-// properly channel graph state with the remote peer, and also that we only
-// send them messages which actually pass their defined update horizon.
-func (g *GossipSyncer) channelGraphSyncer() {
- defer g.wg.Done()
-
- for {
- state := g.syncState()
- syncType := g.SyncType()
-
- log.Debugf("GossipSyncer(%x): state=%v, type=%v",
- g.cfg.peerPub[:], state, syncType)
-
- switch state {
- // When we're in this state, we're trying to synchronize our
- // view of the network with the remote peer. We'll kick off
- // this sync by asking them for the set of channels they
- // understand, as we'll as responding to any other queries by
- // them.
- case syncingChans:
- // If we're in this state, then we'll send the remote
- // peer our opening QueryChannelRange message.
- queryRangeMsg, err := g.genChanRangeQuery(
- g.genHistoricalChanRangeQuery,
- )
- if err != nil {
- log.Errorf("Unable to gen chan range "+
- "query: %v", err)
- return
- }
-
- err = g.cfg.sendToPeer(queryRangeMsg)
- if err != nil {
- log.Errorf("Unable to send chan range "+
- "query: %v", err)
- return
- }
-
- // With the message sent successfully, we'll transition
- // into the next state where we wait for their reply.
- g.setSyncState(waitingQueryRangeReply)
-
- // In this state, we've sent out our initial channel range
- // query and are waiting for the final response from the remote
- // peer before we perform a diff to see with channels they know
- // of that we don't.
- case waitingQueryRangeReply:
- // We'll wait to either process a new message from the
- // remote party, or exit due to the gossiper exiting,
- // or us being signalled to do so.
- select {
- case msg := <-g.gossipMsgs:
- // The remote peer is sending a response to our
- // initial query, we'll collate this response,
- // and see if it's the final one in the series.
- // If so, we can then transition to querying
- // for the new channels.
- queryReply, ok := msg.(*lnwire.ReplyChannelRange)
- if ok {
- err := g.processChanRangeReply(queryReply)
- if err != nil {
- log.Errorf("Unable to "+
- "process chan range "+
- "query: %v", err)
- return
- }
- continue
- }
-
- log.Warnf("Unexpected message: %T in state=%v",
- msg, state)
-
- case <-g.quit:
- return
- }
-
- // We'll enter this state once we've discovered which channels
- // the remote party knows of that we don't yet know of
- // ourselves.
- case queryNewChannels:
- // First, we'll attempt to continue our channel
- // synchronization by continuing to send off another
- // query chunk.
- done, err := g.synchronizeChanIDs()
- if err != nil {
- log.Errorf("Unable to sync chan IDs: %v", err)
- }
-
- // If this wasn't our last query, then we'll need to
- // transition to our waiting state.
- if !done {
- g.setSyncState(waitingQueryChanReply)
- continue
- }
-
- // If we're fully synchronized, then we can transition
- // to our terminal state.
- g.setSyncState(chansSynced)
-
- // In this state, we've just sent off a new query for channels
- // that we don't yet know of. We'll remain in this state until
- // the remote party signals they've responded to our query in
- // totality.
- case waitingQueryChanReply:
- // Once we've sent off our query, we'll wait for either
- // an ending reply, or just another query from the
- // remote peer.
- select {
- case msg := <-g.gossipMsgs:
- // If this is the final reply to one of our
- // queries, then we'll loop back into our query
- // state to send of the remaining query chunks.
- _, ok := msg.(*lnwire.ReplyShortChanIDsEnd)
- if ok {
- g.setSyncState(queryNewChannels)
- continue
- }
-
- log.Warnf("Unexpected message: %T in state=%v",
- msg, state)
-
- case <-g.quit:
- return
- }
-
- // This is our final terminal state where we'll only reply to
- // any further queries by the remote peer.
- case chansSynced:
- g.Lock()
- if g.syncedSignal != nil {
- close(g.syncedSignal)
- g.syncedSignal = nil
- }
- g.Unlock()
-
- // If we haven't yet sent out our update horizon, and
- // we want to receive real-time channel updates, we'll
- // do so now.
- if g.localUpdateHorizon == nil && syncType == ActiveSync {
- err := g.sendGossipTimestampRange(
- time.Now(), math.MaxUint32,
- )
- if err != nil {
- log.Errorf("Unable to send update "+
- "horizon to %x: %v",
- g.cfg.peerPub, err)
- }
- }
-
- // With our horizon set, we'll simply reply to any new
- // messages or process any state transitions and exit if
- // needed.
- select {
- case req := <-g.syncTransitionReqs:
- req.errChan <- g.handleSyncTransition(req)
-
- case req := <-g.historicalSyncReqs:
- g.handleHistoricalSync(req)
-
- case <-g.quit:
- return
- }
- }
- }
-}
-
-// replyHandler is an event loop whose sole purpose is to reply to the remote
-// peers queries. Our replyHandler will respond to messages generated by their
-// channelGraphSyncer, and vice versa. Each party's channelGraphSyncer drives
-// the other's replyHandler, allowing the replyHandler to operate independently
-// from the state machine maintained on the same node.
-//
-// NOTE: This method MUST be run as a goroutine.
-func (g *GossipSyncer) replyHandler() {
- defer g.wg.Done()
-
- for {
- select {
- case msg := <-g.queryMsgs:
- err := g.replyPeerQueries(msg)
- switch {
- case ErrGossipSyncerExiting.Is(err):
- return
-
- case lnpeer.ErrPeerExiting.Is(err):
- return
-
- case err != nil:
- log.Errorf("Unable to reply to peer "+
- "query: %v", err)
- }
-
- case <-g.quit:
- return
- }
- }
-}
-
-// sendGossipTimestampRange constructs and sets a GossipTimestampRange for the
-// syncer and sends it to the remote peer.
-func (g *GossipSyncer) sendGossipTimestampRange(firstTimestamp time.Time,
- timestampRange uint32) er.R {
-
- endTimestamp := firstTimestamp.Add(
- time.Duration(timestampRange) * time.Second,
- )
-
- log.Infof("GossipSyncer(%x): applying gossipFilter(start=%v, end=%v)",
- g.cfg.peerPub[:], firstTimestamp, endTimestamp)
-
- localUpdateHorizon := &lnwire.GossipTimestampRange{
- ChainHash: g.cfg.chainHash,
- FirstTimestamp: uint32(firstTimestamp.Unix()),
- TimestampRange: timestampRange,
- }
-
- if err := g.cfg.sendToPeer(localUpdateHorizon); err != nil {
- return err
- }
-
- if firstTimestamp == zeroTimestamp && timestampRange == 0 {
- g.localUpdateHorizon = nil
- } else {
- g.localUpdateHorizon = localUpdateHorizon
- }
-
- return nil
-}
-
-// synchronizeChanIDs is called by the channelGraphSyncer when we need to query
-// the remote peer for its known set of channel IDs within a particular block
-// range. This method will be called continually until the entire range has
-// been queried for with a response received. We'll chunk our requests as
-// required to ensure they fit into a single message. We may re-renter this
-// state in the case that chunking is required.
-func (g *GossipSyncer) synchronizeChanIDs() (bool, er.R) {
- // If we're in this state yet there are no more new channels to query
- // for, then we'll transition to our final synced state and return true
- // to signal that we're fully synchronized.
- if len(g.newChansToQuery) == 0 {
- log.Infof("GossipSyncer(%x): no more chans to query",
- g.cfg.peerPub[:])
- return true, nil
- }
-
- // Otherwise, we'll issue our next chunked query to receive replies
- // for.
- var queryChunk []lnwire.ShortChannelID
-
- // If the number of channels to query for is less than the chunk size,
- // then we can issue a single query.
- if int32(len(g.newChansToQuery)) < g.cfg.batchSize {
- queryChunk = g.newChansToQuery
- g.newChansToQuery = nil
-
- } else {
- // Otherwise, we'll need to only query for the next chunk.
- // We'll slice into our query chunk, then slide down our main
- // pointer down by the chunk size.
- queryChunk = g.newChansToQuery[:g.cfg.batchSize]
- g.newChansToQuery = g.newChansToQuery[g.cfg.batchSize:]
- }
-
- log.Infof("GossipSyncer(%x): querying for %v new channels",
- g.cfg.peerPub[:], len(queryChunk))
-
- // With our chunk obtained, we'll send over our next query, then return
- // false indicating that we're net yet fully synced.
- err := g.cfg.sendToPeer(&lnwire.QueryShortChanIDs{
- ChainHash: g.cfg.chainHash,
- EncodingType: lnwire.EncodingSortedPlain,
- ShortChanIDs: queryChunk,
- })
-
- return false, err
-}
-
-// isLegacyReplyChannelRange determines where a ReplyChannelRange message is
-// considered legacy. There was a point where lnd used to include the same query
-// over multiple replies, rather than including the portion of the query the
-// reply is handling. We'll use this as a way of detecting whether we are
-// communicating with a legacy node so we can properly sync with them.
-func isLegacyReplyChannelRange(query *lnwire.QueryChannelRange,
- reply *lnwire.ReplyChannelRange) bool {
-
- return reply.QueryChannelRange == *query
-}
-
-// processChanRangeReply is called each time the GossipSyncer receives a new
-// reply to the initial range query to discover new channels that it didn't
-// previously know of.
-func (g *GossipSyncer) processChanRangeReply(msg *lnwire.ReplyChannelRange) er.R {
- // If we're not communicating with a legacy node, we'll apply some
- // further constraints on their reply to ensure it satisfies our query.
- if !isLegacyReplyChannelRange(g.curQueryRangeMsg, msg) {
- // The first block should be within our original request.
- if msg.FirstBlockHeight < g.curQueryRangeMsg.FirstBlockHeight {
- return er.Errorf("reply includes channels for height "+
- "%v prior to query %v", msg.FirstBlockHeight,
- g.curQueryRangeMsg.FirstBlockHeight)
- }
-
- // The last block should also be. We don't need to check the
- // intermediate ones because they should already be in sorted
- // order.
- replyLastHeight := msg.QueryChannelRange.LastBlockHeight()
- queryLastHeight := g.curQueryRangeMsg.LastBlockHeight()
- if replyLastHeight > queryLastHeight {
- return er.Errorf("reply includes channels for height "+
- "%v after query %v", replyLastHeight,
- queryLastHeight)
- }
-
- // If we've previously received a reply for this query, look at
- // its last block to ensure the current reply properly follows
- // it.
- if g.prevReplyChannelRange != nil {
- prevReply := g.prevReplyChannelRange
- prevReplyLastHeight := prevReply.LastBlockHeight()
-
- // The current reply can either start from the previous
- // reply's last block, if there are still more channels
- // for the same block, or the block after.
- if msg.FirstBlockHeight != prevReplyLastHeight &&
- msg.FirstBlockHeight != prevReplyLastHeight+1 {
-
- return er.Errorf("first block of reply %v "+
- "does not continue from last block of "+
- "previous %v", msg.FirstBlockHeight,
- prevReplyLastHeight)
- }
- }
- }
-
- g.prevReplyChannelRange = msg
- g.bufferedChanRangeReplies = append(
- g.bufferedChanRangeReplies, msg.ShortChanIDs...,
- )
-
- log.Infof("GossipSyncer(%x): buffering chan range reply of size=%v",
- g.cfg.peerPub[:], len(msg.ShortChanIDs))
-
- // If this isn't the last response, then we can exit as we've already
- // buffered the latest portion of the streaming reply.
- switch {
- // If we're communicating with a legacy node, we'll need to look at the
- // complete field.
- case isLegacyReplyChannelRange(g.curQueryRangeMsg, msg):
- if msg.Complete == 0 {
- return nil
- }
-
- // Otherwise, we'll look at the reply's height range.
- default:
- replyLastHeight := msg.QueryChannelRange.LastBlockHeight()
- queryLastHeight := g.curQueryRangeMsg.LastBlockHeight()
-
- // TODO(wilmer): This might require some padding if the remote
- // node is not aware of the last height we sent them, i.e., is
- // behind a few blocks from us.
- if replyLastHeight < queryLastHeight {
- return nil
- }
- }
-
- log.Infof("GossipSyncer(%x): filtering through %v chans",
- g.cfg.peerPub[:], len(g.bufferedChanRangeReplies))
-
- // Otherwise, this is the final response, so we'll now check to see
- // which channels they know of that we don't.
- newChans, err := g.cfg.channelSeries.FilterKnownChanIDs(
- g.cfg.chainHash, g.bufferedChanRangeReplies,
- )
- if err != nil {
- return er.Errorf("unable to filter chan ids: %v", err)
- }
-
- // As we've received the entirety of the reply, we no longer need to
- // hold on to the set of buffered replies or the original query that
- // prompted the replies, so we'll let that be garbage collected now.
- g.curQueryRangeMsg = nil
- g.prevReplyChannelRange = nil
- g.bufferedChanRangeReplies = nil
-
- // If there aren't any channels that we don't know of, then we can
- // switch straight to our terminal state.
- if len(newChans) == 0 {
- log.Infof("GossipSyncer(%x): remote peer has no new chans",
- g.cfg.peerPub[:])
-
- g.setSyncState(chansSynced)
- return nil
- }
-
- // Otherwise, we'll set the set of channels that we need to query for
- // the next state, and also transition our state.
- g.newChansToQuery = newChans
- g.setSyncState(queryNewChannels)
-
- log.Infof("GossipSyncer(%x): starting query for %v new chans",
- g.cfg.peerPub[:], len(newChans))
-
- return nil
-}
-
-// genChanRangeQuery generates the initial message we'll send to the remote
-// party when we're kicking off the channel graph synchronization upon
-// connection. The historicalQuery boolean can be used to generate a query from
-// the genesis block of the chain.
-func (g *GossipSyncer) genChanRangeQuery(
- historicalQuery bool) (*lnwire.QueryChannelRange, er.R) {
-
- // First, we'll query our channel graph time series for its highest
- // known channel ID.
- newestChan, err := g.cfg.channelSeries.HighestChanID(g.cfg.chainHash)
- if err != nil {
- return nil, err
- }
-
- // Once we have the chan ID of the newest, we'll obtain the block height
- // of the channel, then subtract our default horizon to ensure we don't
- // miss any channels. By default, we go back 1 day from the newest
- // channel, unless we're attempting a historical sync, where we'll
- // actually start from the genesis block instead.
- var startHeight uint32
- switch {
- case historicalQuery:
- fallthrough
- case newestChan.BlockHeight <= chanRangeQueryBuffer:
- startHeight = 0
- default:
- startHeight = uint32(newestChan.BlockHeight - chanRangeQueryBuffer)
- }
-
- log.Infof("GossipSyncer(%x): requesting new chans from height=%v "+
- "and %v blocks after", g.cfg.peerPub[:], startHeight,
- math.MaxUint32-startHeight)
-
- // Finally, we'll craft the channel range query, using our starting
- // height, then asking for all known channels to the foreseeable end of
- // the main chain.
- query := &lnwire.QueryChannelRange{
- ChainHash: g.cfg.chainHash,
- FirstBlockHeight: startHeight,
- NumBlocks: math.MaxUint32 - startHeight,
- }
- g.curQueryRangeMsg = query
-
- return query, nil
-}
-
-// replyPeerQueries is called in response to any query by the remote peer.
-// We'll examine our state and send back our best response.
-func (g *GossipSyncer) replyPeerQueries(msg lnwire.Message) er.R {
- reservation := g.rateLimiter.Reserve()
- delay := reservation.Delay()
-
- // If we've already replied a handful of times, we will start to delay
- // responses back to the remote peer. This can help prevent DOS attacks
- // where the remote peer spams us endlessly.
- if delay > 0 {
- log.Infof("GossipSyncer(%x): rate limiting gossip replies, "+
- "responding in %s", g.cfg.peerPub[:], delay)
-
- select {
- case <-time.After(delay):
- case <-g.quit:
- return ErrGossipSyncerExiting.Default()
- }
- }
-
- switch msg := msg.(type) {
-
- // In this state, we'll also handle any incoming channel range queries
- // from the remote peer as they're trying to sync their state as well.
- case *lnwire.QueryChannelRange:
- return g.replyChanRangeQuery(msg)
-
- // If the remote peer skips straight to requesting new channels that
- // they don't know of, then we'll ensure that we also handle this case.
- case *lnwire.QueryShortChanIDs:
- return g.replyShortChanIDs(msg)
-
- default:
- return er.Errorf("unknown message: %T", msg)
- }
-}
-
-// replyChanRangeQuery will be dispatched in response to a channel range query
-// by the remote node. We'll query the channel time series for channels that
-// meet the channel range, then chunk our responses to the remote node. We also
-// ensure that our final fragment carries the "complete" bit to indicate the
-// end of our streaming response.
-func (g *GossipSyncer) replyChanRangeQuery(query *lnwire.QueryChannelRange) er.R {
- // Before responding, we'll check to ensure that the remote peer is
- // querying for the same chain that we're on. If not, we'll send back a
- // response with a complete value of zero to indicate we're on a
- // different chain.
- if g.cfg.chainHash != query.ChainHash {
- log.Warnf("Remote peer requested QueryChannelRange for "+
- "chain=%v, we're on chain=%v", query.ChainHash,
- g.cfg.chainHash)
-
- return g.cfg.sendToPeerSync(&lnwire.ReplyChannelRange{
- QueryChannelRange: *query,
- Complete: 0,
- EncodingType: g.cfg.encodingType,
- ShortChanIDs: nil,
- })
- }
-
- log.Infof("GossipSyncer(%x): filtering chan range: start_height=%v, "+
- "num_blocks=%v", g.cfg.peerPub[:], query.FirstBlockHeight,
- query.NumBlocks)
-
- // Next, we'll consult the time series to obtain the set of known
- // channel ID's that match their query.
- startBlock := query.FirstBlockHeight
- endBlock := query.LastBlockHeight()
- channelRange, err := g.cfg.channelSeries.FilterChannelRange(
- query.ChainHash, startBlock, endBlock,
- )
- if err != nil {
- return err
- }
-
- // TODO(roasbeef): means can't send max uint above?
- // * or make internal 64
-
- // In the base case (no actual response) the first block and last block
- // will match those of the query. In the loop below, we'll update these
- // two variables incrementally with each chunk to properly compute the
- // starting block for each response and the number of blocks in a
- // response.
- firstBlockHeight := startBlock
- lastBlockHeight := endBlock
-
- numChannels := int32(len(channelRange))
- numChansSent := int32(0)
- for {
- // We'll send our this response in a streaming manner,
- // chunk-by-chunk. We do this as there's a transport message
- // size limit which we'll need to adhere to.
- var channelChunk []lnwire.ShortChannelID
-
- // We know this is the final chunk, if the difference between
- // the total number of channels, and the number of channels
- // we've sent is less-than-or-equal to the chunk size.
- isFinalChunk := (numChannels - numChansSent) <= g.cfg.chunkSize
-
- // If this is indeed the last chunk, then we'll send the
- // remainder of the channels.
- if isFinalChunk {
- channelChunk = channelRange[numChansSent:]
-
- log.Infof("GossipSyncer(%x): sending final chan "+
- "range chunk, size=%v", g.cfg.peerPub[:],
- len(channelChunk))
- } else {
- // Otherwise, we'll only send off a fragment exactly
- // sized to the proper chunk size.
- channelChunk = channelRange[numChansSent : numChansSent+g.cfg.chunkSize]
-
- log.Infof("GossipSyncer(%x): sending range chunk of "+
- "size=%v", g.cfg.peerPub[:], len(channelChunk))
- }
-
- // If we have any channels at all to return, then we need to
- // update our pointers to the first and last blocks for each
- // response.
- if len(channelChunk) > 0 {
- // If this is the first response we'll send, we'll point
- // the first block to the first block in the query.
- // Otherwise, we'll continue from the block we left off
- // at.
- if numChansSent == 0 {
- firstBlockHeight = startBlock
- } else {
- firstBlockHeight = lastBlockHeight
- }
-
- // If this is the last response we'll send, we'll point
- // the last block to the last block of the query.
- // Otherwise, we'll set it to the height of the last
- // channel in the chunk.
- if isFinalChunk {
- lastBlockHeight = endBlock
- } else {
- lastBlockHeight = channelChunk[len(channelChunk)-1].BlockHeight
- }
- }
-
- // The number of blocks contained in this response (the total
- // span) is the difference between the last channel ID and the
- // first in the range. We add one as even if all channels
- // returned are in the same block, we need to count that.
- numBlocksInResp := lastBlockHeight - firstBlockHeight + 1
-
- // With our chunk assembled, we'll now send to the remote peer
- // the current chunk.
- replyChunk := lnwire.ReplyChannelRange{
- QueryChannelRange: lnwire.QueryChannelRange{
- ChainHash: query.ChainHash,
- NumBlocks: numBlocksInResp,
- FirstBlockHeight: firstBlockHeight,
- },
- Complete: 0,
- EncodingType: g.cfg.encodingType,
- ShortChanIDs: channelChunk,
- }
- if isFinalChunk {
- replyChunk.Complete = 1
- }
- if err := g.cfg.sendToPeerSync(&replyChunk); err != nil {
- return err
- }
-
- // If this was the final chunk, then we'll exit now as our
- // response is now complete.
- if isFinalChunk {
- return nil
- }
-
- numChansSent += int32(len(channelChunk))
- }
-}
-
-// replyShortChanIDs will be dispatched in response to a query by the remote
-// node for information concerning a set of short channel ID's. Our response
-// will be sent in a streaming chunked manner to ensure that we remain below
-// the current transport level message size.
-func (g *GossipSyncer) replyShortChanIDs(query *lnwire.QueryShortChanIDs) er.R {
- // Before responding, we'll check to ensure that the remote peer is
- // querying for the same chain that we're on. If not, we'll send back a
- // response with a complete value of zero to indicate we're on a
- // different chain.
- if g.cfg.chainHash != query.ChainHash {
- log.Warnf("Remote peer requested QueryShortChanIDs for "+
- "chain=%v, we're on chain=%v", query.ChainHash,
- g.cfg.chainHash)
-
- return g.cfg.sendToPeerSync(&lnwire.ReplyShortChanIDsEnd{
- ChainHash: query.ChainHash,
- Complete: 0,
- })
- }
-
- if len(query.ShortChanIDs) == 0 {
- log.Infof("GossipSyncer(%x): ignoring query for blank short chan ID's",
- g.cfg.peerPub[:])
- return nil
- }
-
- log.Infof("GossipSyncer(%x): fetching chan anns for %v chans",
- g.cfg.peerPub[:], len(query.ShortChanIDs))
-
- // Now that we know we're on the same chain, we'll query the channel
- // time series for the set of messages that we know of which satisfies
- // the requirement of being a chan ann, chan update, or a node ann
- // related to the set of queried channels.
- replyMsgs, err := g.cfg.channelSeries.FetchChanAnns(
- query.ChainHash, query.ShortChanIDs,
- )
- if err != nil {
- return er.Errorf("unable to fetch chan anns for %v..., %v",
- query.ShortChanIDs[0].ToUint64(), err)
- }
-
- // Reply with any messages related to those channel ID's, we'll write
- // each one individually and synchronously to throttle the sends and
- // perform buffering of responses in the syncer as opposed to the peer.
- for _, msg := range replyMsgs {
- err := g.cfg.sendToPeerSync(msg)
- if err != nil {
- return err
- }
- }
-
- // Regardless of whether we had any messages to reply with, send over
- // the sentinel message to signal that the stream has terminated.
- return g.cfg.sendToPeerSync(&lnwire.ReplyShortChanIDsEnd{
- ChainHash: query.ChainHash,
- Complete: 1,
- })
-}
-
-// ApplyGossipFilter applies a gossiper filter sent by the remote node to the
-// state machine. Once applied, we'll ensure that we don't forward any messages
-// to the peer that aren't within the time range of the filter.
-func (g *GossipSyncer) ApplyGossipFilter(filter *lnwire.GossipTimestampRange) er.R {
- g.Lock()
-
- g.remoteUpdateHorizon = filter
-
- startTime := time.Unix(int64(g.remoteUpdateHorizon.FirstTimestamp), 0)
- endTime := startTime.Add(
- time.Duration(g.remoteUpdateHorizon.TimestampRange) * time.Second,
- )
-
- g.Unlock()
-
- // If requested, don't reply with historical gossip data when the remote
- // peer sets their gossip timestamp range.
- if g.cfg.ignoreHistoricalFilters {
- return nil
- }
-
- // Now that the remote peer has applied their filter, we'll query the
- // database for all the messages that are beyond this filter.
- newUpdatestoSend, err := g.cfg.channelSeries.UpdatesInHorizon(
- g.cfg.chainHash, startTime, endTime,
- )
- if err != nil {
- return err
- }
-
- log.Infof("GossipSyncer(%x): applying new update horizon: start=%v, "+
- "end=%v, backlog_size=%v", g.cfg.peerPub[:], startTime, endTime,
- len(newUpdatestoSend))
-
- // If we don't have any to send, then we can return early.
- if len(newUpdatestoSend) == 0 {
- return nil
- }
-
- // We'll conclude by launching a goroutine to send out any updates.
- g.wg.Add(1)
- go func() {
- defer g.wg.Done()
-
- for _, msg := range newUpdatestoSend {
- err := g.cfg.sendToPeerSync(msg)
- switch {
- case ErrGossipSyncerExiting.Is(err):
- return
-
- case lnpeer.ErrPeerExiting.Is(err):
- return
-
- case err != nil:
- log.Errorf("Unable to send message for "+
- "peer catch up: %v", err)
- }
- }
- }()
-
- return nil
-}
-
-// FilterGossipMsgs takes a set of gossip messages, and only send it to a peer
-// iff the message is within the bounds of their set gossip filter. If the peer
-// doesn't have a gossip filter set, then no messages will be forwarded.
-func (g *GossipSyncer) FilterGossipMsgs(msgs ...msgWithSenders) {
- // If the peer doesn't have an update horizon set, then we won't send
- // it any new update messages.
- if g.remoteUpdateHorizon == nil {
- return
- }
-
- // If we've been signaled to exit, or are exiting, then we'll stop
- // short.
- select {
- case <-g.quit:
- return
- default:
- }
-
- // TODO(roasbeef): need to ensure that peer still online...send msg to
- // gossiper on peer termination to signal peer disconnect?
-
- var err er.R
-
- // Before we filter out the messages, we'll construct an index over the
- // set of channel announcements and channel updates. This will allow us
- // to quickly check if we should forward a chan ann, based on the known
- // channel updates for a channel.
- chanUpdateIndex := make(map[lnwire.ShortChannelID][]*lnwire.ChannelUpdate)
- for _, msg := range msgs {
- chanUpdate, ok := msg.msg.(*lnwire.ChannelUpdate)
- if !ok {
- continue
- }
-
- chanUpdateIndex[chanUpdate.ShortChannelID] = append(
- chanUpdateIndex[chanUpdate.ShortChannelID], chanUpdate,
- )
- }
-
- // We'll construct a helper function that we'll us below to determine
- // if a given messages passes the gossip msg filter.
- g.Lock()
- startTime := time.Unix(int64(g.remoteUpdateHorizon.FirstTimestamp), 0)
- endTime := startTime.Add(
- time.Duration(g.remoteUpdateHorizon.TimestampRange) * time.Second,
- )
- g.Unlock()
-
- passesFilter := func(timeStamp uint32) bool {
- t := time.Unix(int64(timeStamp), 0)
- return t.Equal(startTime) ||
- (t.After(startTime) && t.Before(endTime))
- }
-
- msgsToSend := make([]lnwire.Message, 0, len(msgs))
- for _, msg := range msgs {
- // If the target peer is the peer that sent us this message,
- // then we'll exit early as we don't need to filter this
- // message.
- if _, ok := msg.senders[g.cfg.peerPub]; ok {
- continue
- }
-
- switch msg := msg.msg.(type) {
-
- // For each channel announcement message, we'll only send this
- // message if the channel updates for the channel are between
- // our time range.
- case *lnwire.ChannelAnnouncement:
- // First, we'll check if the channel updates are in
- // this message batch.
- chanUpdates, ok := chanUpdateIndex[msg.ShortChannelID]
- if !ok {
- // If not, we'll attempt to query the database
- // to see if we know of the updates.
- chanUpdates, err = g.cfg.channelSeries.FetchChanUpdates(
- g.cfg.chainHash, msg.ShortChannelID,
- )
- if err != nil {
- log.Warnf("no channel updates found for "+
- "short_chan_id=%v",
- msg.ShortChannelID)
- continue
- }
- }
-
- for _, chanUpdate := range chanUpdates {
- if passesFilter(chanUpdate.Timestamp) {
- msgsToSend = append(msgsToSend, msg)
- break
- }
- }
-
- if len(chanUpdates) == 0 {
- msgsToSend = append(msgsToSend, msg)
- }
-
- // For each channel update, we'll only send if it the timestamp
- // is between our time range.
- case *lnwire.ChannelUpdate:
- if passesFilter(msg.Timestamp) {
- msgsToSend = append(msgsToSend, msg)
- }
-
- // Similarly, we only send node announcements if the update
- // timestamp ifs between our set gossip filter time range.
- case *lnwire.NodeAnnouncement:
- if passesFilter(msg.Timestamp) {
- msgsToSend = append(msgsToSend, msg)
- }
- }
- }
-
- log.Tracef("GossipSyncer(%x): filtered gossip msgs: set=%v, sent=%v",
- g.cfg.peerPub[:], len(msgs), len(msgsToSend))
-
- if len(msgsToSend) == 0 {
- return
- }
-
- g.cfg.sendToPeer(msgsToSend...)
-}
-
-// ProcessQueryMsg is used by outside callers to pass new channel time series
-// queries to the internal processing goroutine.
-func (g *GossipSyncer) ProcessQueryMsg(msg lnwire.Message, peerQuit <-chan struct{}) {
- var msgChan chan lnwire.Message
- switch msg.(type) {
- case *lnwire.QueryChannelRange, *lnwire.QueryShortChanIDs:
- msgChan = g.queryMsgs
- default:
- msgChan = g.gossipMsgs
- }
-
- select {
- case msgChan <- msg:
- case <-peerQuit:
- case <-g.quit:
- }
-}
-
-// setSyncState sets the gossip syncer's state to the given state.
-func (g *GossipSyncer) setSyncState(state syncerState) {
- atomic.StoreUint32(&g.state, uint32(state))
-}
-
-// syncState returns the current syncerState of the target GossipSyncer.
-func (g *GossipSyncer) syncState() syncerState {
- return syncerState(atomic.LoadUint32(&g.state))
-}
-
-// ResetSyncedSignal returns a channel that will be closed in order to serve as
-// a signal for when the GossipSyncer has reached its chansSynced state.
-func (g *GossipSyncer) ResetSyncedSignal() chan struct{} {
- g.Lock()
- defer g.Unlock()
-
- syncedSignal := make(chan struct{})
-
- syncState := syncerState(atomic.LoadUint32(&g.state))
- if syncState == chansSynced {
- close(syncedSignal)
- return syncedSignal
- }
-
- g.syncedSignal = syncedSignal
- return g.syncedSignal
-}
-
-// ProcessSyncTransition sends a request to the gossip syncer to transition its
-// sync type to a new one.
-//
-// NOTE: This can only be done once the gossip syncer has reached its final
-// chansSynced state.
-func (g *GossipSyncer) ProcessSyncTransition(newSyncType SyncerType) er.R {
- errChan := make(chan er.R, 1)
- select {
- case g.syncTransitionReqs <- &syncTransitionReq{
- newSyncType: newSyncType,
- errChan: errChan,
- }:
- case <-time.After(syncTransitionTimeout):
- return ErrSyncTransitionTimeout.Default()
- case <-g.quit:
- return ErrGossipSyncerExiting.Default()
- }
-
- select {
- case err := <-errChan:
- return err
- case <-g.quit:
- return ErrGossipSyncerExiting.Default()
- }
-}
-
-// handleSyncTransition handles a new sync type transition request.
-//
-// NOTE: The gossip syncer might have another sync state as a result of this
-// transition.
-func (g *GossipSyncer) handleSyncTransition(req *syncTransitionReq) er.R {
- // Return early from any NOP sync transitions.
- syncType := g.SyncType()
- if syncType == req.newSyncType {
- return nil
- }
-
- log.Debugf("GossipSyncer(%x): transitioning from %v to %v",
- g.cfg.peerPub, syncType, req.newSyncType)
-
- var (
- firstTimestamp time.Time
- timestampRange uint32
- )
-
- switch req.newSyncType {
- // If an active sync has been requested, then we should resume receiving
- // new graph updates from the remote peer.
- case ActiveSync:
- firstTimestamp = time.Now()
- timestampRange = math.MaxUint32
-
- // If a PassiveSync transition has been requested, then we should no
- // longer receive any new updates from the remote peer. We can do this
- // by setting our update horizon to a range in the past ensuring no
- // graph updates match the timestamp range.
- case PassiveSync:
- firstTimestamp = zeroTimestamp
- timestampRange = 0
-
- default:
- return er.Errorf("unhandled sync transition %v",
- req.newSyncType)
- }
-
- err := g.sendGossipTimestampRange(firstTimestamp, timestampRange)
- if err != nil {
- return er.Errorf("unable to send local update horizon: %v", err)
- }
-
- g.setSyncType(req.newSyncType)
-
- return nil
-}
-
-// setSyncType sets the gossip syncer's sync type to the given type.
-func (g *GossipSyncer) setSyncType(syncType SyncerType) {
- atomic.StoreUint32(&g.syncType, uint32(syncType))
-}
-
-// SyncType returns the current SyncerType of the target GossipSyncer.
-func (g *GossipSyncer) SyncType() SyncerType {
- return SyncerType(atomic.LoadUint32(&g.syncType))
-}
-
-// historicalSync sends a request to the gossip syncer to perofmr a historical
-// sync.
-//
-// NOTE: This can only be done once the gossip syncer has reached its final
-// chansSynced state.
-func (g *GossipSyncer) historicalSync() er.R {
- done := make(chan struct{})
-
- select {
- case g.historicalSyncReqs <- &historicalSyncReq{
- doneChan: done,
- }:
- case <-time.After(syncTransitionTimeout):
- return ErrSyncTransitionTimeout.Default()
- case <-g.quit:
- return ErrGossiperShuttingDown.Default()
- }
-
- select {
- case <-done:
- return nil
- case <-g.quit:
- return ErrGossiperShuttingDown.Default()
- }
-}
-
-// handleHistoricalSync handles a request to the gossip syncer to perform a
-// historical sync.
-func (g *GossipSyncer) handleHistoricalSync(req *historicalSyncReq) {
- // We'll go back to our initial syncingChans state in order to request
- // the remote peer to give us all of the channel IDs they know of
- // starting from the genesis block.
- g.genHistoricalChanRangeQuery = true
- g.setSyncState(syncingChans)
- close(req.doneChan)
-}
diff --git a/lnd/discovery/syncer_test.go b/lnd/discovery/syncer_test.go
deleted file mode 100644
index 17167bbe..00000000
--- a/lnd/discovery/syncer_test.go
+++ /dev/null
@@ -1,2303 +0,0 @@
-package discovery
-
-import (
- "math"
- "reflect"
- "sync"
- "testing"
- "time"
-
- "github.com/davecgh/go-spew/spew"
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/chaincfg"
- "github.com/pkt-cash/pktd/chaincfg/chainhash"
- "github.com/pkt-cash/pktd/lnd/lnwire"
-)
-
-const (
- defaultEncoding = lnwire.EncodingSortedPlain
- latestKnownHeight = 1337
-)
-
-var (
- defaultChunkSize = encodingTypeToChunkSize[defaultEncoding]
-)
-
-type horizonQuery struct {
- chain chainhash.Hash
- start time.Time
- end time.Time
-}
-type filterRangeReq struct {
- startHeight, endHeight uint32
-}
-
-type mockChannelGraphTimeSeries struct {
- highestID lnwire.ShortChannelID
-
- horizonReq chan horizonQuery
- horizonResp chan []lnwire.Message
-
- filterReq chan []lnwire.ShortChannelID
- filterResp chan []lnwire.ShortChannelID
-
- filterRangeReqs chan filterRangeReq
- filterRangeResp chan []lnwire.ShortChannelID
-
- annReq chan []lnwire.ShortChannelID
- annResp chan []lnwire.Message
-
- updateReq chan lnwire.ShortChannelID
- updateResp chan []*lnwire.ChannelUpdate
-}
-
-func newMockChannelGraphTimeSeries(
- hID lnwire.ShortChannelID) *mockChannelGraphTimeSeries {
-
- return &mockChannelGraphTimeSeries{
- highestID: hID,
-
- horizonReq: make(chan horizonQuery, 1),
- horizonResp: make(chan []lnwire.Message, 1),
-
- filterReq: make(chan []lnwire.ShortChannelID, 1),
- filterResp: make(chan []lnwire.ShortChannelID, 1),
-
- filterRangeReqs: make(chan filterRangeReq, 1),
- filterRangeResp: make(chan []lnwire.ShortChannelID, 1),
-
- annReq: make(chan []lnwire.ShortChannelID, 1),
- annResp: make(chan []lnwire.Message, 1),
-
- updateReq: make(chan lnwire.ShortChannelID, 1),
- updateResp: make(chan []*lnwire.ChannelUpdate, 1),
- }
-}
-
-func (m *mockChannelGraphTimeSeries) HighestChanID(chain chainhash.Hash) (*lnwire.ShortChannelID, er.R) {
- return &m.highestID, nil
-}
-func (m *mockChannelGraphTimeSeries) UpdatesInHorizon(chain chainhash.Hash,
- startTime time.Time, endTime time.Time) ([]lnwire.Message, er.R) {
-
- m.horizonReq <- horizonQuery{
- chain, startTime, endTime,
- }
-
- return <-m.horizonResp, nil
-}
-func (m *mockChannelGraphTimeSeries) FilterKnownChanIDs(chain chainhash.Hash,
- superSet []lnwire.ShortChannelID) ([]lnwire.ShortChannelID, er.R) {
-
- m.filterReq <- superSet
-
- return <-m.filterResp, nil
-}
-func (m *mockChannelGraphTimeSeries) FilterChannelRange(chain chainhash.Hash,
- startHeight, endHeight uint32) ([]lnwire.ShortChannelID, er.R) {
-
- m.filterRangeReqs <- filterRangeReq{startHeight, endHeight}
-
- return <-m.filterRangeResp, nil
-}
-func (m *mockChannelGraphTimeSeries) FetchChanAnns(chain chainhash.Hash,
- shortChanIDs []lnwire.ShortChannelID) ([]lnwire.Message, er.R) {
-
- m.annReq <- shortChanIDs
-
- return <-m.annResp, nil
-}
-func (m *mockChannelGraphTimeSeries) FetchChanUpdates(chain chainhash.Hash,
- shortChanID lnwire.ShortChannelID) ([]*lnwire.ChannelUpdate, er.R) {
-
- m.updateReq <- shortChanID
-
- return <-m.updateResp, nil
-}
-
-var _ ChannelGraphTimeSeries = (*mockChannelGraphTimeSeries)(nil)
-
-// newTestSyncer creates a new test instance of a GossipSyncer. A buffered
-// message channel is returned for intercepting messages sent from the syncer,
-// in addition to a mock channel series which allows the test to control which
-// messages the syncer knows of or wishes to filter out. The variadic flags are
-// treated as positional arguments where the first index signals that the syncer
-// should spawn a channelGraphSyncer and second index signals that the syncer
-// should spawn a replyHandler. Any flags beyond the first two are currently
-// ignored. If no flags are provided, both a channelGraphSyncer and replyHandler
-// will be spawned by default.
-func newTestSyncer(hID lnwire.ShortChannelID,
- encodingType lnwire.ShortChanIDEncoding, chunkSize int32,
- flags ...bool) (chan []lnwire.Message,
- *GossipSyncer, *mockChannelGraphTimeSeries) {
-
- syncChannels := true
- replyQueries := true
- if len(flags) > 0 {
- syncChannels = flags[0]
- }
- if len(flags) > 1 {
- replyQueries = flags[1]
- }
-
- msgChan := make(chan []lnwire.Message, 20)
- cfg := gossipSyncerCfg{
- channelSeries: newMockChannelGraphTimeSeries(hID),
- encodingType: encodingType,
- chunkSize: chunkSize,
- batchSize: chunkSize,
- noSyncChannels: !syncChannels,
- noReplyQueries: !replyQueries,
- sendToPeer: func(msgs ...lnwire.Message) er.R {
- msgChan <- msgs
- return nil
- },
- sendToPeerSync: func(msgs ...lnwire.Message) er.R {
- msgChan <- msgs
- return nil
- },
- delayedQueryReplyInterval: 2 * time.Second,
- }
- syncer := newGossipSyncer(cfg)
-
- return msgChan, syncer, cfg.channelSeries.(*mockChannelGraphTimeSeries)
-}
-
-// TestGossipSyncerFilterGossipMsgsNoHorizon tests that if the remote peer
-// doesn't have a horizon set, then we won't send any incoming messages to it.
-func TestGossipSyncerFilterGossipMsgsNoHorizon(t *testing.T) {
- t.Parallel()
-
- // First, we'll create a GossipSyncer instance with a canned sendToPeer
- // message to allow us to intercept their potential sends.
- msgChan, syncer, _ := newTestSyncer(
- lnwire.NewShortChanIDFromInt(10), defaultEncoding,
- defaultChunkSize,
- )
-
- // With the syncer created, we'll create a set of messages to filter
- // through the gossiper to the target peer.
- msgs := []msgWithSenders{
- {
- msg: &lnwire.NodeAnnouncement{Timestamp: uint32(time.Now().Unix())},
- },
- {
- msg: &lnwire.NodeAnnouncement{Timestamp: uint32(time.Now().Unix())},
- },
- }
-
- // We'll then attempt to filter the set of messages through the target
- // peer.
- syncer.FilterGossipMsgs(msgs...)
-
- // As the remote peer doesn't yet have a gossip timestamp set, we
- // shouldn't receive any outbound messages.
- select {
- case msg := <-msgChan:
- t.Fatalf("received message but shouldn't have: %v",
- spew.Sdump(msg))
-
- case <-time.After(time.Millisecond * 10):
- }
-}
-
-func unixStamp(a int64) uint32 {
- t := time.Unix(a, 0)
- return uint32(t.Unix())
-}
-
-// TestGossipSyncerFilterGossipMsgsAll tests that we're able to properly filter
-// out a set of incoming messages based on the set remote update horizon for a
-// peer. We tests all messages type, and all time straddling. We'll also send a
-// channel ann that already has a channel update on disk.
-func TestGossipSyncerFilterGossipMsgsAllInMemory(t *testing.T) {
- t.Parallel()
-
- // First, we'll create a GossipSyncer instance with a canned sendToPeer
- // message to allow us to intercept their potential sends.
- msgChan, syncer, chanSeries := newTestSyncer(
- lnwire.NewShortChanIDFromInt(10), defaultEncoding,
- defaultChunkSize,
- )
-
- // We'll create then apply a remote horizon for the target peer with a
- // set of manually selected timestamps.
- remoteHorizon := &lnwire.GossipTimestampRange{
- FirstTimestamp: unixStamp(25000),
- TimestampRange: uint32(1000),
- }
- syncer.remoteUpdateHorizon = remoteHorizon
-
- // With the syncer created, we'll create a set of messages to filter
- // through the gossiper to the target peer. Our message will consist of
- // one node announcement above the horizon, one below. Additionally,
- // we'll include a chan ann with an update below the horizon, one
- // with an update timestamp above the horizon, and one without any
- // channel updates at all.
- msgs := []msgWithSenders{
- {
- // Node ann above horizon.
- msg: &lnwire.NodeAnnouncement{Timestamp: unixStamp(25001)},
- },
- {
- // Node ann below horizon.
- msg: &lnwire.NodeAnnouncement{Timestamp: unixStamp(5)},
- },
- {
- // Node ann above horizon.
- msg: &lnwire.NodeAnnouncement{Timestamp: unixStamp(999999)},
- },
- {
- // Ann tuple below horizon.
- msg: &lnwire.ChannelAnnouncement{
- ShortChannelID: lnwire.NewShortChanIDFromInt(10),
- },
- },
- {
- msg: &lnwire.ChannelUpdate{
- ShortChannelID: lnwire.NewShortChanIDFromInt(10),
- Timestamp: unixStamp(5),
- },
- },
- {
- // Ann tuple above horizon.
- msg: &lnwire.ChannelAnnouncement{
- ShortChannelID: lnwire.NewShortChanIDFromInt(15),
- },
- },
- {
- msg: &lnwire.ChannelUpdate{
- ShortChannelID: lnwire.NewShortChanIDFromInt(15),
- Timestamp: unixStamp(25002),
- },
- },
- {
- // Ann tuple beyond horizon.
- msg: &lnwire.ChannelAnnouncement{
- ShortChannelID: lnwire.NewShortChanIDFromInt(20),
- },
- },
- {
- msg: &lnwire.ChannelUpdate{
- ShortChannelID: lnwire.NewShortChanIDFromInt(20),
- Timestamp: unixStamp(999999),
- },
- },
- {
- // Ann w/o an update at all, the update in the DB will
- // be below the horizon.
- msg: &lnwire.ChannelAnnouncement{
- ShortChannelID: lnwire.NewShortChanIDFromInt(25),
- },
- },
- }
-
- // Before we send off the query, we'll ensure we send the missing
- // channel update for that final ann. It will be below the horizon, so
- // shouldn't be sent anyway.
- errCh := make(chan er.R, 1)
- go func() {
- select {
- case <-time.After(time.Second * 15):
- errCh <- er.New("no query received")
- return
- case query := <-chanSeries.updateReq:
- // It should be asking for the chan updates of short
- // chan ID 25.
- expectedID := lnwire.NewShortChanIDFromInt(25)
- if expectedID != query {
- errCh <- er.Errorf("wrong query id: expected %v, got %v",
- expectedID, query)
- return
- }
-
- // If so, then we'll send back the missing update.
- chanSeries.updateResp <- []*lnwire.ChannelUpdate{
- {
- ShortChannelID: lnwire.NewShortChanIDFromInt(25),
- Timestamp: unixStamp(5),
- },
- }
- errCh <- nil
- }
- }()
-
- // We'll then instruct the gossiper to filter this set of messages.
- syncer.FilterGossipMsgs(msgs...)
-
- // Out of all the messages we sent in, we should only get 2 of them
- // back.
- select {
- case <-time.After(time.Second * 15):
- t.Fatalf("no msgs received")
-
- case msgs := <-msgChan:
- if len(msgs) != 3 {
- t.Fatalf("expected 3 messages instead got %v "+
- "messages: %v", len(msgs), spew.Sdump(msgs))
- }
- }
-
- // Wait for error from goroutine.
- select {
- case <-time.After(time.Second * 30):
- t.Fatalf("goroutine did not return within 30 seconds")
- case err := <-errCh:
- if err != nil {
- t.Fatal(err)
- }
- }
-}
-
-// TestGossipSyncerApplyNoHistoricalGossipFilter tests that once a gossip filter
-// is applied for the remote peer, then we don't send the peer all known
-// messages which are within their desired time horizon.
-func TestGossipSyncerApplyNoHistoricalGossipFilter(t *testing.T) {
- t.Parallel()
-
- // First, we'll create a GossipSyncer instance with a canned sendToPeer
- // message to allow us to intercept their potential sends.
- _, syncer, chanSeries := newTestSyncer(
- lnwire.NewShortChanIDFromInt(10), defaultEncoding,
- defaultChunkSize,
- )
- syncer.cfg.ignoreHistoricalFilters = true
-
- // We'll apply this gossip horizon for the remote peer.
- remoteHorizon := &lnwire.GossipTimestampRange{
- FirstTimestamp: unixStamp(25000),
- TimestampRange: uint32(1000),
- }
-
- // After applying the gossip filter, the chan series should not be
- // queried using the updated horizon.
- errChan := make(chan er.R, 1)
- var wg sync.WaitGroup
- wg.Add(1)
- go func() {
- defer wg.Done()
-
- select {
- // No query received, success.
- case <-time.After(3 * time.Second):
- errChan <- nil
-
- // Unexpected query received.
- case <-chanSeries.horizonReq:
- errChan <- er.New("chan series should not have been " +
- "queried")
- }
- }()
-
- // We'll now attempt to apply the gossip filter for the remote peer.
- syncer.ApplyGossipFilter(remoteHorizon)
-
- // Ensure that the syncer's remote horizon was properly updated.
- if !reflect.DeepEqual(syncer.remoteUpdateHorizon, remoteHorizon) {
- t.Fatalf("expected remote horizon: %v, got: %v",
- remoteHorizon, syncer.remoteUpdateHorizon)
- }
-
- // Wait for the query check to finish.
- wg.Wait()
-
- // Assert that no query was made as a result of applying the gossip
- // filter.
- err := <-errChan
- if err != nil {
- t.Fatalf(err.String())
- }
-}
-
-// TestGossipSyncerApplyGossipFilter tests that once a gossip filter is applied
-// for the remote peer, then we send the peer all known messages which are
-// within their desired time horizon.
-func TestGossipSyncerApplyGossipFilter(t *testing.T) {
- t.Parallel()
-
- // First, we'll create a GossipSyncer instance with a canned sendToPeer
- // message to allow us to intercept their potential sends.
- msgChan, syncer, chanSeries := newTestSyncer(
- lnwire.NewShortChanIDFromInt(10), defaultEncoding,
- defaultChunkSize,
- )
-
- // We'll apply this gossip horizon for the remote peer.
- remoteHorizon := &lnwire.GossipTimestampRange{
- FirstTimestamp: unixStamp(25000),
- TimestampRange: uint32(1000),
- }
-
- // Before we apply the horizon, we'll dispatch a response to the query
- // that the syncer will issue.
- errCh := make(chan er.R, 1)
- go func() {
- select {
- case <-time.After(time.Second * 15):
- errCh <- er.New("no query recvd")
- return
- case query := <-chanSeries.horizonReq:
- // The syncer should have translated the time range
- // into the proper star time.
- if remoteHorizon.FirstTimestamp != uint32(query.start.Unix()) {
- errCh <- er.Errorf("wrong query stamp: expected %v, got %v",
- remoteHorizon.FirstTimestamp, query.start)
- return
- }
-
- // For this first response, we'll send back an empty
- // set of messages. As result, we shouldn't send any
- // messages.
- chanSeries.horizonResp <- []lnwire.Message{}
- errCh <- nil
- }
- }()
-
- // We'll now attempt to apply the gossip filter for the remote peer.
- err := syncer.ApplyGossipFilter(remoteHorizon)
- if err != nil {
- t.Fatalf("unable to apply filter: %v", err)
- }
-
- // There should be no messages in the message queue as we didn't send
- // the syncer and messages within the horizon.
- select {
- case msgs := <-msgChan:
- t.Fatalf("expected no msgs, instead got %v", spew.Sdump(msgs))
- default:
- }
-
- // Wait for error result from goroutine.
- select {
- case <-time.After(time.Second * 30):
- t.Fatalf("goroutine did not return within 30 seconds")
- case err := <-errCh:
- if err != nil {
- t.Fatal(err)
- }
- }
-
- // If we repeat the process, but give the syncer a set of valid
- // messages, then these should be sent to the remote peer.
- go func() {
- select {
- case <-time.After(time.Second * 15):
- errCh <- er.New("no query recvd")
- return
- case query := <-chanSeries.horizonReq:
- // The syncer should have translated the time range
- // into the proper star time.
- if remoteHorizon.FirstTimestamp != uint32(query.start.Unix()) {
- errCh <- er.Errorf("wrong query stamp: expected %v, got %v",
- remoteHorizon.FirstTimestamp, query.start)
- return
- }
-
- // For this first response, we'll send back a proper
- // set of messages that should be echoed back.
- chanSeries.horizonResp <- []lnwire.Message{
- &lnwire.ChannelUpdate{
- ShortChannelID: lnwire.NewShortChanIDFromInt(25),
- Timestamp: unixStamp(5),
- },
- }
- errCh <- nil
- }
- }()
- err = syncer.ApplyGossipFilter(remoteHorizon)
- if err != nil {
- t.Fatalf("unable to apply filter: %v", err)
- }
-
- // We should get back the exact same message.
- select {
- case <-time.After(time.Second * 15):
- t.Fatalf("no msgs received")
-
- case msgs := <-msgChan:
- if len(msgs) != 1 {
- t.Fatalf("wrong messages: expected %v, got %v",
- 1, len(msgs))
- }
- }
-
- // Wait for error result from goroutine.
- select {
- case <-time.After(time.Second * 30):
- t.Fatalf("goroutine did not return within 30 seconds")
- case err := <-errCh:
- if err != nil {
- t.Fatal(err)
- }
- }
-}
-
-// TestGossipSyncerQueryChannelRangeWrongChainHash tests that if we receive a
-// channel range query for the wrong chain, then we send back a response with no
-// channels and complete=0.
-func TestGossipSyncerQueryChannelRangeWrongChainHash(t *testing.T) {
- t.Parallel()
-
- // First, we'll create a GossipSyncer instance with a canned sendToPeer
- // message to allow us to intercept their potential sends.
- msgChan, syncer, _ := newTestSyncer(
- lnwire.NewShortChanIDFromInt(10), defaultEncoding,
- defaultChunkSize,
- )
-
- // We'll now ask the syncer to reply to a channel range query, but for a
- // chain that it isn't aware of.
- query := &lnwire.QueryChannelRange{
- ChainHash: *chaincfg.SimNetParams.GenesisHash,
- FirstBlockHeight: 0,
- NumBlocks: math.MaxUint32,
- }
- err := syncer.replyChanRangeQuery(query)
- if err != nil {
- t.Fatalf("unable to process short chan ID's: %v", err)
- }
-
- select {
- case <-time.After(time.Second * 15):
- t.Fatalf("no msgs received")
-
- case msgs := <-msgChan:
- // We should get back exactly one message, that's a
- // ReplyChannelRange with a matching query, and a complete value
- // of zero.
- if len(msgs) != 1 {
- t.Fatalf("wrong messages: expected %v, got %v",
- 1, len(msgs))
- }
-
- msg, ok := msgs[0].(*lnwire.ReplyChannelRange)
- if !ok {
- t.Fatalf("expected lnwire.ReplyChannelRange, got %T", msg)
- }
-
- if msg.QueryChannelRange != *query {
- t.Fatalf("wrong query channel range in reply: "+
- "expected: %v\ngot: %v", spew.Sdump(*query),
- spew.Sdump(msg.QueryChannelRange))
- }
- if msg.Complete != 0 {
- t.Fatalf("expected complete set to 0, got %v",
- msg.Complete)
- }
- }
-}
-
-// TestGossipSyncerReplyShortChanIDsWrongChainHash tests that if we get a chan
-// ID query for the wrong chain, then we send back only a short ID end with
-// complete=0.
-func TestGossipSyncerReplyShortChanIDsWrongChainHash(t *testing.T) {
- t.Parallel()
-
- // First, we'll create a GossipSyncer instance with a canned sendToPeer
- // message to allow us to intercept their potential sends.
- msgChan, syncer, _ := newTestSyncer(
- lnwire.NewShortChanIDFromInt(10), defaultEncoding,
- defaultChunkSize,
- )
-
- // We'll now ask the syncer to reply to a chan ID query, but for a
- // chain that it isn't aware of.
- err := syncer.replyShortChanIDs(&lnwire.QueryShortChanIDs{
- ChainHash: *chaincfg.SimNetParams.GenesisHash,
- })
- if err != nil {
- t.Fatalf("unable to process short chan ID's: %v", err)
- }
-
- select {
- case <-time.After(time.Second * 15):
- t.Fatalf("no msgs received")
- case msgs := <-msgChan:
-
- // We should get back exactly one message, that's a
- // ReplyShortChanIDsEnd with a matching chain hash, and a
- // complete value of zero.
- if len(msgs) != 1 {
- t.Fatalf("wrong messages: expected %v, got %v",
- 1, len(msgs))
- }
-
- msg, ok := msgs[0].(*lnwire.ReplyShortChanIDsEnd)
- if !ok {
- t.Fatalf("expected lnwire.ReplyShortChanIDsEnd "+
- "instead got %T", msg)
- }
-
- if msg.ChainHash != *chaincfg.SimNetParams.GenesisHash {
- t.Fatalf("wrong chain hash: expected %v, got %v",
- msg.ChainHash, chaincfg.SimNetParams.GenesisHash)
- }
- if msg.Complete != 0 {
- t.Fatalf("complete set incorrectly")
- }
- }
-}
-
-// TestGossipSyncerReplyShortChanIDs tests that in the case of a known chain
-// hash for a QueryShortChanIDs, we'll return the set of matching
-// announcements, as well as an ending ReplyShortChanIDsEnd message.
-func TestGossipSyncerReplyShortChanIDs(t *testing.T) {
- t.Parallel()
-
- // First, we'll create a GossipSyncer instance with a canned sendToPeer
- // message to allow us to intercept their potential sends.
- msgChan, syncer, chanSeries := newTestSyncer(
- lnwire.NewShortChanIDFromInt(10), defaultEncoding,
- defaultChunkSize,
- )
-
- queryChanIDs := []lnwire.ShortChannelID{
- lnwire.NewShortChanIDFromInt(1),
- lnwire.NewShortChanIDFromInt(2),
- lnwire.NewShortChanIDFromInt(3),
- }
-
- queryReply := []lnwire.Message{
- &lnwire.ChannelAnnouncement{
- ShortChannelID: lnwire.NewShortChanIDFromInt(20),
- },
- &lnwire.ChannelUpdate{
- ShortChannelID: lnwire.NewShortChanIDFromInt(20),
- Timestamp: unixStamp(999999),
- },
- &lnwire.NodeAnnouncement{Timestamp: unixStamp(25001)},
- }
-
- // We'll then craft a reply to the upcoming query for all the matching
- // channel announcements for a particular set of short channel ID's.
- errCh := make(chan er.R, 1)
- go func() {
- select {
- case <-time.After(time.Second * 15):
- errCh <- er.New("no query recvd")
- return
- case chanIDs := <-chanSeries.annReq:
- // The set of chan ID's should match exactly.
- if !reflect.DeepEqual(chanIDs, queryChanIDs) {
- errCh <- er.Errorf("wrong chan IDs: expected %v, got %v",
- queryChanIDs, chanIDs)
- return
- }
-
- // If they do, then we'll send back a response with
- // some canned messages.
- chanSeries.annResp <- queryReply
- errCh <- nil
- }
- }()
-
- // With our set up above complete, we'll now attempt to obtain a reply
- // from the channel syncer for our target chan ID query.
- err := syncer.replyShortChanIDs(&lnwire.QueryShortChanIDs{
- ShortChanIDs: queryChanIDs,
- })
- if err != nil {
- t.Fatalf("unable to query for chan IDs: %v", err)
- }
-
- for i := 0; i < len(queryReply)+1; i++ {
- select {
- case <-time.After(time.Second * 15):
- t.Fatalf("no msgs received")
-
- // We should get back exactly 4 messages. The first 3 are the
- // same messages we sent above, and the query end message.
- case msgs := <-msgChan:
- if len(msgs) != 1 {
- t.Fatalf("wrong number of messages: "+
- "expected %v, got %v", 1, len(msgs))
- }
-
- isQueryReply := i < len(queryReply)
- finalMsg, ok := msgs[0].(*lnwire.ReplyShortChanIDsEnd)
-
- switch {
- case isQueryReply &&
- !reflect.DeepEqual(queryReply[i], msgs[0]):
-
- t.Fatalf("wrong message: expected %v, got %v",
- spew.Sdump(queryReply[i]),
- spew.Sdump(msgs[0]))
-
- case !isQueryReply && !ok:
- t.Fatalf("expected lnwire.ReplyShortChanIDsEnd"+
- " instead got %T", msgs[3])
-
- case !isQueryReply && finalMsg.Complete != 1:
- t.Fatalf("complete wasn't set")
- }
- }
- }
-
- // Wait for error from goroutine.
- select {
- case <-time.After(time.Second * 30):
- t.Fatalf("goroutine did not return within 30 seconds")
- case err := <-errCh:
- if err != nil {
- t.Fatal(err)
- }
- }
-}
-
-// TestGossipSyncerReplyChanRangeQuery tests that if we receive a
-// QueryChannelRange message, then we'll properly send back a chunked reply to
-// the remote peer.
-func TestGossipSyncerReplyChanRangeQuery(t *testing.T) {
- t.Parallel()
-
- // We'll use a smaller chunk size so we can easily test all the edge
- // cases.
- const chunkSize = 2
-
- // We'll now create our test gossip syncer that will shortly respond to
- // our canned query.
- msgChan, syncer, chanSeries := newTestSyncer(
- lnwire.NewShortChanIDFromInt(10), defaultEncoding, chunkSize,
- )
-
- // Next, we'll craft a query to ask for all the new chan ID's after
- // block 100.
- const startingBlockHeight = 100
- const numBlocks = 50
- const endingBlockHeight = startingBlockHeight + numBlocks - 1
- query := &lnwire.QueryChannelRange{
- FirstBlockHeight: uint32(startingBlockHeight),
- NumBlocks: uint32(numBlocks),
- }
-
- // We'll then launch a goroutine to reply to the query with a set of 5
- // responses. This will ensure we get two full chunks, and one partial
- // chunk.
- queryResp := []lnwire.ShortChannelID{
- {
- BlockHeight: uint32(startingBlockHeight),
- },
- {
- BlockHeight: 102,
- },
- {
- BlockHeight: 104,
- },
- {
- BlockHeight: 106,
- },
- {
- BlockHeight: 108,
- },
- }
-
- errCh := make(chan er.R, 1)
- go func() {
- select {
- case <-time.After(time.Second * 15):
- errCh <- er.New("no query recvd")
- return
- case filterReq := <-chanSeries.filterRangeReqs:
- // We should be querying for block 100 to 150.
- if filterReq.startHeight != startingBlockHeight &&
- filterReq.endHeight != endingBlockHeight {
-
- errCh <- er.Errorf("wrong height range: %v",
- spew.Sdump(filterReq))
- return
- }
-
- // If the proper request was sent, then we'll respond
- // with our set of short channel ID's.
- chanSeries.filterRangeResp <- queryResp
- errCh <- nil
- }
- }()
-
- // With our goroutine active, we'll now issue the query.
- if err := syncer.replyChanRangeQuery(query); err != nil {
- t.Fatalf("unable to issue query: %v", err)
- }
-
- // At this point, we'll now wait for the syncer to send the chunked
- // reply. We should get three sets of messages as two of them should be
- // full, while the other is the final fragment.
- const numExpectedChunks = 3
- respMsgs := make([]lnwire.ShortChannelID, 0, 5)
- for i := 0; i < numExpectedChunks; i++ {
- select {
- case <-time.After(time.Second * 15):
- t.Fatalf("no msgs received")
-
- case msg := <-msgChan:
- resp := msg[0]
- rangeResp, ok := resp.(*lnwire.ReplyChannelRange)
- if !ok {
- t.Fatalf("expected ReplyChannelRange instead got %T", msg)
- }
-
- // We'll determine the correct values of each field in
- // each response based on the order that they were sent.
- var (
- expectedFirstBlockHeight uint32
- expectedNumBlocks uint32
- expectedComplete uint8
- )
-
- switch {
- // The first reply should range from our starting block
- // height until it reaches its maximum capacity of
- // channels.
- case i == 0:
- expectedFirstBlockHeight = startingBlockHeight
- expectedNumBlocks = chunkSize + 1
-
- // The last reply should range starting from the next
- // block of our previous reply up until the ending
- // height of the query. It should also have the Complete
- // bit set.
- case i == numExpectedChunks-1:
- expectedFirstBlockHeight = respMsgs[len(respMsgs)-1].BlockHeight
- expectedNumBlocks = endingBlockHeight - expectedFirstBlockHeight + 1
- expectedComplete = 1
-
- // Any intermediate replies should range starting from
- // the next block of our previous reply up until it
- // reaches its maximum capacity of channels.
- default:
- expectedFirstBlockHeight = respMsgs[len(respMsgs)-1].BlockHeight
- expectedNumBlocks = 5
- }
-
- switch {
- case rangeResp.FirstBlockHeight != expectedFirstBlockHeight:
- t.Fatalf("FirstBlockHeight in resp #%d "+
- "incorrect: expected %v, got %v", i+1,
- expectedFirstBlockHeight,
- rangeResp.FirstBlockHeight)
-
- case rangeResp.NumBlocks != expectedNumBlocks:
- t.Fatalf("NumBlocks in resp #%d incorrect: "+
- "expected %v, got %v", i+1,
- expectedNumBlocks, rangeResp.NumBlocks)
-
- case rangeResp.Complete != expectedComplete:
- t.Fatalf("Complete in resp #%d incorrect: "+
- "expected %v, got %v", i+1,
- expectedNumBlocks, rangeResp.Complete)
- }
-
- respMsgs = append(respMsgs, rangeResp.ShortChanIDs...)
- }
- }
-
- // We should get back exactly 5 short chan ID's, and they should match
- // exactly the ID's we sent as a reply.
- if len(respMsgs) != len(queryResp) {
- t.Fatalf("expected %v chan ID's, instead got %v",
- len(queryResp), spew.Sdump(respMsgs))
- }
- if !reflect.DeepEqual(queryResp, respMsgs) {
- t.Fatalf("mismatched response: expected %v, got %v",
- spew.Sdump(queryResp), spew.Sdump(respMsgs))
- }
-
- // Wait for error from goroutine.
- select {
- case <-time.After(time.Second * 30):
- t.Fatalf("goroutine did not return within 30 seconds")
- case err := <-errCh:
- if err != nil {
- t.Fatal(err)
- }
- }
-}
-
-// TestGossipSyncerReplyChanRangeQuery tests a variety of
-// QueryChannelRange messages to ensure the underlying queries are
-// executed with the correct block range
-func TestGossipSyncerReplyChanRangeQueryBlockRange(t *testing.T) {
- t.Parallel()
-
- // First create our test gossip syncer that will handle and
- // respond to the test queries
- _, syncer, chanSeries := newTestSyncer(
- lnwire.NewShortChanIDFromInt(10), defaultEncoding, math.MaxInt32,
- )
-
- // Next construct test queries with various startBlock and endBlock
- // ranges
- queryReqs := []*lnwire.QueryChannelRange{
- // full range example
- {
- FirstBlockHeight: uint32(0),
- NumBlocks: uint32(math.MaxUint32),
- },
-
- // small query example that does not overflow
- {
- FirstBlockHeight: uint32(1000),
- NumBlocks: uint32(100),
- },
-
- // overflow example
- {
- FirstBlockHeight: uint32(1000),
- NumBlocks: uint32(math.MaxUint32),
- },
- }
-
- // Next construct the expected filterRangeReq startHeight and endHeight
- // values that we will compare to the captured values
- expFilterReqs := []filterRangeReq{
- {
- startHeight: uint32(0),
- endHeight: uint32(math.MaxUint32 - 1),
- },
- {
- startHeight: uint32(1000),
- endHeight: uint32(1099),
- },
- {
- startHeight: uint32(1000),
- endHeight: uint32(math.MaxUint32),
- },
- }
-
- // We'll then launch a goroutine to capture the filterRangeReqs for
- // each request and return those results once all queries have been
- // received
- resultsCh := make(chan []filterRangeReq, 1)
- errCh := make(chan er.R, 1)
- go func() {
- // We will capture the values supplied to the chanSeries here
- // and return the results once all the requests have been
- // collected
- capFilterReqs := []filterRangeReq{}
-
- for filterReq := range chanSeries.filterRangeReqs {
- // capture the filter request so we can compare to the
- // expected values later
- capFilterReqs = append(capFilterReqs, filterReq)
-
- // Reply with an empty result for each query to allow
- // unblock the caller
- queryResp := []lnwire.ShortChannelID{}
- chanSeries.filterRangeResp <- queryResp
-
- // Once we have collected all results send the results
- // back to the main thread and terminate the goroutine
- if len(capFilterReqs) == len(expFilterReqs) {
- resultsCh <- capFilterReqs
- return
- }
-
- }
- }()
-
- // We'll launch a goroutine to send the query sequentially. This
- // goroutine ensures that the timeout logic below on the mainthread
- // will be reached
- go func() {
- for _, query := range queryReqs {
- if err := syncer.replyChanRangeQuery(query); err != nil {
- errCh <- er.Errorf("unable to issue query: %v", err)
- return
- }
- }
- }()
-
- // Wait for the results to be collected and validate that the
- // collected results match the expected results, the timeout to
- // expire, or an error to occur
- select {
- case capFilterReq := <-resultsCh:
- if !reflect.DeepEqual(expFilterReqs, capFilterReq) {
- t.Fatalf("mismatched filter reqs: expected %v, got %v",
- spew.Sdump(expFilterReqs), spew.Sdump(capFilterReq))
- }
- case <-time.After(time.Second * 10):
- t.Fatalf("goroutine did not return within 10 seconds")
- case err := <-errCh:
- if err != nil {
- t.Fatal(err)
- }
- }
-}
-
-// TestGossipSyncerReplyChanRangeQueryNoNewChans tests that if we issue a reply
-// for a channel range query, and we don't have any new channels, then we send
-// back a single response that signals completion.
-func TestGossipSyncerReplyChanRangeQueryNoNewChans(t *testing.T) {
- t.Parallel()
-
- // We'll now create our test gossip syncer that will shortly respond to
- // our canned query.
- msgChan, syncer, chanSeries := newTestSyncer(
- lnwire.NewShortChanIDFromInt(10), defaultEncoding,
- defaultChunkSize,
- )
-
- // Next, we'll craft a query to ask for all the new chan ID's after
- // block 100.
- query := &lnwire.QueryChannelRange{
- FirstBlockHeight: 100,
- NumBlocks: 50,
- }
-
- // We'll then launch a goroutine to reply to the query no new channels.
- resp := []lnwire.ShortChannelID{}
- errCh := make(chan er.R, 1)
- go func() {
- select {
- case <-time.After(time.Second * 15):
- errCh <- er.New("no query recvd")
- return
- case filterReq := <-chanSeries.filterRangeReqs:
- // We should be querying for block 100 to 150.
- if filterReq.startHeight != 100 && filterReq.endHeight != 150 {
- errCh <- er.Errorf("wrong height range: %v",
- spew.Sdump(filterReq))
- return
- }
- // If the proper request was sent, then we'll respond
- // with our blank set of short chan ID's.
- chanSeries.filterRangeResp <- resp
- errCh <- nil
- }
- }()
-
- // With our goroutine active, we'll now issue the query.
- if err := syncer.replyChanRangeQuery(query); err != nil {
- t.Fatalf("unable to issue query: %v", err)
- }
-
- // We should get back exactly one message, and the message should
- // indicate that this is the final in the series.
- select {
- case <-time.After(time.Second * 15):
- t.Fatalf("no msgs received")
-
- case msg := <-msgChan:
- resp := msg[0]
- rangeResp, ok := resp.(*lnwire.ReplyChannelRange)
- if !ok {
- t.Fatalf("expected ReplyChannelRange instead got %T", msg)
- }
-
- if len(rangeResp.ShortChanIDs) != 0 {
- t.Fatalf("expected no chan ID's, instead "+
- "got: %v", spew.Sdump(rangeResp.ShortChanIDs))
- }
- if rangeResp.Complete != 1 {
- t.Fatalf("complete wasn't set")
- }
- }
-
- // Wait for error from goroutine.
- select {
- case <-time.After(time.Second * 30):
- t.Fatalf("goroutine did not return within 30 seconds")
- case err := <-errCh:
- if err != nil {
- t.Fatal(err)
- }
- }
-}
-
-// TestGossipSyncerGenChanRangeQuery tests that given the current best known
-// channel ID, we properly generate an correct initial channel range response.
-func TestGossipSyncerGenChanRangeQuery(t *testing.T) {
- t.Parallel()
-
- // First, we'll create a GossipSyncer instance with a canned sendToPeer
- // message to allow us to intercept their potential sends.
- const startingHeight = 200
- _, syncer, _ := newTestSyncer(
- lnwire.ShortChannelID{BlockHeight: startingHeight},
- defaultEncoding, defaultChunkSize,
- )
-
- // If we now ask the syncer to generate an initial range query, it
- // should return a start height that's back chanRangeQueryBuffer
- // blocks.
- rangeQuery, err := syncer.genChanRangeQuery(false)
- if err != nil {
- t.Fatalf("unable to resp: %v", err)
- }
-
- firstHeight := uint32(startingHeight - chanRangeQueryBuffer)
- if rangeQuery.FirstBlockHeight != firstHeight {
- t.Fatalf("incorrect chan range query: expected %v, %v",
- rangeQuery.FirstBlockHeight,
- startingHeight-chanRangeQueryBuffer)
- }
- if rangeQuery.NumBlocks != math.MaxUint32-firstHeight {
- t.Fatalf("wrong num blocks: expected %v, got %v",
- math.MaxUint32-firstHeight, rangeQuery.NumBlocks)
- }
-
- // Generating a historical range query should result in a start height
- // of 0.
- rangeQuery, err = syncer.genChanRangeQuery(true)
- if err != nil {
- t.Fatalf("unable to resp: %v", err)
- }
- if rangeQuery.FirstBlockHeight != 0 {
- t.Fatalf("incorrect chan range query: expected %v, %v", 0,
- rangeQuery.FirstBlockHeight)
- }
- if rangeQuery.NumBlocks != math.MaxUint32 {
- t.Fatalf("wrong num blocks: expected %v, got %v",
- math.MaxUint32, rangeQuery.NumBlocks)
- }
-}
-
-// TestGossipSyncerProcessChanRangeReply tests that we'll properly buffer
-// replied channel replies until we have the complete version.
-func TestGossipSyncerProcessChanRangeReply(t *testing.T) {
- t.Parallel()
-
- t.Run("legacy", func(t *testing.T) {
- testGossipSyncerProcessChanRangeReply(t, true)
- })
- t.Run("block ranges", func(t *testing.T) {
- testGossipSyncerProcessChanRangeReply(t, false)
- })
-}
-
-// testGossipSyncerProcessChanRangeReply tests that we'll properly buffer
-// replied channel replies until we have the complete version. The legacy
-// option, if set, uses the Complete field of the reply to determine when we've
-// received all expected replies. Otherwise, it looks at the block ranges of
-// each reply instead.
-func testGossipSyncerProcessChanRangeReply(t *testing.T, legacy bool) {
- t.Parallel()
-
- // First, we'll create a GossipSyncer instance with a canned sendToPeer
- // message to allow us to intercept their potential sends.
- highestID := lnwire.ShortChannelID{
- BlockHeight: latestKnownHeight,
- }
- _, syncer, chanSeries := newTestSyncer(
- highestID, defaultEncoding, defaultChunkSize,
- )
-
- startingState := syncer.state
-
- query, err := syncer.genChanRangeQuery(true)
- if err != nil {
- t.Fatalf("unable to generate channel range query: %v", err)
- }
-
- var replyQueries []*lnwire.QueryChannelRange
- if legacy {
- // Each reply query is the same as the original query in the
- // legacy mode.
- replyQueries = []*lnwire.QueryChannelRange{query, query, query}
- } else {
- // When interpreting block ranges, the first reply should start
- // from our requested first block, and the last should end at
- // our requested last block.
- replyQueries = []*lnwire.QueryChannelRange{
- {
- FirstBlockHeight: 0,
- NumBlocks: 11,
- },
- {
- FirstBlockHeight: 11,
- NumBlocks: 1,
- },
- {
- FirstBlockHeight: 12,
- NumBlocks: query.NumBlocks - 12,
- },
- }
- }
-
- replies := []*lnwire.ReplyChannelRange{
- {
- QueryChannelRange: *replyQueries[0],
- ShortChanIDs: []lnwire.ShortChannelID{
- {
- BlockHeight: 10,
- },
- },
- },
- {
- QueryChannelRange: *replyQueries[1],
- ShortChanIDs: []lnwire.ShortChannelID{
- {
- BlockHeight: 11,
- },
- },
- },
- {
- QueryChannelRange: *replyQueries[2],
- Complete: 1,
- ShortChanIDs: []lnwire.ShortChannelID{
- {
- BlockHeight: 12,
- },
- },
- },
- }
-
- // We'll begin by sending the syncer a set of non-complete channel
- // range replies.
- if err := syncer.processChanRangeReply(replies[0]); err != nil {
- t.Fatalf("unable to process reply: %v", err)
- }
- if err := syncer.processChanRangeReply(replies[1]); err != nil {
- t.Fatalf("unable to process reply: %v", err)
- }
-
- // At this point, we should still be in our starting state as the query
- // hasn't finished.
- if syncer.state != startingState {
- t.Fatalf("state should not have transitioned")
- }
-
- expectedReq := []lnwire.ShortChannelID{
- {
- BlockHeight: 10,
- },
- {
- BlockHeight: 11,
- },
- {
- BlockHeight: 12,
- },
- }
-
- // As we're about to send the final response, we'll launch a goroutine
- // to respond back with a filtered set of chan ID's.
- errCh := make(chan er.R, 1)
- go func() {
- select {
- case <-time.After(time.Second * 15):
- errCh <- er.New("no query received")
- return
-
- case req := <-chanSeries.filterReq:
- // We should get a request for the entire range of short
- // chan ID's.
- if !reflect.DeepEqual(expectedReq, req) {
- errCh <- er.Errorf("wrong request: expected %v, got %v",
- expectedReq, req)
- return
- }
-
- // We'll send back only the last two to simulate filtering.
- chanSeries.filterResp <- expectedReq[1:]
- errCh <- nil
- }
- }()
-
- // If we send the final message, then we should transition to
- // queryNewChannels as we've sent a non-empty set of new channels.
- if err := syncer.processChanRangeReply(replies[2]); err != nil {
- t.Fatalf("unable to process reply: %v", err)
- }
-
- if syncer.syncState() != queryNewChannels {
- t.Fatalf("wrong state: expected %v instead got %v",
- queryNewChannels, syncer.state)
- }
- if !reflect.DeepEqual(syncer.newChansToQuery, expectedReq[1:]) {
- t.Fatalf("wrong set of chans to query: expected %v, got %v",
- syncer.newChansToQuery, expectedReq[1:])
- }
-
- // Wait for error from goroutine.
- select {
- case <-time.After(time.Second * 30):
- t.Fatalf("goroutine did not return within 30 seconds")
- case err := <-errCh:
- if err != nil {
- t.Fatal(err)
- }
- }
-}
-
-// TestGossipSyncerSynchronizeChanIDs tests that we properly request chunks of
-// the short chan ID's which were unknown to us. We'll ensure that we request
-// chunk by chunk, and after the last chunk, we return true indicating that we
-// can transition to the synced stage.
-func TestGossipSyncerSynchronizeChanIDs(t *testing.T) {
- t.Parallel()
-
- // We'll modify the chunk size to be a smaller value, so we can ensure
- // our chunk parsing works properly. With this value we should get 3
- // queries: two full chunks, and one lingering chunk.
- const chunkSize = 2
-
- // First, we'll create a GossipSyncer instance with a canned sendToPeer
- // message to allow us to intercept their potential sends.
- msgChan, syncer, _ := newTestSyncer(
- lnwire.NewShortChanIDFromInt(10), defaultEncoding, chunkSize,
- )
-
- // Next, we'll construct a set of chan ID's that we should query for,
- // and set them as newChansToQuery within the state machine.
- newChanIDs := []lnwire.ShortChannelID{
- lnwire.NewShortChanIDFromInt(1),
- lnwire.NewShortChanIDFromInt(2),
- lnwire.NewShortChanIDFromInt(3),
- lnwire.NewShortChanIDFromInt(4),
- lnwire.NewShortChanIDFromInt(5),
- }
- syncer.newChansToQuery = newChanIDs
-
- for i := 0; i < chunkSize*2; i += 2 {
- // With our set up complete, we'll request a sync of chan ID's.
- done, err := syncer.synchronizeChanIDs()
- if err != nil {
- t.Fatalf("unable to sync chan IDs: %v", err)
- }
-
- // At this point, we shouldn't yet be done as only 2 items
- // should have been queried for.
- if done {
- t.Fatalf("syncer shown as done, but shouldn't be!")
- }
-
- // We should've received a new message from the syncer.
- select {
- case <-time.After(time.Second * 15):
- t.Fatalf("no msgs received")
-
- case msg := <-msgChan:
- queryMsg, ok := msg[0].(*lnwire.QueryShortChanIDs)
- if !ok {
- t.Fatalf("expected QueryShortChanIDs instead "+
- "got %T", msg)
- }
-
- // The query message should have queried for the first
- // two chan ID's, and nothing more.
- if !reflect.DeepEqual(queryMsg.ShortChanIDs, newChanIDs[i:i+chunkSize]) {
- t.Fatalf("wrong query: expected %v, got %v",
- spew.Sdump(newChanIDs[i:i+chunkSize]),
- queryMsg.ShortChanIDs)
- }
- }
-
- // With the proper message sent out, the internal state of the
- // syncer should reflect that it still has more channels to
- // query for.
- if !reflect.DeepEqual(syncer.newChansToQuery, newChanIDs[i+chunkSize:]) {
- t.Fatalf("incorrect chans to query for: expected %v, got %v",
- spew.Sdump(newChanIDs[i+chunkSize:]),
- syncer.newChansToQuery)
- }
- }
-
- // At this point, only one more channel should be lingering for the
- // syncer to query for.
- if !reflect.DeepEqual(newChanIDs[chunkSize*2:], syncer.newChansToQuery) {
- t.Fatalf("wrong chans to query: expected %v, got %v",
- newChanIDs[chunkSize*2:], syncer.newChansToQuery)
- }
-
- // If we issue another query, the syncer should tell us that it's done.
- done, err := syncer.synchronizeChanIDs()
- if err != nil {
- t.Fatalf("unable to sync chan IDs: %v", err)
- }
- if done {
- t.Fatalf("syncer should be finished!")
- }
-
- select {
- case <-time.After(time.Second * 15):
- t.Fatalf("no msgs received")
-
- case msg := <-msgChan:
- queryMsg, ok := msg[0].(*lnwire.QueryShortChanIDs)
- if !ok {
- t.Fatalf("expected QueryShortChanIDs instead "+
- "got %T", msg)
- }
-
- // The query issued should simply be the last item.
- if !reflect.DeepEqual(queryMsg.ShortChanIDs, newChanIDs[chunkSize*2:]) {
- t.Fatalf("wrong query: expected %v, got %v",
- spew.Sdump(newChanIDs[chunkSize*2:]),
- queryMsg.ShortChanIDs)
- }
-
- // There also should be no more channels to query.
- if len(syncer.newChansToQuery) != 0 {
- t.Fatalf("should be no more chans to query for, "+
- "instead have %v",
- spew.Sdump(syncer.newChansToQuery))
- }
- }
-}
-
-// TestGossipSyncerDelayDOS tests that the gossip syncer will begin delaying
-// queries after its prescribed allotment of undelayed query responses. Once
-// this happens, all query replies should be delayed by the configurated
-// interval.
-func TestGossipSyncerDelayDOS(t *testing.T) {
- t.Parallel()
-
- // We'll modify the chunk size to be a smaller value, since we'll be
- // sending a modest number of queries. After exhausting our undelayed
- // gossip queries, we'll send two extra queries and ensure that they are
- // delayed properly.
- const chunkSize = 2
- const numDelayedQueries = 2
- const delayTolerance = time.Millisecond * 200
-
- // First, we'll create two GossipSyncer instances with a canned
- // sendToPeer message to allow us to intercept their potential sends.
- highestID := lnwire.ShortChannelID{
- BlockHeight: 1144,
- }
- msgChan1, syncer1, chanSeries1 := newTestSyncer(
- highestID, defaultEncoding, chunkSize, true, false,
- )
- syncer1.Start()
- defer syncer1.Stop()
-
- msgChan2, syncer2, chanSeries2 := newTestSyncer(
- highestID, defaultEncoding, chunkSize, false, true,
- )
- syncer2.Start()
- defer syncer2.Stop()
-
- // Record the delayed query reply interval used by each syncer.
- delayedQueryInterval := syncer1.cfg.delayedQueryReplyInterval
-
- // Record the number of undelayed queries allowed by the syncers.
- numUndelayedQueries := syncer1.cfg.maxUndelayedQueryReplies
-
- // We will send enough queries to exhaust the undelayed responses, and
- // then send two more queries which should be delayed. An additional one
- // is subtracted from the total since undelayed message will be consumed
- // by the initial QueryChannelRange.
- numQueryResponses := numUndelayedQueries + numDelayedQueries - 1
-
- // The total number of responses must include the initial reply each
- // syncer will make to QueryChannelRange.
- numTotalQueries := 1 + numQueryResponses
-
- // The total number of channels each syncer needs to request must be
- // scaled by the chunk size being used.
- numTotalChans := numQueryResponses * chunkSize
-
- // Construct enough channels so that all of the queries will have enough
- // channels. Since syncer1 won't know of any channels, their sets are
- // inherently disjoint.
- var syncer2Chans []lnwire.ShortChannelID
- for i := 0; i < numTotalChans; i++ {
- syncer2Chans = append(syncer2Chans, lnwire.ShortChannelID{
- BlockHeight: highestID.BlockHeight - 1,
- TxIndex: uint32(i),
- })
- }
-
- // We'll kick off the test by asserting syncer1 sends over the
- // QueryChannelRange message the other node.
- select {
- case <-time.After(time.Second * 2):
- t.Fatalf("didn't get msg from syncer1")
-
- case msgs := <-msgChan1:
- for _, msg := range msgs {
- // The message MUST be a QueryChannelRange message.
- _, ok := msg.(*lnwire.QueryChannelRange)
- if !ok {
- t.Fatalf("wrong message: expected "+
- "QueryChannelRange for %T", msg)
- }
-
- select {
- case <-time.After(time.Second * 2):
- t.Fatalf("node 2 didn't read msg")
-
- case syncer2.queryMsgs <- msg:
-
- }
- }
- }
-
- // At this point, we'll need to a response from syncer2's channel
- // series. This will cause syncer1 to simply request the entire set of
- // channels from syncer2. This will count as the first undelayed
- // response for sycner2.
- select {
- case <-time.After(time.Second * 2):
- t.Fatalf("no query recvd")
-
- case <-chanSeries2.filterRangeReqs:
- // We'll send back all the channels that it should know of.
- chanSeries2.filterRangeResp <- syncer2Chans
- }
-
- // At this point, we'll assert that the ReplyChannelRange message is
- // sent by sycner2.
- for i := 0; i < numQueryResponses; i++ {
- select {
- case <-time.After(time.Second * 2):
- t.Fatalf("didn't get msg from syncer2")
-
- case msgs := <-msgChan2:
- for _, msg := range msgs {
- // The message MUST be a ReplyChannelRange message.
- _, ok := msg.(*lnwire.ReplyChannelRange)
- if !ok {
- t.Fatalf("wrong message: expected "+
- "QueryChannelRange for %T", msg)
- }
-
- select {
- case <-time.After(time.Second * 2):
- t.Fatalf("node 2 didn't read msg")
-
- case syncer1.gossipMsgs <- msg:
- }
- }
- }
- }
-
- // We'll now have syncer1 process the received sids from syncer2.
- select {
- case <-time.After(time.Second * 2):
- t.Fatalf("no query recvd")
-
- case <-chanSeries1.filterReq:
- chanSeries1.filterResp <- syncer2Chans
- }
-
- // At this point, syncer1 should start to send out initial requests to
- // query the chan IDs of the remote party. We'll keep track of the
- // number of queries made using the iterated value, which starts at one
- // due the initial contribution of the QueryChannelRange msgs.
- for i := 1; i < numTotalQueries; i++ {
- expDelayResponse := i >= numUndelayedQueries
- queryBatch(t,
- msgChan1, msgChan2,
- syncer1, syncer2,
- chanSeries2,
- expDelayResponse,
- delayedQueryInterval,
- delayTolerance,
- )
- }
-}
-
-// queryBatch is a helper method that will query for a single batch of channels
-// from a peer and assert the responses. The method can also be used to assert
-// the same transition happens, but is delayed by the remote peer's DOS
-// rate-limiting. The provided chanSeries should belong to syncer2.
-//
-// The state transition performed is the following:
-// syncer1 -- QueryShortChanIDs --> syncer2
-// chanSeries.FetchChanAnns()
-// syncer1 <-- ReplyShortChanIDsEnd -- syncer2
-//
-// If expDelayResponse is true, this method will assert that the call the
-// FetchChanAnns happens between:
-// [delayedQueryInterval-delayTolerance, delayedQueryInterval+delayTolerance].
-func queryBatch(t *testing.T,
- msgChan1, msgChan2 chan []lnwire.Message,
- syncer1, syncer2 *GossipSyncer,
- chanSeries *mockChannelGraphTimeSeries,
- expDelayResponse bool,
- delayedQueryInterval, delayTolerance time.Duration) {
-
- t.Helper()
-
- // First, we'll assert that syncer1 sends a QueryShortChanIDs message to
- // the remote peer.
- select {
- case <-time.After(time.Second * 2):
- t.Fatalf("didn't get msg from syncer2")
-
- case msgs := <-msgChan1:
- for _, msg := range msgs {
- // The message MUST be a QueryShortChanIDs message.
- _, ok := msg.(*lnwire.QueryShortChanIDs)
- if !ok {
- t.Fatalf("wrong message: expected "+
- "QueryShortChanIDs for %T", msg)
- }
-
- select {
- case <-time.After(time.Second * 2):
- t.Fatalf("node 2 didn't read msg")
-
- case syncer2.queryMsgs <- msg:
- }
- }
- }
-
- // We'll then respond to with an empty set of replies (as it doesn't
- // affect the test).
- switch {
-
- // If this query has surpassed the undelayed query threshold, we will
- // impose stricter timing constraints on the response times. We'll first
- // test that syncer2's chanSeries doesn't immediately receive a query,
- // and then check that the query hasn't gone unanswered entirely.
- case expDelayResponse:
- // Create a before and after timeout to test, our test
- // will ensure the messages are delivered to the peer
- // in this timeframe.
- before := time.After(
- delayedQueryInterval - delayTolerance,
- )
- after := time.After(
- delayedQueryInterval + delayTolerance,
- )
-
- // First, ensure syncer2 doesn't try to respond up until the
- // before time fires.
- select {
- case <-before:
- // Query is delayed, proceed.
-
- case <-chanSeries.annReq:
- t.Fatalf("DOSy query was not delayed")
- }
-
- // If syncer2 doesn't attempt a response within the allowed
- // interval, then the messages are probably lost.
- select {
- case <-after:
- t.Fatalf("no delayed query received")
-
- case <-chanSeries.annReq:
- chanSeries.annResp <- []lnwire.Message{}
- }
-
- // Otherwise, syncer2 should query its chanSeries promtly.
- default:
- select {
- case <-time.After(50 * time.Millisecond):
- t.Fatalf("no query recvd")
-
- case <-chanSeries.annReq:
- chanSeries.annResp <- []lnwire.Message{}
- }
- }
-
- // Finally, assert that syncer2 replies to syncer1 with a
- // ReplyShortChanIDsEnd.
- select {
- case <-time.After(50 * time.Millisecond):
- t.Fatalf("didn't get msg from syncer2")
-
- case msgs := <-msgChan2:
- for _, msg := range msgs {
- // The message MUST be a ReplyShortChanIDsEnd message.
- _, ok := msg.(*lnwire.ReplyShortChanIDsEnd)
- if !ok {
- t.Fatalf("wrong message: expected "+
- "ReplyShortChanIDsEnd for %T", msg)
- }
-
- select {
- case <-time.After(time.Second * 2):
- t.Fatalf("node 2 didn't read msg")
-
- case syncer1.gossipMsgs <- msg:
- }
- }
- }
-}
-
-// TestGossipSyncerRoutineSync tests all state transitions of the main syncer
-// goroutine. This ensures that given an encounter with a peer that has a set
-// of distinct channels, then we'll properly synchronize our channel state with
-// them.
-func TestGossipSyncerRoutineSync(t *testing.T) {
- t.Parallel()
-
- // We'll modify the chunk size to be a smaller value, so we can ensure
- // our chunk parsing works properly. With this value we should get 3
- // queries: two full chunks, and one lingering chunk.
- const chunkSize = 2
-
- // First, we'll create two GossipSyncer instances with a canned
- // sendToPeer message to allow us to intercept their potential sends.
- highestID := lnwire.ShortChannelID{
- BlockHeight: 1144,
- }
- msgChan1, syncer1, chanSeries1 := newTestSyncer(
- highestID, defaultEncoding, chunkSize, true, false,
- )
- syncer1.Start()
- defer syncer1.Stop()
-
- msgChan2, syncer2, chanSeries2 := newTestSyncer(
- highestID, defaultEncoding, chunkSize, false, true,
- )
- syncer2.Start()
- defer syncer2.Stop()
-
- // Although both nodes are at the same height, syncer will have 3 chan
- // ID's that syncer1 doesn't know of.
- syncer2Chans := []lnwire.ShortChannelID{
- {BlockHeight: highestID.BlockHeight - 3},
- {BlockHeight: highestID.BlockHeight - 2},
- {BlockHeight: highestID.BlockHeight - 1},
- }
-
- // We'll kick off the test by passing over the QueryChannelRange
- // messages from syncer1 to syncer2.
- select {
- case <-time.After(time.Second * 2):
- t.Fatalf("didn't get msg from syncer1")
-
- case msgs := <-msgChan1:
- for _, msg := range msgs {
- // The message MUST be a QueryChannelRange message.
- _, ok := msg.(*lnwire.QueryChannelRange)
- if !ok {
- t.Fatalf("wrong message: expected "+
- "QueryChannelRange for %T", msg)
- }
-
- select {
- case <-time.After(time.Second * 2):
- t.Fatalf("node 2 didn't read msg")
-
- case syncer2.queryMsgs <- msg:
-
- }
- }
- }
-
- // At this point, we'll need to send a response from syncer2 to syncer1
- // using syncer2's channels This will cause syncer1 to simply request
- // the entire set of channels from the other.
- select {
- case <-time.After(time.Second * 2):
- t.Fatalf("no query recvd")
-
- case <-chanSeries2.filterRangeReqs:
- // We'll send back all the channels that it should know of.
- chanSeries2.filterRangeResp <- syncer2Chans
- }
-
- // At this point, we'll assert that syncer2 replies with the
- // ReplyChannelRange messages. Two replies are expected since the chunk
- // size is 2, and we need to query for 3 channels.
- for i := 0; i < chunkSize; i++ {
- select {
- case <-time.After(time.Second * 2):
- t.Fatalf("didn't get msg from syncer2")
-
- case msgs := <-msgChan2:
- for _, msg := range msgs {
- // The message MUST be a ReplyChannelRange message.
- _, ok := msg.(*lnwire.ReplyChannelRange)
- if !ok {
- t.Fatalf("wrong message: expected "+
- "QueryChannelRange for %T", msg)
- }
-
- select {
- case <-time.After(time.Second * 2):
- t.Fatalf("node 2 didn't read msg")
-
- case syncer1.gossipMsgs <- msg:
- }
- }
- }
- }
-
- // We'll now send back a chunked response from syncer2 back to sycner1.
- select {
- case <-time.After(time.Second * 2):
- t.Fatalf("no query recvd")
-
- case <-chanSeries1.filterReq:
- chanSeries1.filterResp <- syncer2Chans
- }
-
- // At this point, syncer1 should start to send out initial requests to
- // query the chan IDs of the remote party. As the chunk size is 2,
- // they'll need 2 rounds in order to fully reconcile the state.
- for i := 0; i < chunkSize; i++ {
- queryBatch(t,
- msgChan1, msgChan2,
- syncer1, syncer2,
- chanSeries2,
- false, 0, 0,
- )
- }
-
- // At this stage syncer1 should now be sending over its initial
- // GossipTimestampRange messages as it should be fully synced.
- select {
- case <-time.After(time.Second * 2):
- t.Fatalf("didn't get msg from syncer1")
-
- case msgs := <-msgChan1:
- for _, msg := range msgs {
- // The message MUST be a GossipTimestampRange message.
- _, ok := msg.(*lnwire.GossipTimestampRange)
- if !ok {
- t.Fatalf("wrong message: expected "+
- "QueryChannelRange for %T", msg)
- }
-
- select {
- case <-time.After(time.Second * 2):
- t.Fatalf("node 2 didn't read msg")
-
- case syncer2.gossipMsgs <- msg:
-
- }
- }
- }
-}
-
-// TestGossipSyncerAlreadySynced tests that if we attempt to synchronize two
-// syncers that have the exact same state, then they'll skip straight to the
-// final state and not perform any channel queries.
-func TestGossipSyncerAlreadySynced(t *testing.T) {
- t.Parallel()
-
- // We'll modify the chunk size to be a smaller value, so we can ensure
- // our chunk parsing works properly. With this value we should get 3
- // queries: two full chunks, and one lingering chunk.
- const chunkSize = 2
- const numChans = 3
-
- // First, we'll create two GossipSyncer instances with a canned
- // sendToPeer message to allow us to intercept their potential sends.
- highestID := lnwire.ShortChannelID{
- BlockHeight: 1144,
- }
- msgChan1, syncer1, chanSeries1 := newTestSyncer(
- highestID, defaultEncoding, chunkSize,
- )
- syncer1.Start()
- defer syncer1.Stop()
-
- msgChan2, syncer2, chanSeries2 := newTestSyncer(
- highestID, defaultEncoding, chunkSize,
- )
- syncer2.Start()
- defer syncer2.Stop()
-
- // The channel state of both syncers will be identical. They should
- // recognize this, and skip the sync phase below.
- var syncer1Chans, syncer2Chans []lnwire.ShortChannelID
- for i := numChans; i > 0; i-- {
- shortChanID := lnwire.ShortChannelID{
- BlockHeight: highestID.BlockHeight - uint32(i),
- }
- syncer1Chans = append(syncer1Chans, shortChanID)
- syncer2Chans = append(syncer2Chans, shortChanID)
- }
-
- // We'll now kick off the test by allowing both side to send their
- // QueryChannelRange messages to each other.
- select {
- case <-time.After(time.Second * 2):
- t.Fatalf("didn't get msg from syncer1")
-
- case msgs := <-msgChan1:
- for _, msg := range msgs {
- // The message MUST be a QueryChannelRange message.
- _, ok := msg.(*lnwire.QueryChannelRange)
- if !ok {
- t.Fatalf("wrong message: expected "+
- "QueryChannelRange for %T", msg)
- }
-
- select {
- case <-time.After(time.Second * 2):
- t.Fatalf("node 2 didn't read msg")
-
- case syncer2.queryMsgs <- msg:
-
- }
- }
- }
- select {
- case <-time.After(time.Second * 2):
- t.Fatalf("didn't get msg from syncer2")
-
- case msgs := <-msgChan2:
- for _, msg := range msgs {
- // The message MUST be a QueryChannelRange message.
- _, ok := msg.(*lnwire.QueryChannelRange)
- if !ok {
- t.Fatalf("wrong message: expected "+
- "QueryChannelRange for %T", msg)
- }
-
- select {
- case <-time.After(time.Second * 2):
- t.Fatalf("node 2 didn't read msg")
-
- case syncer1.queryMsgs <- msg:
-
- }
- }
- }
-
- // We'll now send back the range each side should send over: the set of
- // channels they already know about.
- select {
- case <-time.After(time.Second * 2):
- t.Fatalf("no query recvd")
-
- case <-chanSeries1.filterRangeReqs:
- // We'll send all the channels that it should know of.
- chanSeries1.filterRangeResp <- syncer1Chans
- }
- select {
- case <-time.After(time.Second * 2):
- t.Fatalf("no query recvd")
-
- case <-chanSeries2.filterRangeReqs:
- // We'll send back all the channels that it should know of.
- chanSeries2.filterRangeResp <- syncer2Chans
- }
-
- // Next, we'll thread through the replies of both parties. As the chunk
- // size is 2, and they both know of 3 channels, it'll take two around
- // and two chunks.
- for i := 0; i < chunkSize; i++ {
- select {
- case <-time.After(time.Second * 2):
- t.Fatalf("didn't get msg from syncer1")
-
- case msgs := <-msgChan1:
- for _, msg := range msgs {
- // The message MUST be a ReplyChannelRange message.
- _, ok := msg.(*lnwire.ReplyChannelRange)
- if !ok {
- t.Fatalf("wrong message: expected "+
- "QueryChannelRange for %T", msg)
- }
-
- select {
- case <-time.After(time.Second * 2):
- t.Fatalf("node 2 didn't read msg")
-
- case syncer2.gossipMsgs <- msg:
- }
- }
- }
- }
- for i := 0; i < chunkSize; i++ {
- select {
- case <-time.After(time.Second * 2):
- t.Fatalf("didn't get msg from syncer2")
-
- case msgs := <-msgChan2:
- for _, msg := range msgs {
- // The message MUST be a ReplyChannelRange message.
- _, ok := msg.(*lnwire.ReplyChannelRange)
- if !ok {
- t.Fatalf("wrong message: expected "+
- "QueryChannelRange for %T", msg)
- }
-
- select {
- case <-time.After(time.Second * 2):
- t.Fatalf("node 2 didn't read msg")
-
- case syncer1.gossipMsgs <- msg:
- }
- }
- }
- }
-
- // Now that both sides have the full responses, we'll send over the
- // channels that they need to filter out. As both sides have the exact
- // same set of channels, they should skip to the final state.
- select {
- case <-time.After(time.Second * 2):
- t.Fatalf("no query recvd")
-
- case <-chanSeries1.filterReq:
- chanSeries1.filterResp <- []lnwire.ShortChannelID{}
- }
- select {
- case <-time.After(time.Second * 2):
- t.Fatalf("no query recvd")
-
- case <-chanSeries2.filterReq:
- chanSeries2.filterResp <- []lnwire.ShortChannelID{}
- }
-
- // As both parties are already synced, the next message they send to
- // each other should be the GossipTimestampRange message.
- select {
- case <-time.After(time.Second * 2):
- t.Fatalf("didn't get msg from syncer1")
-
- case msgs := <-msgChan1:
- for _, msg := range msgs {
- // The message MUST be a GossipTimestampRange message.
- _, ok := msg.(*lnwire.GossipTimestampRange)
- if !ok {
- t.Fatalf("wrong message: expected "+
- "QueryChannelRange for %T", msg)
- }
-
- select {
- case <-time.After(time.Second * 2):
- t.Fatalf("node 2 didn't read msg")
-
- case syncer2.gossipMsgs <- msg:
-
- }
- }
- }
- select {
- case <-time.After(time.Second * 2):
- t.Fatalf("didn't get msg from syncer1")
-
- case msgs := <-msgChan2:
- for _, msg := range msgs {
- // The message MUST be a GossipTimestampRange message.
- _, ok := msg.(*lnwire.GossipTimestampRange)
- if !ok {
- t.Fatalf("wrong message: expected "+
- "QueryChannelRange for %T", msg)
- }
-
- select {
- case <-time.After(time.Second * 2):
- t.Fatalf("node 2 didn't read msg")
-
- case syncer1.gossipMsgs <- msg:
-
- }
- }
- }
-}
-
-// TestGossipSyncerSyncTransitions ensures that the gossip syncer properly
-// carries out its duties when accepting a new sync transition request.
-func TestGossipSyncerSyncTransitions(t *testing.T) {
- t.Parallel()
-
- assertMsgSent := func(t *testing.T, msgChan chan []lnwire.Message,
- msg lnwire.Message) {
-
- t.Helper()
-
- var msgSent lnwire.Message
- select {
- case msgs := <-msgChan:
- if len(msgs) != 1 {
- t.Fatal("expected to send a single message at "+
- "a time, got %d", len(msgs))
- }
- msgSent = msgs[0]
- case <-time.After(time.Second):
- t.Fatalf("expected to send %T message", msg)
- }
-
- if !reflect.DeepEqual(msgSent, msg) {
- t.Fatalf("expected to send message: %v\ngot: %v",
- spew.Sdump(msg), spew.Sdump(msgSent))
- }
- }
-
- tests := []struct {
- name string
- entrySyncType SyncerType
- finalSyncType SyncerType
- assert func(t *testing.T, msgChan chan []lnwire.Message,
- syncer *GossipSyncer)
- }{
- {
- name: "active to passive",
- entrySyncType: ActiveSync,
- finalSyncType: PassiveSync,
- assert: func(t *testing.T, msgChan chan []lnwire.Message,
- g *GossipSyncer) {
-
- // When transitioning from active to passive, we
- // should expect to see a new local update
- // horizon sent to the remote peer indicating
- // that it would not like to receive any future
- // updates.
- assertMsgSent(t, msgChan, &lnwire.GossipTimestampRange{
- FirstTimestamp: uint32(zeroTimestamp.Unix()),
- TimestampRange: 0,
- })
-
- syncState := g.syncState()
- if syncState != chansSynced {
- t.Fatalf("expected syncerState %v, "+
- "got %v", chansSynced, syncState)
- }
- },
- },
- {
- name: "passive to active",
- entrySyncType: PassiveSync,
- finalSyncType: ActiveSync,
- assert: func(t *testing.T, msgChan chan []lnwire.Message,
- g *GossipSyncer) {
-
- // When transitioning from historical to active,
- // we should expect to see a new local update
- // horizon sent to the remote peer indicating
- // that it would like to receive any future
- // updates.
- firstTimestamp := uint32(time.Now().Unix())
- assertMsgSent(t, msgChan, &lnwire.GossipTimestampRange{
- FirstTimestamp: firstTimestamp,
- TimestampRange: math.MaxUint32,
- })
-
- syncState := g.syncState()
- if syncState != chansSynced {
- t.Fatalf("expected syncerState %v, "+
- "got %v", chansSynced, syncState)
- }
- },
- },
- }
-
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- t.Parallel()
-
- // We'll start each test by creating our syncer. We'll
- // initialize it with a state of chansSynced, as that's
- // the only time when it can process sync transitions.
- msgChan, syncer, _ := newTestSyncer(
- lnwire.ShortChannelID{
- BlockHeight: latestKnownHeight,
- },
- defaultEncoding, defaultChunkSize,
- )
- syncer.setSyncState(chansSynced)
-
- // We'll set the initial syncType to what the test
- // demands.
- syncer.setSyncType(test.entrySyncType)
-
- // We'll then start the syncer in order to process the
- // request.
- syncer.Start()
- defer syncer.Stop()
-
- syncer.ProcessSyncTransition(test.finalSyncType)
-
- // The syncer should now have the expected final
- // SyncerType that the test expects.
- syncType := syncer.SyncType()
- if syncType != test.finalSyncType {
- t.Fatalf("expected syncType %v, got %v",
- test.finalSyncType, syncType)
- }
-
- // Finally, we'll run a set of assertions for each test
- // to ensure the syncer performed its expected duties
- // after processing its sync transition.
- test.assert(t, msgChan, syncer)
- })
- }
-}
-
-// TestGossipSyncerHistoricalSync tests that a gossip syncer can perform a
-// historical sync with the remote peer.
-func TestGossipSyncerHistoricalSync(t *testing.T) {
- t.Parallel()
-
- // We'll create a new gossip syncer and manually override its state to
- // chansSynced. This is necessary as the syncer can only process
- // historical sync requests in this state.
- msgChan, syncer, _ := newTestSyncer(
- lnwire.ShortChannelID{BlockHeight: latestKnownHeight},
- defaultEncoding, defaultChunkSize,
- )
- syncer.setSyncType(PassiveSync)
- syncer.setSyncState(chansSynced)
-
- syncer.Start()
- defer syncer.Stop()
-
- syncer.historicalSync()
-
- // We should expect to see a single lnwire.QueryChannelRange message be
- // sent to the remote peer with a FirstBlockHeight of 0.
- expectedMsg := &lnwire.QueryChannelRange{
- FirstBlockHeight: 0,
- NumBlocks: math.MaxUint32,
- }
-
- select {
- case msgs := <-msgChan:
- if len(msgs) != 1 {
- t.Fatalf("expected to send a single "+
- "lnwire.QueryChannelRange message, got %d",
- len(msgs))
- }
- if !reflect.DeepEqual(msgs[0], expectedMsg) {
- t.Fatalf("expected to send message: %v\ngot: %v",
- spew.Sdump(expectedMsg), spew.Sdump(msgs[0]))
- }
- case <-time.After(time.Second):
- t.Fatalf("expected to send a lnwire.QueryChannelRange message")
- }
-}
-
-// TestGossipSyncerSyncedSignal ensures that we receive a signal when a gossip
-// syncer reaches its terminal chansSynced state.
-func TestGossipSyncerSyncedSignal(t *testing.T) {
- t.Parallel()
-
- // We'll create a new gossip syncer and manually override its state to
- // chansSynced.
- _, syncer, _ := newTestSyncer(
- lnwire.NewShortChanIDFromInt(10), defaultEncoding,
- defaultChunkSize,
- )
- syncer.setSyncState(chansSynced)
-
- // We'll go ahead and request a signal to be notified of when it reaches
- // this state.
- signalChan := syncer.ResetSyncedSignal()
-
- // Starting the gossip syncer should cause the signal to be delivered.
- syncer.Start()
-
- select {
- case <-signalChan:
- case <-time.After(time.Second):
- t.Fatal("expected to receive chansSynced signal")
- }
-
- syncer.Stop()
-
- // We'll try this again, but this time we'll request the signal after
- // the syncer is active and has already reached its chansSynced state.
- _, syncer, _ = newTestSyncer(
- lnwire.NewShortChanIDFromInt(10), defaultEncoding,
- defaultChunkSize,
- )
-
- syncer.setSyncState(chansSynced)
-
- syncer.Start()
- defer syncer.Stop()
-
- signalChan = syncer.ResetSyncedSignal()
-
- // The signal should be delivered immediately.
- select {
- case <-signalChan:
- case <-time.After(time.Second):
- t.Fatal("expected to receive chansSynced signal")
- }
-}
diff --git a/lnd/doc.go b/lnd/doc.go
deleted file mode 100644
index 4e2eb511..00000000
--- a/lnd/doc.go
+++ /dev/null
@@ -1 +0,0 @@
-package lnd
diff --git a/lnd/docker/README.md b/lnd/docker/README.md
deleted file mode 100644
index a6343622..00000000
--- a/lnd/docker/README.md
+++ /dev/null
@@ -1,337 +0,0 @@
-This document is written for people who are eager to do something with
-the Lightning Network Daemon (`lnd`). This folder uses `docker-compose` to
-package `lnd` and `btcd` together to make deploying the two daemons as easy as
-typing a few commands. All configuration between `lnd` and `btcd` are handled
-automatically by their `docker-compose` config file.
-
-### Prerequisites
-Name | Version
---------|---------
-docker-compose | 1.9.0
-docker | 1.13.0
-
-### Table of content
- * [Create lightning network cluster](#create-lightning-network-cluster)
- * [Connect to faucet lightning node](#connect-to-faucet-lightning-node)
- * [Questions](#questions)
-
-### Create lightning network cluster
-This section describes a workflow on `simnet`, a development/test network
-that's similar to Bitcoin Core's `regtest` mode. In `simnet` mode blocks can be
-generated at will, as the difficulty is very low. This makes it an ideal
-environment for testing as one doesn't need to wait tens of minutes for blocks
-to arrive in order to test channel related functionality. Additionally, it's
-possible to spin up an arbitrary number of `lnd` instances within containers to
-create a mini development cluster. All state is saved between instances using a
-shared volume.
-
-Current workflow is big because we recreate the whole network by ourselves,
-next versions will use the started `btcd` bitcoin node in `testnet` and
-`faucet` wallet from which you will get the bitcoins.
-
-In the workflow below, we describe the steps required to recreate the following
-topology, and send a payment from `Alice` to `Bob`.
-```
-+ ----- + + --- +
-| Alice | <--- channel ---> | Bob | <--- Bob and Alice are the lightning network daemons which
-+ ----- + + --- + create channels and interact with each other using the
- | | Bitcoin network as source of truth.
- | |
- + - - - - - + - - - - - - +
- |
- + --------------- +
- | Bitcoin network | <--- In the current scenario for simplicity we create only one
- + --------------- + "btcd" node which represents the Bitcoin network, in a
- real situation Alice and Bob will likely be
- connected to different Bitcoin nodes.
-```
-
-**General workflow is the following:**
-
- * Create a `btcd` node running on a private `simnet`.
- * Create `Alice`, one of the `lnd` nodes in our simulation network.
- * Create `Bob`, the other `lnd` node in our simulation network.
- * Mine some blocks to send `Alice` some bitcoins.
- * Open channel between `Alice` and `Bob`.
- * Send payment from `Alice` to `Bob`.
- * Close the channel between `Alice` and `Bob`.
- * Check that on-chain `Bob` balance was changed.
-
-Start `btcd`, and then create an address for `Alice` that we'll directly mine
-bitcoin into.
-```bash
-# Init bitcoin network env variable:
-$ export NETWORK="simnet"
-
-# Create persistent volumes for alice and bob.
-$ docker volume create simnet_lnd_alice
-$ docker volume create simnet_lnd_bob
-
-# Run the "Alice" container and log into it:
-$ docker-compose run -d --name alice --volume simnet_lnd_alice:/root/.lnd lnd
-$ docker exec -i -t alice bash
-
-# Generate a new backward compatible nested p2sh address for Alice:
-alice$ lncli --network=simnet newaddress np2wkh
-
-# Recreate "btcd" node and set Alice's address as mining address:
-$ MINING_ADDRESS= docker-compose up -d btcd
-
-# Generate 400 blocks (we need at least "100 >=" blocks because of coinbase
-# block maturity and "300 ~=" in order to activate segwit):
-$ docker exec -it btcd /start-btcctl.sh generate 400
-
-# Check that segwit is active:
-$ docker exec -it btcd /start-btcctl.sh getblockchaininfo | grep -A 1 segwit
-```
-
-Check `Alice` balance:
-```
-alice$ lncli --network=simnet walletbalance
-```
-
-Connect `Bob` node to `Alice` node.
-
-```bash
-# Run "Bob" node and log into it:
-$ docker-compose run -d --name bob --volume simnet_lnd_bob:/root/.lnd lnd
-$ docker exec -i -t bob bash
-
-# Get the identity pubkey of "Bob" node:
-bob$ lncli --network=simnet getinfo
-
-{
- ----->"identity_pubkey": "0343bc80b914aebf8e50eb0b8e445fc79b9e6e8e5e018fa8c5f85c7d429c117b38",
- "alias": "",
- "num_pending_channels": 0,
- "num_active_channels": 0,
- "num_inactive_channels": 0,
- "num_peers": 0,
- "block_height": 1215,
- "block_hash": "7d0bc86ea4151ed3b5be908ea883d2ac3073263537bcf8ca2dca4bec22e79d50",
- "synced_to_chain": true,
- "testnet": false
- "chains": [
- "bitcoin"
- ]
-}
-
-# Get the IP address of "Bob" node:
-$ docker inspect bob | grep IPAddress
-
-# Connect "Alice" to the "Bob" node:
-alice$ lncli --network=simnet connect @
-
-# Check list of peers on "Alice" side:
-alice$ lncli --network=simnet listpeers
-{
- "peers": [
- {
- "pub_key": "0343bc80b914aebf8e50eb0b8e445fc79b9e6e8e5e018fa8c5f85c7d429c117b38",
- "address": "172.19.0.4:9735",
- "bytes_sent": "357",
- "bytes_recv": "357",
- "sat_sent": "0",
- "sat_recv": "0",
- "inbound": true,
- "ping_time": "0"
- }
- ]
-}
-
-# Check list of peers on "Bob" side:
-bob$ lncli --network=simnet listpeers
-{
- "peers": [
- {
- "pub_key": "03d0cd35b761f789983f3cfe82c68170cd1c3266b39220c24f7dd72ef4be0883eb",
- "address": "172.19.0.3:51932",
- "bytes_sent": "357",
- "bytes_recv": "357",
- "sat_sent": "0",
- "sat_recv": "0",
- "inbound": false,
- "ping_time": "0"
- }
- ]
-}
-```
-
-Create the `Alice<->Bob` channel.
-```bash
-# Open the channel with "Bob":
-alice$ lncli --network=simnet openchannel --node_key= --local_amt=1000000
-
-# Include funding transaction in block thereby opening the channel:
-$ docker exec -it btcd /start-btcctl.sh generate 3
-
-# Check that channel with "Bob" was opened:
-alice$ lncli --network=simnet listchannels
-{
- "channels": [
- {
- "active": true,
- "remote_pubkey": "0343bc80b914aebf8e50eb0b8e445fc79b9e6e8e5e018fa8c5f85c7d429c117b38",
- "channel_point": "3511ae8a52c97d957eaf65f828504e68d0991f0276adff94c6ba91c7f6cd4275:0",
- "chan_id": "1337006139441152",
- "capacity": "1005000",
- "local_balance": "1000000",
- "remote_balance": "0",
- "commit_fee": "8688",
- "commit_weight": "600",
- "fee_per_kw": "12000",
- "unsettled_balance": "0",
- "total_satoshis_sent": "0",
- "total_satoshis_received": "0",
- "num_updates": "0",
- "pending_htlcs": [
- ],
- "csv_delay": 4
- }
- ]
-}
-```
-
-Send the payment from `Alice` to `Bob`.
-```bash
-# Add invoice on "Bob" side:
-bob$ lncli --network=simnet addinvoice --amt=10000
-{
- "r_hash": "",
- "pay_req": "",
-}
-
-# Send payment from "Alice" to "Bob":
-alice$ lncli --network=simnet sendpayment --pay_req=
-
-# Check "Alice"'s channel balance
-alice$ lncli --network=simnet channelbalance
-
-# Check "Bob"'s channel balance
-bob$ lncli --network=simnet channelbalance
-```
-
-Now we have open channel in which we sent only one payment, let's imagine
-that we sent lots of them and we'd now like to close the channel. Let's do
-it!
-```bash
-# List the "Alice" channel and retrieve "channel_point" which represents
-# the opened channel:
-alice$ lncli --network=simnet listchannels
-{
- "channels": [
- {
- "active": true,
- "remote_pubkey": "0343bc80b914aebf8e50eb0b8e445fc79b9e6e8e5e018fa8c5f85c7d429c117b38",
- ---->"channel_point": "3511ae8a52c97d957eaf65f828504e68d0991f0276adff94c6ba91c7f6cd4275:0",
- "chan_id": "1337006139441152",
- "capacity": "1005000",
- "local_balance": "990000",
- "remote_balance": "10000",
- "commit_fee": "8688",
- "commit_weight": "724",
- "fee_per_kw": "12000",
- "unsettled_balance": "0",
- "total_satoshis_sent": "10000",
- "total_satoshis_received": "0",
- "num_updates": "2",
- "pending_htlcs": [
- ],
- "csv_delay": 4
- }
- ]
-}
-
-# Channel point consists of two numbers separated by a colon. The first one
-# is "funding_txid" and the second one is "output_index":
-alice$ lncli --network=simnet closechannel --funding_txid= --output_index=
-
-# Include close transaction in a block thereby closing the channel:
-$ docker exec -it btcd /start-btcctl.sh generate 3
-
-# Check "Alice" on-chain balance was credited by her settled amount in the channel:
-alice$ lncli --network=simnet walletbalance
-
-# Check "Bob" on-chain balance was credited with the funds he received in the
-# channel:
-bob$ lncli --network=simnet walletbalance
-{
- "total_balance": "10000",
- "confirmed_balance": "10000",
- "unconfirmed_balance": "0"
-}
-```
-
-### Connect to faucet lightning node
-In order to be more confident with `lnd` commands I suggest you to try
-to create a mini lightning network cluster ([Create lightning network cluster](#create-lightning-network-cluster)).
-
-In this section we will try to connect our node to the faucet/hub node
-which we will create a channel with and send some amount of
-bitcoins. The schema will be following:
-
-```
-+ ----- + + ------ + (1) + --- +
-| Alice | <--- channel ---> | Faucet | <--- channel ---> | Bob |
-+ ----- + + ------ + + --- +
- | | |
- | | | <--- (2)
- + - - - - - - - - - - - - - + - - - - - - - - - - - - - +
- |
- + --------------- +
- | Bitcoin network | <--- (3)
- + --------------- +
-
-
- (1) You may connect an additional node "Bob" and make the multihop
- payment Alice->Faucet->Bob
-
- (2) "Faucet", "Alice" and "Bob" are the lightning network daemons which
- create channels to interact with each other using the Bitcoin network
- as source of truth.
-
- (3) In current scenario "Alice" and "Faucet" lightning network nodes
- connect to different Bitcoin nodes. If you decide to connect "Bob"
- to "Faucet" then the already created "btcd" node would be sufficient.
-```
-
-First of all you need to run `btcd` node in `testnet` and wait for it to be
-synced with test network (`May the Force and Patience be with you`).
-```bash
-# Init bitcoin network env variable:
-$ NETWORK="testnet" docker-compose up
-```
-
-After `btcd` synced, connect `Alice` to the `Faucet` node.
-
-The `Faucet` node address can be found at the [Faucet Lightning Community webpage](https://faucet.lightning.community).
-
-```bash
-# Run "Alice" container and log into it:
-$ docker-compose run -d --name alice lnd_btc; docker exec -i -t "alice" bash
-
-# Connect "Alice" to the "Faucet" node:
-alice$ lncli --network=testnet connect @
-```
-
-After a connection is achieved, the `Faucet` node should create the channel
-and send some amount of bitcoins to `Alice`.
-
-**What you may do next?:**
-- Send some amount to `Faucet` node back.
-- Connect `Bob` node to the `Faucet` and make multihop payment (`Alice->Faucet->Bob`)
-- Close channel with `Faucet` and check the onchain balance.
-
-### Building standalone docker images
-
-Instructions on how to build standalone docker images (for development or
-production), outside of `docker-compose`, see the
-[docker docs](../docs/DOCKER.md).
-
-### Questions
-[![Irc](https://img.shields.io/badge/chat-on%20freenode-brightgreen.svg)](https://webchat.freenode.net/?channels=lnd)
-
-* How to see `alice` | `bob` | `btcd` logs?
-```bash
-docker-compose logs
-```
diff --git a/lnd/docker/btcd/Dockerfile b/lnd/docker/btcd/Dockerfile
deleted file mode 100644
index 5de4a389..00000000
--- a/lnd/docker/btcd/Dockerfile
+++ /dev/null
@@ -1,60 +0,0 @@
-FROM golang:1.12-alpine as builder
-
-LABEL maintainer="Olaoluwa Osuntokun "
-
-# Install build dependencies such as git and glide.
-RUN apk add --no-cache git gcc musl-dev
-
-WORKDIR $GOPATH/src/github.com/btcsuite/btcd
-
-# Pin down btcd to a version that we know works with lnd.
-ARG BTCD_VERSION=v0.20.1-beta
-
-# Grab and install the latest version of of btcd and all related dependencies.
-RUN git clone https://github.com/btcsuite/btcd.git . \
- && git checkout $BTCD_VERSION \
- && GO111MODULE=on go install -v . ./cmd/...
-
-# Start a new image
-FROM alpine as final
-
-# Expose mainnet ports (server, rpc)
-EXPOSE 8333 8334
-
-# Expose testnet ports (server, rpc)
-EXPOSE 18333 18334
-
-# Expose simnet ports (server, rpc)
-EXPOSE 18555 18556
-
-# Expose segnet ports (server, rpc)
-EXPOSE 28901 28902
-
-# Copy the compiled binaries from the builder image.
-COPY --from=builder /go/bin/addblock /bin/
-COPY --from=builder /go/bin/btcctl /bin/
-COPY --from=builder /go/bin/btcd /bin/
-COPY --from=builder /go/bin/findcheckpoint /bin/
-COPY --from=builder /go/bin/gencerts /bin/
-
-COPY "start-btcctl.sh" .
-COPY "start-btcd.sh" .
-
-RUN apk add --no-cache \
- bash \
- ca-certificates \
-&& mkdir "/rpc" "/root/.btcd" "/root/.btcctl" \
-&& touch "/root/.btcd/btcd.conf" \
-&& chmod +x start-btcctl.sh \
-&& chmod +x start-btcd.sh \
-# Manually generate certificate and add all domains, it is needed to connect
-# "btcctl" and "lnd" to "btcd" over docker links.
-&& "/bin/gencerts" --host="*" --directory="/rpc" --force
-
-# Create a volume to house pregenerated RPC credentials. This will be
-# shared with any lnd, btcctl containers so they can securely query btcd's RPC
-# server.
-# You should NOT do this before certificate generation!
-# Otherwise manually generated certificate will be overridden with shared
-# mounted volume! For more info read dockerfile "VOLUME" documentation.
-VOLUME ["/rpc"]
diff --git a/lnd/docker/btcd/start-btcctl.sh b/lnd/docker/btcd/start-btcctl.sh
deleted file mode 100755
index 8bd5fda0..00000000
--- a/lnd/docker/btcd/start-btcctl.sh
+++ /dev/null
@@ -1,59 +0,0 @@
-#!/usr/bin/env bash
-
-# exit from script if error was raised.
-set -e
-
-# error function is used within a bash function in order to send the error
-# message directly to the stderr output and exit.
-error() {
- echo "$1" > /dev/stderr
- exit 0
-}
-
-# return is used within bash function in order to return the value.
-return() {
- echo "$1"
-}
-
-# set_default function gives the ability to move the setting of default
-# env variable from docker file to the script thereby giving the ability to the
-# user override it durin container start.
-set_default() {
- # docker initialized env variables with blank string and we can't just
- # use -z flag as usually.
- BLANK_STRING='""'
-
- VARIABLE="$1"
- DEFAULT="$2"
-
- if [[ -z "$VARIABLE" || "$VARIABLE" == "$BLANK_STRING" ]]; then
-
- if [ -z "$DEFAULT" ]; then
- error "You should specify default variable"
- else
- VARIABLE="$DEFAULT"
- fi
- fi
-
- return "$VARIABLE"
-}
-
-# Set default variables if needed.
-RPCUSER=$(set_default "$RPCUSER" "devuser")
-RPCPASS=$(set_default "$RPCPASS" "devpass")
-NETWORK=$(set_default "$NETWORK" "simnet")
-
-PARAMS=""
-if [ "$NETWORK" != "mainnet" ]; then
- PARAMS="--$NETWORK"
-fi
-
-PARAMS=$(echo $PARAMS \
- "--rpccert=/rpc/rpc.cert" \
- "--rpcuser=$RPCUSER" \
- "--rpcpass=$RPCPASS" \
- "--rpcserver=localhost" \
-)
-
-PARAMS="$PARAMS $@"
-exec btcctl $PARAMS
diff --git a/lnd/docker/btcd/start-btcd.sh b/lnd/docker/btcd/start-btcd.sh
deleted file mode 100755
index 4f5f7ba3..00000000
--- a/lnd/docker/btcd/start-btcd.sh
+++ /dev/null
@@ -1,74 +0,0 @@
-#!/usr/bin/env bash
-
-# exit from script if error was raised.
-set -e
-
-# error function is used within a bash function in order to send the error
-# message directly to the stderr output and exit.
-error() {
- echo "$1" > /dev/stderr
- exit 0
-}
-
-# return is used within bash function in order to return the value.
-return() {
- echo "$1"
-}
-
-# set_default function gives the ability to move the setting of default
-# env variable from docker file to the script thereby giving the ability to the
-# user override it durin container start.
-set_default() {
- # docker initialized env variables with blank string and we can't just
- # use -z flag as usually.
- BLANK_STRING='""'
-
- VARIABLE="$1"
- DEFAULT="$2"
-
- if [[ -z "$VARIABLE" || "$VARIABLE" == "$BLANK_STRING" ]]; then
-
- if [ -z "$DEFAULT" ]; then
- error "You should specify default variable"
- else
- VARIABLE="$DEFAULT"
- fi
- fi
-
- return "$VARIABLE"
-}
-
-# Set default variables if needed.
-RPCUSER=$(set_default "$RPCUSER" "devuser")
-RPCPASS=$(set_default "$RPCPASS" "devpass")
-DEBUG=$(set_default "$DEBUG" "info")
-NETWORK=$(set_default "$NETWORK" "simnet")
-
-PARAMS=""
-if [ "$NETWORK" != "mainnet" ]; then
- PARAMS="--$NETWORK"
-fi
-
-PARAMS=$(echo $PARAMS \
- "--debuglevel=$DEBUG" \
- "--rpcuser=$RPCUSER" \
- "--rpcpass=$RPCPASS" \
- "--datadir=/data" \
- "--logdir=/data" \
- "--rpccert=/rpc/rpc.cert" \
- "--rpckey=/rpc/rpc.key" \
- "--rpclisten=0.0.0.0" \
- "--txindex"
-)
-
-# Set the mining flag only if address is non empty.
-if [[ -n "$MINING_ADDRESS" ]]; then
- PARAMS="$PARAMS --miningaddr=$MINING_ADDRESS"
-fi
-
-# Add user parameters to command.
-PARAMS="$PARAMS $@"
-
-# Print command and start bitcoin node.
-echo "Command: btcd $PARAMS"
-exec btcd $PARAMS
diff --git a/lnd/docker/docker-compose.ltc.yml b/lnd/docker/docker-compose.ltc.yml
deleted file mode 100644
index 5bf9c323..00000000
--- a/lnd/docker/docker-compose.ltc.yml
+++ /dev/null
@@ -1,55 +0,0 @@
-version: '2'
-services:
- # ltc is an image of litecoin node which used as base image for ltcd and
- # ltcctl. The environment variables default values determined on stage of
- # container start within starting script.
- ltcd:
- image: ltcd
- container_name: ltcd
- build:
- context: ltcd/
- volumes:
- - shared:/rpc
- - litecoin:/data
- environment:
- - RPCUSER
- - RPCPASS
- - NETWORK
- - DEBUG
- - MINING_ADDRESS
- entrypoint: ["./start-ltcd.sh"]
-
- lnd:
- image: lnd
- container_name: lnd_ltc
- build:
- context: ../
- dockerfile: dev.Dockerfile
- environment:
- - RPCUSER
- - RPCPASS
- - NETWORK
- - CHAIN
- - DEBUG
- volumes:
- - shared:/rpc
- - lnd_ltc:/root/.lnd
- entrypoint: ["./start-lnd.sh"]
- links:
- - "ltcd:blockchain"
-
-volumes:
- # shared volume is need to store the btcd rpc certificates and use it within
- # ltcctl and lnd containers.
- shared:
- driver: local
-
- # litecoin volume is needed for maintaining blockchain persistence
- # during ltcd container recreation.
- litecoin:
- driver: local
-
- # lnd volume is used for persisting lnd application data and chain state
- # during container lifecycle.
- lnd_ltc:
- driver: local
diff --git a/lnd/docker/docker-compose.yml b/lnd/docker/docker-compose.yml
deleted file mode 100644
index 61cd58a5..00000000
--- a/lnd/docker/docker-compose.yml
+++ /dev/null
@@ -1,55 +0,0 @@
-version: '2'
-services:
- # btc is an image of bitcoin node which used as base image for btcd and
- # btccli. The environment variables default values determined on stage of
- # container start within starting script.
- btcd:
- image: btcd
- container_name: btcd
- build:
- context: btcd/
- volumes:
- - shared:/rpc
- - bitcoin:/data
- environment:
- - RPCUSER
- - RPCPASS
- - NETWORK
- - DEBUG
- - MINING_ADDRESS
- entrypoint: ["./start-btcd.sh"]
-
- lnd:
- image: lnd
- container_name: lnd
- build:
- context: ../
- dockerfile: dev.Dockerfile
- environment:
- - RPCUSER
- - RPCPASS
- - NETWORK
- - CHAIN
- - DEBUG
- volumes:
- - shared:/rpc
- - lnd:/root/.lnd
- entrypoint: ["./start-lnd.sh"]
- links:
- - "btcd:blockchain"
-
-volumes:
- # shared volume is need to store the btcd rpc certificates and use it within
- # btcctl and lnd containers.
- shared:
- driver: local
-
- # bitcoin volume is needed for maintaining blockchain persistence
- # during btcd container recreation.
- bitcoin:
- driver: local
-
- # lnd volume is used for persisting lnd application data and chain state
- # during container lifecycle.
- lnd:
- driver: local
diff --git a/lnd/docker/lnd/start-lnd.sh b/lnd/docker/lnd/start-lnd.sh
deleted file mode 100755
index c7bfe305..00000000
--- a/lnd/docker/lnd/start-lnd.sh
+++ /dev/null
@@ -1,69 +0,0 @@
-#!/usr/bin/env bash
-
-# exit from script if error was raised.
-set -e
-
-# error function is used within a bash function in order to send the error
-# message directly to the stderr output and exit.
-error() {
- echo "$1" > /dev/stderr
- exit 0
-}
-
-# return is used within bash function in order to return the value.
-return() {
- echo "$1"
-}
-
-# set_default function gives the ability to move the setting of default
-# env variable from docker file to the script thereby giving the ability to the
-# user override it during container start.
-set_default() {
- # docker initialized env variables with blank string and we can't just
- # use -z flag as usually.
- BLANK_STRING='""'
-
- VARIABLE="$1"
- DEFAULT="$2"
-
- if [[ -z "$VARIABLE" || "$VARIABLE" == "$BLANK_STRING" ]]; then
-
- if [ -z "$DEFAULT" ]; then
- error "You should specify default variable"
- else
- VARIABLE="$DEFAULT"
- fi
- fi
-
- return "$VARIABLE"
-}
-
-# Set default variables if needed.
-RPCUSER=$(set_default "$RPCUSER" "devuser")
-RPCPASS=$(set_default "$RPCPASS" "devpass")
-DEBUG=$(set_default "$DEBUG" "debug")
-NETWORK=$(set_default "$NETWORK" "simnet")
-CHAIN=$(set_default "$CHAIN" "bitcoin")
-BACKEND="btcd"
-HOSTNAME=$(hostname)
-if [[ "$CHAIN" == "litecoin" ]]; then
- BACKEND="ltcd"
-fi
-
-# CAUTION: DO NOT use the --noseedback for production/mainnet setups, ever!
-# Also, setting --rpclisten to $HOSTNAME will cause it to listen on an IP
-# address that is reachable on the internal network. If you do this outside of
-# docker, this might be a security concern!
-
-exec lnd \
- --noseedbackup \
- "--$CHAIN.active" \
- "--$CHAIN.$NETWORK" \
- "--$CHAIN.node"="btcd" \
- "--$BACKEND.rpccert"="/rpc/rpc.cert" \
- "--$BACKEND.rpchost"="blockchain" \
- "--$BACKEND.rpcuser"="$RPCUSER" \
- "--$BACKEND.rpcpass"="$RPCPASS" \
- "--rpclisten=$HOSTNAME:10009" \
- --debuglevel="$DEBUG" \
- "$@"
diff --git a/lnd/docker/ltcd/Dockerfile b/lnd/docker/ltcd/Dockerfile
deleted file mode 100644
index e82ee9d0..00000000
--- a/lnd/docker/ltcd/Dockerfile
+++ /dev/null
@@ -1,49 +0,0 @@
-FROM golang:1.12-alpine as builder
-
-LABEL maintainer="Olaoluwa Osuntokun "
-
-# Grab and install the latest version of roasbeef's fork of ltcd and all
-# related dependencies.
-WORKDIR $GOPATH/src/github.com/ltcsuite/ltcd
-RUN apk add --no-cache --update alpine-sdk git
-RUN git clone https://github.com/ltcsuite/ltcd ./
-RUN GO111MODULE=on go install -v . ./cmd/...
-RUN GO111MODULE=on go install . ./cmd/ltcctl ./cmd/gencerts
-
-# Start a new image
-FROM alpine as final
-
-# Expose mainnet ports (server, rpc)
-EXPOSE 9333 9334
-
-# Expose testnet ports (server, rpc)
-EXPOSE 19334 19335
-
-# Expose simnet ports (server, rpc)
-EXPOSE 18555 18556
-
-# Copy the compiled binaries from the builder image.
-COPY --from=builder /go/bin/ltcctl /bin/
-COPY --from=builder /go/bin/ltcd /bin/
-COPY --from=builder /go/bin/gencerts /bin/
-
-COPY "start-ltcctl.sh" .
-COPY "start-ltcd.sh" .
-
-RUN apk add --no-cache \
- bash \
- ca-certificates \
-&& chmod +x start-ltcctl.sh \
-&& chmod +x start-ltcd.sh \
-&& mkdir "/rpc" "/root/.ltcd" "/root/.ltcctl" \
-&& touch "/root/.ltcd/ltcd.conf" \
-# "ltcctl" and "lnd" to "ltcd" over docker links.
-&& "/bin/gencerts" --host="*" --directory="/rpc" --force
-
-# Create a volume to house pregenerated RPC credentials. This will be
-# shared with any lnd, btcctl containers so they can securely query ltcd's RPC
-# server.
-# You should NOT do this before certificate generation!
-# Otherwise manually generated certificate will be overridden with shared
-# mounted volume! For more info read dockerfile "VOLUME" documentation.
-VOLUME ["/rpc"]
diff --git a/lnd/docker/ltcd/start-ltcctl.sh b/lnd/docker/ltcd/start-ltcctl.sh
deleted file mode 100755
index 2888ab9c..00000000
--- a/lnd/docker/ltcd/start-ltcctl.sh
+++ /dev/null
@@ -1,59 +0,0 @@
-#!/usr/bin/env bash
-
-# exit from script if error was raised.
-set -e
-
-# error function is used within a bash function in order to send the error
-# message directly to the stderr output and exit.
-error() {
- echo "$1" > /dev/stderr
- exit 0
-}
-
-# return is used within bash function in order to return the value.
-return() {
- echo "$1"
-}
-
-# set_default function gives the ability to move the setting of default
-# env variable from docker file to the script thereby giving the ability to the
-# user override it durin container start.
-set_default() {
- # docker initialized env variables with blank string and we can't just
- # use -z flag as usually.
- BLANK_STRING='""'
-
- VARIABLE="$1"
- DEFAULT="$2"
-
- if [[ -z "$VARIABLE" || "$VARIABLE" == "$BLANK_STRING" ]]; then
-
- if [ -z "$DEFAULT" ]; then
- error "You should specify default variable"
- else
- VARIABLE="$DEFAULT"
- fi
- fi
-
- return "$VARIABLE"
-}
-
-# Set default variables if needed.
-RPCUSER=$(set_default "$RPCUSER" "devuser")
-RPCPASS=$(set_default "$RPCPASS" "devpass")
-NETWORK=$(set_default "$NETWORK" "simnet")
-
-PARAMS=""
-if [ "$NETWORK" != "mainnet" ]; then
- PARAMS="--$NETWORK"
-fi
-
-PARAMS=$(echo $PARAMS \
- "--rpccert=/rpc/rpc.cert" \
- "--rpcuser=$RPCUSER" \
- "--rpcpass=$RPCPASS" \
- "--rpcserver=localhost" \
-)
-
-PARAMS="$PARAMS $@"
-exec ltcctl $PARAMS
diff --git a/lnd/docker/ltcd/start-ltcd.sh b/lnd/docker/ltcd/start-ltcd.sh
deleted file mode 100755
index b6c6d699..00000000
--- a/lnd/docker/ltcd/start-ltcd.sh
+++ /dev/null
@@ -1,74 +0,0 @@
-#!/usr/bin/env bash
-
-# exit from script if error was raised.
-set -e
-
-# error function is used within a bash function in order to send the error
-# message directly to the stderr output and exit.
-error() {
- echo "$1" > /dev/stderr
- exit 0
-}
-
-# return is used within bash function in order to return the value.
-return() {
- echo "$1"
-}
-
-# set_default function gives the ability to move the setting of default
-# env variable from docker file to the script thereby giving the ability to the
-# user override it durin container start.
-set_default() {
- # docker initialized env variables with blank string and we can't just
- # use -z flag as usually.
- BLANK_STRING='""'
-
- VARIABLE="$1"
- DEFAULT="$2"
-
- if [[ -z "$VARIABLE" || "$VARIABLE" == "$BLANK_STRING" ]]; then
-
- if [ -z "$DEFAULT" ]; then
- error "You should specify default variable"
- else
- VARIABLE="$DEFAULT"
- fi
- fi
-
- return "$VARIABLE"
-}
-
-# Set default variables if needed.
-RPCUSER=$(set_default "$RPCUSER" "devuser")
-RPCPASS=$(set_default "$RPCPASS" "devpass")
-DEBUG=$(set_default "$DEBUG" "info")
-NETWORK=$(set_default "$NETWORK" "simnet")
-
-PARAMS=""
-if [ "$NETWORK" != "mainnet" ]; then
- PARAMS="--$NETWORK"
-fi
-
-PARAMS=$(echo $PARAMS \
- "--debuglevel=$DEBUG" \
- "--rpcuser=$RPCUSER" \
- "--rpcpass=$RPCPASS" \
- "--datadir=/data" \
- "--logdir=/data" \
- "--rpccert=/rpc/rpc.cert" \
- "--rpckey=/rpc/rpc.key" \
- "--rpclisten=0.0.0.0" \
- "--txindex"
-)
-
-# Set the mining flag only if address is non empty.
-if [[ -n "$MINING_ADDRESS" ]]; then
- PARAMS="$PARAMS --miningaddr=$MINING_ADDRESS"
-fi
-
-# Add user parameters to command.
-PARAMS="$PARAMS $@"
-
-# Print command and start bitcoin node.
-echo "Command: ltcd $PARAMS"
-exec ltcd $PARAMS
diff --git a/lnd/docs/DOCKER.md b/lnd/docs/DOCKER.md
deleted file mode 100644
index 700f5677..00000000
--- a/lnd/docs/DOCKER.md
+++ /dev/null
@@ -1,110 +0,0 @@
-# Docker Instructions
-
-There are two flavors of Dockerfiles available:
- - `Dockerfile`: Used for production builds. Checks out the source code from
- GitHub during build. The build argument `--build-arg checkout=v0.x.x-beta`
- can be used to specify what git tag or commit to check out before building.
- - `dev.Dockerfile` Used for development or testing builds. Uses the local code
- when building and allows local changes to be tested more easily.
-
-## Development/testing
-
-To build a standalone development image from the local source directory, use the
-following command:
-
-```
-$ docker build --tag=myrepository/lnd-dev -f dev.Dockerfile .
-```
-
-There is also a `docker-compose` setup available for development or testing that
-spins up a `btcd` backend alongside `lnd`. Check out the documentation at
-[docker/README.md](../docker/README.md) to learn more about how to use that
-setup to create a small local Lightning Network.
-
-## Production
-
-To use Docker in a production environment, you can run `lnd` by creating a
-Docker container, adding the appropriate command-line options as parameters.
-
-You first need to build the `lnd` docker image:
-
-```
-$ docker build --tag=myrepository/lnd --build-arg checkout=v0.11.1-beta .
-```
-
-It is recommended that you checkout the latest released tag.
-
-You can continue by creating and running the container:
-
-```
-$ docker run lnd [command-line options]
-```
-
-Note: there currently are no automated docker image builds available.
-
-## Volumes
-
-A Docker volume will be created with your `.lnd` directory automatically, and will
-persist through container restarts.
-
-You can also optionally manually specify a local folder to be used as a volume:
-
-```
-$ docker create --name=mylndcontainer -v /media/lnd-docker/:/root/.lnd myrepository/lnd [command-line options]
-```
-
-## Example
-
-Here is an example testnet `lnd` that uses Neutrino:
-
-```
-$ docker run --name lnd-testnet myrepository/lnd --bitcoin.active --bitcoin.testnet --bitcoin.node=neutrino --neutrino.connect=faucet.lightning.community
-```
-
-Create a wallet (and write down the seed):
-
-```
-$ docker exec -it lnd-testnet lncli create
-```
-
-Confirm `lnd` has begun to synchronize:
-
-```
-$ docker logs lnd-testnet
-[snipped]
-2018-05-01 02:28:01.201 [INF] RPCS: RPC server listening on 127.0.0.1:10009
-2018-05-01 02:28:01.201 [INF] LTND: Waiting for chain backend to finish sync, start_height=2546
-2018-05-01 02:28:01.201 [INF] RPCS: gRPC proxy started at 127.0.0.1:8080
-2018-05-01 02:28:08.999 [INF] LNWL: Caught up to height 10000
-2018-05-01 02:28:09.872 [INF] BTCN: Processed 10547 blocks in the last 10.23s (height 10547, 2012-05-28 05:02:32 +0000 UTC)
-```
-
-This is a simple example, it is possible to use any command-line options necessary
-to expose RPC ports, use `btcd` or `bitcoind`, or add additional chains.
-
-## LND Development and Testing
-
-To test the Docker production image locally, run the following from
-the project root:
-
-```
-$ docker build . -t myrepository/lnd:master
-```
-
-To choose a specific branch or tag instead, use the "checkout" build-arg. For example, to build the latest commits in master:
-
-```
-$ docker build . --build-arg checkout=v0.8.0-beta -t myrepository/lnd:v0.8.0-beta
-```
-
-To build the image using the most current tag:
-
-```
-$ docker build . --build-arg checkout=$(git describe --tags `git rev-list --tags --max-count=1`) -t myrepository/lnd:latest-tag
-```
-
-Once the image has been built and tagged locally, start the container:
-
-```
-docker run --name=lnd-testnet -it myrepository/lnd:latest-tag --bitcoin.active --bitcoin.testnet --bitcoin.node=neutrino --neutrino.connect=faucet.lightning.community
-```
diff --git a/lnd/docs/INSTALL.md b/lnd/docs/INSTALL.md
deleted file mode 100644
index 4ee1caa1..00000000
--- a/lnd/docs/INSTALL.md
+++ /dev/null
@@ -1,481 +0,0 @@
-# Table of Contents
-* [Installation](#installation)
- * [Preliminaries](#preliminaries)
- * [Installing lnd](#installing-lnd)
-* [Available Backend Operating Modes](#available-backend-operating-modes)
- * [btcd Options](#btcd-options)
- * [Neutrino Options](#neutrino-options)
- * [Bitcoind Options](#bitcoind-options)
- * [Using btcd](#using-btcd)
- * [Installing btcd](#installing-btcd)
- * [Starting btcd](#starting-btcd)
- * [Running lnd using the btcd backend](#running-lnd-using-the-btcd-backend)
- * [Using Neutrino](#using-neutrino)
- * [Using bitcoind or litecoind](#using-bitcoind-or-litecoind)
-* [Creating a Wallet](#creating-a-wallet)
-* [Macaroons](#macaroons)
-* [Network Reachability](#network-reachability)
-* [Simnet vs. Testnet Development](#simnet-vs-testnet-development)
-* [Creating an lnd.conf (Optional)](#creating-an-lndconf-optional)
-
-# Installation
-
-### Preliminaries
- In order to work with [`lnd`](https://github.com/lightningnetwork/lnd), the
- following build dependencies are required:
-
- * **Go:** `lnd` is written in Go. To install, run one of the following commands:
-
-
- **Note**: The minimum version of Go supported is Go 1.13. We recommend that
- users use the latest version of Go, which at the time of writing is
- [`1.15`](https://blog.golang.org/go1.15).
-
-
- On Linux:
-
- (x86-64)
- ```
- wget https://dl.google.com/go/go1.13.linux-amd64.tar.gz
- sha256sum go1.13.linux-amd64.tar.gz | awk -F " " '{ print $1 }'
- ```
-
- The final output of the command above should be
- `68a2297eb099d1a76097905a2ce334e3155004ec08cdea85f24527be3c48e856`. If it
- isn't, then the target REPO HAS BEEN MODIFIED, and you shouldn't install
- this version of Go. If it matches, then proceed to install Go:
- ```
- tar -C /usr/local -xzf go1.13.linux-amd64.tar.gz
- export PATH=$PATH:/usr/local/go/bin
- ```
-
- (ARMv6)
- ```
- wget https://dl.google.com/go/go1.13.linux-armv6l.tar.gz
- sha256sum go1.13.linux-armv6l.tar.gz | awk -F " " '{ print $1 }'
- ```
-
- The final output of the command above should be
- `931906d67cae1222f501e7be26e0ee73ba89420be0c4591925901cb9a4e156f0`. If it
- isn't, then the target REPO HAS BEEN MODIFIED, and you shouldn't install
- this version of Go. If it matches, then proceed to install Go:
- ```
- tar -C /usr/local -xzf go1.13.linux-armv6l.tar.gz
- export PATH=$PATH:/usr/local/go/bin
- ```
-
- On Mac OS X:
- ```
- brew install go@1.13
- ```
-
- On FreeBSD:
- ```
- pkg install go
- ```
-
- Alternatively, one can download the pre-compiled binaries hosted on the
- [Golang download page](https://golang.org/dl/). If one seeks to install
- from source, then more detailed installation instructions can be found
- [here](https://golang.org/doc/install).
-
- At this point, you should set your `$GOPATH` environment variable, which
- represents the path to your workspace. By default, `$GOPATH` is set to
- `~/go`. You will also need to add `$GOPATH/bin` to your `PATH`. This ensures
- that your shell will be able to detect the binaries you install.
-
- ```bash
- export GOPATH=~/gocode
- export PATH=$PATH:$GOPATH/bin
- ```
-
- We recommend placing the above in your .bashrc or in a setup script so that
- you can avoid typing this every time you open a new terminal window.
-
- * **Go modules:** This project uses [Go modules](https://github.com/golang/go/wiki/Modules)
- to manage dependencies as well as to provide *reproducible builds*.
-
- Usage of Go modules (with Go 1.13) means that you no longer need to clone
- `lnd` into your `$GOPATH` for development purposes. Instead, your `lnd`
- repo can now live anywhere!
-
-### Installing lnd
-
-With the preliminary steps completed, to install `lnd`, `lncli`, and all
-related dependencies run the following commands:
-```
-git clone https://github.com/lightningnetwork/lnd
-cd lnd
-make install
-```
-
-The command above will install the current _master_ branch of `lnd`. If you
-wish to install a tagged release of `lnd` (as the master branch can at times be
-unstable), then [visit then release page to locate the latest
-release](https://github.com/lightningnetwork/lnd/releases). Assuming the name
-of the release is `v0.x.x`, then you can compile this release from source with
-a small modification to the above command:
-```
-git clone https://github.com/lightningnetwork/lnd
-cd lnd
-git checkout v0.x.x
-make install
-```
-
-
-**NOTE**: Our instructions still use the `$GOPATH` directory from prior
-versions of Go, but with Go 1.13, it's now possible for `lnd` to live
-_anywhere_ on your file system.
-
-For Windows WSL users, make will need to be referenced directly via
-/usr/bin/make/, or alternatively by wrapping quotation marks around make,
-like so:
-
-```
-/usr/bin/make && /usr/bin/make install
-
-"make" && "make" install
-```
-
-On FreeBSD, use gmake instead of make.
-
-Alternatively, if one doesn't wish to use `make`, then the `go` commands can be
-used directly:
-```
-GO111MODULE=on go install -v ./...
-```
-
-**Updating**
-
-To update your version of `lnd` to the latest version run the following
-commands:
-```
-cd $GOPATH/src/github.com/lightningnetwork/lnd
-git pull
-make clean && make && make install
-```
-
-On FreeBSD, use gmake instead of make.
-
-Alternatively, if one doesn't wish to use `make`, then the `go` commands can be
-used directly:
-```
-cd $GOPATH/src/github.com/lightningnetwork/lnd
-git pull
-GO111MODULE=on go install -v ./...
-```
-
-**Tests**
-
-To check that `lnd` was installed properly run the following command:
-```
-make check
-```
-
-This command requires `bitcoind` (almost any version should do) to be available
-in the system's `$PATH` variable. Otherwise some of the tests will fail.
-
-# Available Backend Operating Modes
-
-In order to run, `lnd` requires, that the user specify a chain backend. At the
-time of writing of this document, there are three available chain backends:
-`btcd`, `neutrino`, `bitcoind`. All including neutrino can run on mainnet with
-an out of the box `lnd` instance. We don't require `--txindex` when running
-with `bitcoind` or `btcd` but activating the `txindex` will generally make
-`lnd` run faster.
-
-**NOTE: WE DO NOT FULLY SUPPORT PRUNED OPERATING MODES FOR FULL NODES.** It's
-possible to run a node in a pruned mode and have it serve lnd, however one must
-take care to ensure that `lnd` has all blocks on disk since the birth of the
-wallet, and the age of the earliest channels (which were created around March
-2018).
-
-The set of arguments for each of the backend modes is as follows:
-
-## btcd Options
-```
-btcd:
- --btcd.dir= The base directory that contains the node's data, logs, configuration file, etc. (default: /Users/roasbeef/Library/Application Support/Btcd)
- --btcd.rpchost= The daemon's rpc listening address. If a port is omitted, then the default port for the selected chain parameters will be used. (default: localhost)
- --btcd.rpcuser= Username for RPC connections
- --btcd.rpcpass= Password for RPC connections
- --btcd.rpccert= File containing the daemon's certificate file (default: /Users/roasbeef/Library/Application Support/Btcd/rpc.cert)
- --btcd.rawrpccert= The raw bytes of the daemon's PEM-encoded certificate chain which will be used to authenticate the RPC connection.
-```
-
-## Neutrino Options
-```
-neutrino:
- -a, --neutrino.addpeer= Add a peer to connect with at startup
- --neutrino.connect= Connect only to the specified peers at startup
- --neutrino.maxpeers= Max number of inbound and outbound peers
- --neutrino.banduration= How long to ban misbehaving peers. Valid time units are {s, m, h}. Minimum 1 second
- --neutrino.banthreshold= Maximum allowed ban score before disconnecting and banning misbehaving peers.
- --neutrino.useragentname= Used to help identify ourselves to other bitcoin peers.
- --neutrino.useragentversion= Used to help identify ourselves to other bitcoin peers.
-```
-
-## Bitcoind Options
-```
-bitcoind:
- --bitcoind.dir= The base directory that contains the node's data, logs, configuration file, etc. (default: /Users/roasbeef/Library/Application Support/Bitcoin)
- --bitcoind.rpchost= The daemon's rpc listening address. If a port is omitted, then the default port for the selected chain parameters will be used. (default: localhost)
- --bitcoind.rpcuser= Username for RPC connections
- --bitcoind.rpcpass= Password for RPC connections
- --bitcoind.zmqpubrawblock= The address listening for ZMQ connections to deliver raw block notifications
- --bitcoind.zmqpubrawtx= The address listening for ZMQ connections to deliver raw transaction notifications
- --bitcoind.estimatemode= The fee estimate mode. Must be either "ECONOMICAL" or "CONSERVATIVE". (default: CONSERVATIVE)
-```
-
-## Using btcd
-
-### Installing btcd
-
-On FreeBSD, use gmake instead of make.
-
-To install btcd, run the following commands:
-
-Install **btcd**:
-```
-make btcd
-```
-
-Alternatively, you can install [`btcd` directly from its
-repo](https://github.com/btcsuite/btcd).
-
-### Starting btcd
-
-Running the following command will create `rpc.cert` and default `btcd.conf`.
-
-```
-btcd --testnet --rpcuser=REPLACEME --rpcpass=REPLACEME
-```
-If you want to use `lnd` on testnet, `btcd` needs to first fully sync the
-testnet blockchain. Depending on your hardware, this may take up to a few
-hours. Note that adding `--txindex` is optional, as it will take longer to sync
-the node, but then `lnd` will generally operate faster as it can hit the index
-directly, rather than scanning blocks or BIP 158 filters for relevant items.
-
-(NOTE: It may take several minutes to find segwit-enabled peers.)
-
-While `btcd` is syncing you can check on its progress using btcd's `getinfo`
-RPC command:
-```
-btcctl --testnet --rpcuser=REPLACEME --rpcpass=REPLACEME getinfo
-{
- "version": 120000,
- "protocolversion": 70002,
- "blocks": 1114996,
- "timeoffset": 0,
- "connections": 7,
- "proxy": "",
- "difficulty": 422570.58270815,
- "testnet": true,
- "relayfee": 0.00001,
- "errors": ""
-}
-```
-
-Additionally, you can monitor btcd's logs to track its syncing progress in real
-time.
-
-You can test your `btcd` node's connectivity using the `getpeerinfo` command:
-```
-btcctl --testnet --rpcuser=REPLACEME --rpcpass=REPLACEME getpeerinfo | more
-```
-
-### Running lnd using the btcd backend
-
-If you are on testnet, run this command after `btcd` has finished syncing.
-Otherwise, replace `--bitcoin.testnet` with `--bitcoin.simnet`. If you are
-installing `lnd` in preparation for the
-[tutorial](https://dev.lightning.community/tutorial), you may skip this step.
-```
-lnd --bitcoin.active --bitcoin.testnet --debuglevel=debug --btcd.rpcuser=kek --btcd.rpcpass=kek --externalip=X.X.X.X
-```
-
-## Using Neutrino
-
-In order to run `lnd` in its light client mode, you'll need to locate a
-full-node which is capable of serving this new light client mode. `lnd` uses
-[BIP 157](https://github.com/bitcoin/bips/blob/master/bip-0157.mediawiki) and [BIP
-158](https://github.com/bitcoin/bips/blob/master/bip-0158.mediawiki) for its light client
-mode. A public instance of such a node can be found at
-`faucet.lightning.community`.
-
-To run lnd in neutrino mode, run `lnd` with the following arguments, (swapping
-in `--bitcoin.simnet` if needed), and also your own `btcd` node if available:
-```
-lnd --bitcoin.active --bitcoin.testnet --debuglevel=debug --bitcoin.node=neutrino --neutrino.connect=faucet.lightning.community
-```
-
-
-## Using bitcoind or litecoind
-
-The configuration for bitcoind and litecoind are nearly identical, the
-following steps can be mirrored with loss of generality to enable a litecoind
-backend. Setup will be described in regards to `bitcoind`, but note that `lnd`
-uses a distinct `litecoin.node=litecoind` argument and analogous
-subconfigurations prefixed by `litecoind`. Note that adding `--txindex` is
-optional, as it will take longer to sync the node, but then `lnd` will
-generally operate faster as it can hit the index directly, rather than scanning
-blocks or BIP 158 filters for relevant items.
-
-To configure your bitcoind backend for use with lnd, first complete and verify
-the following:
-
-- Since `lnd` uses
- [ZeroMQ](https://github.com/bitcoin/bitcoin/blob/master/doc/zmq.md) to
- interface with `bitcoind`, *your `bitcoind` installation must be compiled with
- ZMQ*. Note that if you installed `bitcoind` from source and ZMQ was not present,
- then ZMQ support will be disabled, and `lnd` will quit on a `connection refused` error.
- If you installed `bitcoind` via Homebrew in the past ZMQ may not be included
- ([this has now been fixed](https://github.com/Homebrew/homebrew-core/pull/23088)
- in the latest Homebrew recipe for bitcoin)
-- Configure the `bitcoind` instance for ZMQ with `--zmqpubrawblock` and
- `--zmqpubrawtx`. These options must each use their own unique address in order
- to provide a reliable delivery of notifications (e.g.
- `--zmqpubrawblock=tcp://127.0.0.1:28332` and
- `--zmqpubrawtx=tcp://127.0.0.1:28333`).
-- Start `bitcoind` running against testnet, and let it complete a full sync with
- the testnet chain (alternatively, use `--bitcoind.regtest` instead).
-
-Here's a sample `bitcoin.conf` for use with lnd:
-```
-testnet=1
-server=1
-daemon=1
-zmqpubrawblock=tcp://127.0.0.1:28332
-zmqpubrawtx=tcp://127.0.0.1:28333
-```
-
-Once all of the above is complete, and you've confirmed `bitcoind` is fully
-updated with the latest blocks on testnet, run the command below to launch
-`lnd` with `bitcoind` as your backend (as with `bitcoind`, you can create an
-`lnd.conf` to save these options, more info on that is described further
-below):
-
-```
-lnd --bitcoin.active --bitcoin.testnet --debuglevel=debug --bitcoin.node=bitcoind --bitcoind.rpcuser=REPLACEME --bitcoind.rpcpass=REPLACEME --bitcoind.zmqpubrawblock=tcp://127.0.0.1:28332 --bitcoind.zmqpubrawtx=tcp://127.0.0.1:28333 --externalip=X.X.X.X
-```
-
-*NOTE:*
-- The auth parameters `rpcuser` and `rpcpass` parameters can typically be
- determined by `lnd` for a `bitcoind` instance running under the same user,
- including when using cookie auth. In this case, you can exclude them from the
- `lnd` options entirely.
-- If you DO choose to explicitly pass the auth parameters in your `lnd.conf` or
- command line options for `lnd` (`bitcoind.rpcuser` and `bitcoind.rpcpass` as
- shown in example command above), you must also specify the
- `bitcoind.zmqpubrawblock` and `bitcoind.zmqpubrawtx` options. Otherwise, `lnd`
- will attempt to get the configuration from your `bitcoin.conf`.
-- You must ensure the same addresses are used for the `bitcoind.zmqpubrawblock`
- and `bitcoind.zmqpubrawtx` options passed to `lnd` as for the `zmqpubrawblock`
- and `zmqpubrawtx` passed in the `bitcoind` options respectively.
-- When running lnd and bitcoind on the same Windows machine, ensure you use
- 127.0.0.1, not localhost, for all configuration options that require a TCP/IP
- host address. If you use "localhost" as the host name, you may see extremely
- slow inter-process-communication between lnd and the bitcoind backend. If lnd
- is experiencing this issue, you'll see "Waiting for chain backend to finish
- sync, start_height=XXXXXX" as the last entry in the console or log output, and
- lnd will appear to hang. Normal lnd output will quickly show multiple
- messages like this as lnd consumes blocks from bitcoind.
-- Don't connect more than two or three instances of `lnd` to `bitcoind`. With
- the default `bitcoind` settings, having more than one instance of `lnd`, or
- `lnd` plus any application that consumes the RPC could cause `lnd` to miss
- crucial updates from the backend.
-- The default fee estimate mode in `bitcoind` is CONSERVATIVE. You can set
-  `bitcoind.estimatemode=ECONOMICAL` to change it into ECONOMICAL. Furthermore,
-  if you start `bitcoind` in `regtest`, this configuration won't take any effect.
-
-
-# Creating a wallet
-If `lnd` is being run for the first time, create a new wallet with:
-```
-lncli create
-```
-This will prompt for a wallet password, and optionally a cipher seed
-passphrase.
-
-`lnd` will then print a 24 word cipher seed mnemonic, which can be used to
-recover the wallet in case of data loss. The user should write this down and
-keep in a safe place.
-
-
-# Macaroons
-
-`lnd`'s authentication system is called **macaroons**, which are decentralized
-bearer credentials allowing for delegation, attenuation, and other cool
-features. You can learn more about them in Alex Akselrod's [writeup on
-Github](https://github.com/lightningnetwork/lnd/issues/20).
-
-Running `lnd` for the first time will by default generate the `admin.macaroon`,
-`read_only.macaroon`, and `macaroons.db` files that are used to authenticate
-into `lnd`. They will be stored in the network directory (default:
-`lnddir/data/chain/bitcoin/mainnet`) so that it's possible to use a distinct
-password for mainnet, testnet, simnet, etc. Note that if you specified an
-alternative data directory (via the `--datadir` argument), you will have to
-additionally pass the updated location of the `admin.macaroon` file into `lncli`
-using the `--macaroonpath` argument.
-
-To disable macaroons for testing, pass the `--no-macaroons` flag into *both*
-`lnd` and `lncli`.
-
-# Network Reachability
-
-If you'd like to signal to other nodes on the network that you'll accept
-incoming channels (as peers need to connect inbound to initiate a channel
-funding workflow), then the `--externalip` flag should be set to your publicly
-reachable IP address.
-
-# Simnet vs. Testnet Development
-
-If you are doing local development, such as for the tutorial, you'll want to
-start both `btcd` and `lnd` in the `simnet` mode. Simnet is similar to regtest
-in that you'll be able to instantly mine blocks as needed to test `lnd`
-locally. In order to start either daemon in the `simnet` mode use `simnet`
-instead of `testnet`, adding the `--bitcoin.simnet` flag instead of the
-`--bitcoin.testnet` flag.
-
-Another relevant command line flag for local testing of new `lnd` developments
-is the `--debughtlc` flag. When starting `lnd` with this flag, it'll be able to
-automatically settle a special type of HTLC sent to it. This means that you
-won't need to manually insert invoices in order to test payment connectivity.
-To send this "special" HTLC type, include the `--debugsend` command at the end
-of your `sendpayment` commands.
-
-
-There are currently two primary ways to run `lnd`: one requires a local `btcd`
-instance with the RPC service exposed, and the other uses a fully integrated
-light client powered by [neutrino](https://github.com/pkt-cash/pktd/neutrino).
-
-# Creating an lnd.conf (Optional)
-
-Optionally, if you'd like to have a persistent configuration between `lnd`
-launches, allowing you to simply type `lnd --bitcoin.testnet --bitcoin.active`
-at the command line, you can create an `lnd.conf`.
-
-**On MacOS, located at:**
-`/Users/[username]/Library/Application Support/Lnd/lnd.conf`
-
-**On Linux, located at:**
-`~/.lnd/lnd.conf`
-
-Here's a sample `lnd.conf` for `btcd` to get you started:
-```
-[Application Options]
-debuglevel=trace
-maxpendingchannels=10
-
-[Bitcoin]
-bitcoin.active=1
-```
-
-Notice the `[Bitcoin]` section. This section houses the parameters for the
-Bitcoin chain. `lnd` also supports Litecoin testnet4 (but not both BTC and LTC
-at the same time), so when working with Litecoin be sure to set the parameters
-for Litecoin accordingly. See a more detailed sample config file available
-[here](https://github.com/lightningnetwork/lnd/blob/master/sample-lnd.conf)
-and explore the other sections for node configuration, including `[Btcd]`,
-`[Bitcoind]`, `[Neutrino]`, `[Ltcd]`, and `[Litecoind]` depending on which
-chain and node type you're using.
diff --git a/lnd/docs/MAKEFILE.md b/lnd/docs/MAKEFILE.md
deleted file mode 100644
index 98532882..00000000
--- a/lnd/docs/MAKEFILE.md
+++ /dev/null
@@ -1,209 +0,0 @@
-Makefile
-========
-
-To build, verify, and install `lnd` from source, use the following
-commands:
-```
-make
-make check
-make install
-```
-
-The command `make check` requires `bitcoind` (almost any version should do) to
-be available in the system's `$PATH` variable. Otherwise some of the tests will
-fail.
-
-Developers
-==========
-
-This document specifies all commands available from `lnd`'s `Makefile`.
-The commands included handle:
-- Installation of all go-related dependencies.
-- Compilation and installation of `lnd` and `lncli`.
-- Compilation and installation of `btcd` and `btcctl`.
-- Running unit and integration suites.
-- Testing, debugging, and flake hunting.
-- Formatting and linting.
-
-Commands
-========
-
-- [`all`](#scratch)
-- [`btcd`](#btcd)
-- [`build`](#build)
-- [`check`](#check)
-- [`clean`](#clean)
-- [`default`](#default)
-- [`dep`](#dep)
-- [`flake-unit`](#flake-unit)
-- [`flakehunter`](#flakehunter)
-- [`fmt`](#fmt)
-- [`install`](#install)
-- [`itest`](#itest)
-- [`lint`](#lint)
-- [`list`](#list)
-- [`rpc`](#rpc)
-- [`scratch`](#scratch)
-- [`travis`](#travis)
-- [`unit`](#unit)
-- [`unit-cover`](#unit-cover)
-- [`unit-race`](#unit-race)
-
-`all`
------
-Compiles, tests, and installs `lnd` and `lncli`. Equivalent to
-[`scratch`](#scratch) [`check`](#check) [`install`](#install).
-
-`btcd`
-------
-Ensures that the [`github.com/btcsuite/btcd`][btcd] repository is checked out
-locally. Lastly, installs the version of
-[`github.com/btcsuite/btcd`][btcd] specified in `Gopkg.toml`
-
-`build`
--------
-Compiles the current source and vendor trees, creating `./lnd` and
-`./lncli`.
-
-`check`
--------
-Installs the version of [`github.com/btcsuite/btcd`][btcd] specified
-in `Gopkg.toml`, then runs the unit tests followed by the integration
-tests.
-
-Related: [`unit`](#unit) [`itest`](#itest)
-
-`clean`
--------
-Removes compiled versions of both `./lnd` and `./lncli`, and removes the
-`vendor` tree.
-
-`default`
----------
-Alias for [`scratch`](#scratch).
-
-`flake-unit`
-------------
-Runs the unit test endlessly until a failure is detected.
-
-Arguments:
-- `pkg=`
-- `case=`
-- `timeout=`
-
-Related: [`unit`](#unit)
-
-`flakehunter`
--------------
-Runs the integration test suite endlessly until a failure is detected.
-
-Arguments:
-- `icase=`
-- `timeout=`
-
-Related: [`itest`](#itest)
-
-`fmt`
------
-Runs `go fmt` on the entire project.
-
-`install`
----------
-Copies the compiled `lnd` and `lncli` binaries into `$GOPATH/bin`.
-
-`itest`
--------
-Installs the version of [`github.com/btcsuite/btcd`][btcd] specified in
-`Gopkg.toml`, builds the `./lnd` and `./lncli` binaries, then runs the
-integration test suite.
-
-Arguments:
-- `icase=` (the snake_case version of the testcase name field in the testCases slice (i.e. sweep_coins), not the test func name)
-- `timeout=`
-
-`itest-parallel`
-------
-Does the same as `itest` but splits the total set of tests into
-`NUM_ITEST_TRANCHES` tranches (currently set to 6 by default, can be overwritten
-by setting `tranches=Y`) and runs them in parallel.
-
-Arguments:
-- `icase=`: The snake_case version of the testcase name field in the
- testCases slice (i.e. `sweep_coins`, not the test func name) or any regular
- expression describing a set of tests.
-- `timeout=`
-- `tranches=`: The number of parts/tranches to split the
- total set of tests into.
-- `parallel=`: The number of threads to run in parallel. Must
- be greater or equal to `tranches`, otherwise undefined behavior is expected.
-
-`flakehunter-parallel`
-------
-Runs the test specified by `icase` simultaneously `parallel` (default=6) times
-until an error occurs. Useful for hunting flakes.
-
-Example:
-```shell
-$ make flakehunter-parallel icase='(data_loss_protection|channel_backup)' backend=neutrino
-```
-
-`lint`
-------
-Ensures that [`gopkg.in/alecthomas/gometalinter.v1`][gometalinter] is
-installed, then lints the project.
-
-`list`
-------
-Lists all known make targets.
-
-`rpc`
------
-Compiles the `lnrpc` proto files.
-
-`scratch`
----------
-Compiles all dependencies and builds the `./lnd` and `./lncli` binaries.
-Equivalent to [`lint`](#lint) [`btcd`](#btcd)
-[`unit-race`](#unit-race).
-
-`unit`
-------
-Runs the unit test suite. By default, this will run all known unit tests.
-
-Arguments:
-- `pkg=`
-- `case=`
-- `timeout=`
-- `log="stdlog[ ]"` prints logs to stdout
- - `` can be `info` (default), `debug`, `trace`, `warn`, `error`, `critical`, or `off`
-
-`unit-cover`
-------------
-Runs the unit test suite with test coverage, compiling the statistics in
-`profile.cov`.
-
-Arguments:
-- `pkg=`
-- `case=`
-- `timeout=`
-- `log="stdlog[ ]"` prints logs to stdout
- - `` can be `info` (default), `debug`, `trace`, `warn`, `error`, `critical`, or `off`
-
-Related: [`unit`](#unit)
-
-`unit-race`
------------
-Runs the unit test suite with go's race detector.
-
-Arguments:
-- `pkg=`
-- `case=`
-- `timeout=`
-- `log="stdlog[ ]"` prints logs to stdout
- - `` can be `info` (default), `debug`, `trace`, `warn`, `error`, `critical`, or `off`
-
-Related: [`unit`](#unit)
-
-[btcd]: https://github.com/btcsuite/btcd (github.com/btcsuite/btcd)
-[gometalinter]: https://gopkg.in/alecthomas/gometalinter.v1 (gopkg.in/alecthomas/gometalinter.v1)
-[goveralls]: https://github.com/mattn/goveralls (github.com/mattn/goveralls)
diff --git a/lnd/docs/code_contribution_guidelines.md b/lnd/docs/code_contribution_guidelines.md
deleted file mode 100644
index 3e069d15..00000000
--- a/lnd/docs/code_contribution_guidelines.md
+++ /dev/null
@@ -1,647 +0,0 @@
-### Table of Contents
-1. [Overview](#Overview)
-2. [Minimum Recommended Skillset](#MinSkillset)
-3. [Required Reading](#ReqReading)
-4. [Development Practices](#DevelopmentPractices)
-4.1. [Share Early, Share Often](#ShareEarly)
-4.2. [Testing](#Testing)
-4.3. [Code Documentation and Commenting](#CodeDocumentation)
-4.4. [Model Git Commit Messages](#ModelGitCommitMessages)
-4.5. [Ideal Git Commit Structure](#IdealGitCommitStructure)
-4.6. [Code Spacing](#CodeSpacing)
-4.7. [Protobuf Compilation](#Protobuf)
-4.8. [Additional Style Constraints On Top of gofmt](#ExtraGoFmtStyle)
-4.9. [Pointing to Remote Dependant Branches in Go Modules](#ModulesReplace)
-4.10. [Use of Log Levels](#LogLevels)
-5. [Code Approval Process](#CodeApproval)
-5.1. [Code Review](#CodeReview)
-5.2. [Rework Code (if needed)](#CodeRework)
-5.3. [Acceptance](#CodeAcceptance)
-6. [Contribution Standards](#Standards)
-6.1. [Contribution Checklist](#Checklist)
-6.2. [Licensing of Contributions](#Licensing)
-
-
-
-### 1. Overview
-
-Developing cryptocurrencies is an exciting endeavor that touches a wide variety
-of areas such as wire protocols, peer-to-peer networking, databases,
-cryptography, language interpretation (transaction scripts), adversarial
-threat-modeling, and RPC systems. They also represent a radical shift to the
-current fiscal system and as a result provide an opportunity to help reshape
-the entire financial system. With the advent of the [Lightning Network
-(LN)](https://lightning.network/), new layers are being constructed upon the
-base blockchain layer which have the potential to alleviate many of the
-limitations and constraints inherent in the design of blockchains. There are
-few projects that offer this level of diversity and impact all in one code
-base.
-
-However, as exciting as it is, one must keep in mind that cryptocurrencies
-represent real money and introducing bugs and security vulnerabilities can have
-far more dire consequences than in typical projects where having a small bug is
-minimal by comparison. In the world of cryptocurrencies, even the smallest bug
-in the wrong area can cost people a significant amount of money. For this
-reason, the Lightning Network Daemon (`lnd`) has a formalized and rigorous
-development process (heavily inspired by
-[btcsuite](https://github.com/btcsuite)) which is outlined on this page.
-
-We highly encourage code contributions, however it is imperative that you adhere
-to the guidelines established on this page.
-
-
-
-### 2. Minimum Recommended Skillset
-
-The following list is a set of core competencies that we recommend you possess
-before you really start attempting to contribute code to the project. These are
-not hard requirements as we will gladly accept code contributions as long as
-they follow the guidelines set forth on this page. That said, if you don't have
-the following basic qualifications you will likely find it quite difficult to
-contribute to the core layers of Lightning. However, there are still a number
-of low hanging fruit which can be tackled without having full competency in the
-areas mentioned below.
-
-- A reasonable understanding of bitcoin at a high level (see the
- [Required Reading](#ReqReading) section for the original white paper)
-- A reasonable understanding of the Lightning Network at a high level
-- Experience in some type of C-like language
-- An understanding of data structures and their performance implications
-- Familiarity with unit testing
-- Debugging experience
-- Ability to understand not only the area you are making a change in, but also
- the code your change relies on, and the code which relies on your changed code
-
-Building on top of those core competencies, the recommended skill set largely
-depends on the specific areas you are looking to contribute to. For example,
-if you wish to contribute to the cryptography code, you should have a good
-understanding of the various aspects involved with cryptography such as the
-security and performance implications.
-
-
-
-### 3. Required Reading
-
-- [Effective Go](http://golang.org/doc/effective_go.html) - The entire `lnd`
- project follows the guidelines in this document. For your code to be accepted,
- it must follow the guidelines therein.
-- [Original Satoshi Whitepaper](https://bitcoin.org/bitcoin.pdf) - This is the white paper that started it all. Having a solid
- foundation to build on will make the code much more comprehensible.
-- [Lightning Network Whitepaper](https://lightning.network/lightning-network-paper.pdf) - This is the white paper that kicked off the Layer 2 revolution. Having a good grasp of the concepts of Lightning will make the core logic within the daemon much more comprehensible: Bitcoin Script, off-chain blockchain protocols, payment channels, bi-directional payment channels, relative and absolute time-locks, commitment state revocations, and Segregated Witness.
- - The original LN was written for a rather narrow audience, the paper may be a bit unapproachable to many. Thanks to the Bitcoin community, there exist many easily accessible supplemental resources which can help one see how all the pieces fit together from double-spend protection all the way up to commitment state transitions and Hash Time Locked Contracts (HTLCs):
- - [Lightning Network Summary](https://lightning.network/lightning-network-summary.pdf)
- - [Understanding the Lightning Network 3-Part series](https://bitcoinmagazine.com/articles/understanding-the-lightning-network-part-building-a-bidirectional-payment-channel-1464710791)
- - [Deployable Lightning](https://github.com/ElementsProject/lightning/blob/master/doc/deployable-lightning.pdf)
-
-
-Note that the core design of the Lightning Network has shifted over time as
-concrete implementation and design has expanded our knowledge beyond the
-original white paper. Therefore, specific information outlined in the resources
-above may be a bit out of date. Many implementers are currently working on an
-initial [Lightning Network Specifications](https://github.com/lightningnetwork/lightning-rfc).
-Once the specification is finalized, it will be the most up-to-date
-comprehensive document explaining the Lightning Network. As a result, it will
-be recommended for newcomers to read first in order to get up to speed.
-
-
-
-### 4. Development Practices
-
-Developers are expected to work in their own trees and submit pull requests when
-they feel their feature or bug fix is ready for integration into the master
-branch.
-
-
-
-#### 4.1. Share Early, Share Often
-
-We firmly believe in the share early, share often approach. The basic premise
-of the approach is to announce your plans **before** you start work, and once
-you have started working, craft your changes into a stream of small and easily
-reviewable commits.
-
-This approach has several benefits:
-
-- Announcing your plans to work on a feature **before** you begin work avoids
- duplicate work
-- It permits discussions which can help you achieve your goals in a way that is
- consistent with the existing architecture
-- It minimizes the chances of you spending time and energy on a change that
- might not fit with the consensus of the community or existing architecture and
- potentially be rejected as a result
-- The quicker your changes are merged to master, the less time you will need to
- spend rebasing and otherwise trying to keep up with the main code base
-
-
-
-#### 4.2. Testing
-
-One of the major design goals of all of `lnd`'s packages and the daemon itself is
-to aim for a high degree of test coverage. This is financial software so bugs
-and regressions in the core logic can cost people real money. For this reason
-every effort must be taken to ensure the code is as accurate and bug-free as
-possible. Thorough testing is a good way to help achieve that goal.
-
-Unless a new feature you submit is completely trivial, it will probably be
-rejected unless it is also accompanied by adequate test coverage for both
-positive and negative conditions. That is to say, the tests must ensure your
-code works correctly when it is fed correct data as well as incorrect data
-(error paths).
-
-
-Go provides an excellent test framework that makes writing test code and
-checking coverage statistics straightforward. For more information about the
-test coverage tools, see the [golang cover blog post](http://blog.golang.org/cover).
-
-A quick summary of test practices follows:
-- All new code should be accompanied by tests that ensure the code behaves
- correctly when given expected values, and, perhaps even more importantly, that
- it handles errors gracefully
-- When you fix a bug, it should be accompanied by tests which exercise the bug
- to both prove it has been resolved and to prevent future regressions
-- Changes to publicly exported packages such as
- [brontide](https://github.com/lightningnetwork/lnd/tree/master/brontide) should
- be accompanied by unit tests exercising the new or changed behavior.
-- Changes to behavior within the daemon's interaction with the P2P protocol,
-  or RPC's will need to be accompanied by integration tests which use the
-  [`networkHarness` framework](https://github.com/lightningnetwork/lnd/blob/master/lntest/harness.go)
-  contained within `lnd`. For example integration tests, see
-  [`lnd_test.go`](https://github.com/lightningnetwork/lnd/blob/master/lnd_test.go#L181).
-- The itest log files are automatically scanned for `[ERR]` lines. There
- shouldn't be any of those in the logs, see [Use of Log Levels](#LogLevels).
-
-Throughout the process of contributing to `lnd`, you'll likely also be
-extensively using the commands within our `Makefile`. As a result, we recommend
-[perusing the make file documentation](https://github.com/lightningnetwork/lnd/blob/master/docs/MAKEFILE.md).
-
-
-
-#### 4.3. Code Documentation and Commenting
-
-- At a minimum every function must be commented with its intended purpose and
- any assumptions that it makes
- - Function comments must always begin with the name of the function per
- [Effective Go](http://golang.org/doc/effective_go.html)
- - Function comments should be complete sentences since they allow a wide
- variety of automated presentations such as [godoc.org](https://godoc.org)
- - The general rule of thumb is to look at it as if you were completely
- unfamiliar with the code and ask yourself, would this give me enough
- information to understand what this function does and how I'd probably want
- to use it?
-- Exported functions should also include detailed information the caller of the
- function will likely need to know and/or understand:
-
-**WRONG**
-```go
-// generates a revocation key
-func DeriveRevocationPubkey(commitPubKey *btcec.PublicKey,
- revokePreimage []byte) *btcec.PublicKey {
-```
-**RIGHT**
-```go
-// DeriveRevocationPubkey derives the revocation public key given the
-// counterparty's commitment key, and revocation preimage derived via a
-// pseudo-random-function. In the event that we (for some reason) broadcast a
-// revoked commitment transaction, then if the other party knows the revocation
-// preimage, then they'll be able to derive the corresponding private key to
-// this private key by exploiting the homomorphism in the elliptic curve group:
-// * https://en.wikipedia.org/wiki/Group_homomorphism#Homomorphisms_of_abelian_groups
-//
-// The derivation is performed as follows:
-//
-// revokeKey := commitKey + revokePoint
-// := G*k + G*h
-// := G * (k+h)
-//
-// Therefore, once we divulge the revocation preimage, the remote peer is able to
-// compute the proper private key for the revokeKey by computing:
-// revokePriv := commitPriv + revokePreimge mod N
-//
-// Where N is the order of the sub-group.
-func DeriveRevocationPubkey(commitPubKey *btcec.PublicKey,
- revokePreimage []byte) *btcec.PublicKey {
-```
-- Comments in the body of the code are highly encouraged, but they should
- explain the intention of the code as opposed to just calling out the
- obvious
-
-**WRONG**
-```Go
-// return err if amt is less than 546
-if amt < 546 {
- return err
-}
-```
-**RIGHT**
-```go
-// Treat transactions with amounts less than the amount which is considered dust
-// as non-standard.
-if amt < 546 {
- return err
-}
-```
-**NOTE:** The above should really use a constant as opposed to a magic number,
-but it was left as a magic number to show how much of a difference a good
-comment can make.
-
-
-
-#### 4.4. Model Git Commit Messages
-
-This project prefers to keep a clean commit history with well-formed commit
-messages. This section illustrates a model commit message and provides a bit
-of background for it. This content was originally created by Tim Pope and made
-available on his website, however that website is no longer active, so it is
-being provided here.
-
-Here’s a model Git commit message:
-
-```
-Short (50 chars or less) summary of changes
-
-More detailed explanatory text, if necessary. Wrap it to about 72
-characters or so. In some contexts, the first line is treated as the
-subject of an email and the rest of the text as the body. The blank
-line separating the summary from the body is critical (unless you omit
-the body entirely); tools like rebase can get confused if you run the
-two together.
-
-Write your commit message in the present tense: "Fix bug" and not "Fixed
-bug." This convention matches up with commit messages generated by
-commands like git merge and git revert.
-
-Further paragraphs come after blank lines.
-
-- Bullet points are okay, too
-- Typically a hyphen or asterisk is used for the bullet, preceded by a
- single space, with blank lines in between, but conventions vary here
-- Use a hanging indent
-```
-
-Here are some of the reasons why wrapping your commit messages to 72 columns is
-a good thing.
-
-- git log doesn't do any special wrapping of the commit messages. With
- the default pager of less -S, this means your paragraphs flow far off the edge
- of the screen, making them difficult to read. On an 80 column terminal, if we
- subtract 4 columns for the indent on the left and 4 more for symmetry on the
- right, we’re left with 72 columns.
-- git format-patch --stdout converts a series of commits to a series of emails,
- using the messages for the message body. Good email netiquette dictates we
- wrap our plain text emails such that there’s room for a few levels of nested
- reply indicators without overflow in an 80 column terminal.
-
-In addition to the Git commit message structure adhered to within the daemon
-all short-[commit messages are to be prefixed according to the convention
-outlined in the Go project](https://golang.org/doc/contribute.html#change). All
-commits should begin with the subsystem or package primarily affected by the
-change. In the case of a widespread change, the packages are to be delimited by
-either a '+' or a ','. This prefix seems minor but can be extremely helpful in
-determining the scope of a commit at a glance, or when bug hunting to find a
-commit which introduced a bug or regression.
-
-
-
-#### 4.5. Ideal Git Commit Structure
-
-Within the project we prefer small, contained commits for a pull request over a
-single giant commit that touches several files/packages. Ideal commits build on
-their own, in order to facilitate easy usage of tools like `git bisect` and `git
-cherry-pick`. It's preferred that commits contain an isolated change in a
-single package. In this case, the commit header message should begin with the
-prefix of the modified package. For example, if a commit was made to modify the
-`lnwallet` package, it should start with `lnwallet: `.
-
-In the case of changes that only build in tandem with changes made in other
-packages, it is permitted for a single commit to be made which contains several
-prefixes such as: `lnwallet+htlcswitch`. This prefix structure along with the
-requirement for atomic contained commits (when possible) make things like
-scanning the set of commits and debugging easier. In the case of changes that
-touch several packages, and can only compile with the change across several
-packages, a `multi: ` prefix should be used.
-
-Examples of common patterns w.r.t commit structures within the project:
-
- * It is common that during the work on a PR, existing bugs are found and
- fixed. If they can be fixed in isolation, they should have their own
- commit.
- * File restructuring like moving a function to another file or changing order
- of functions: with a separate commit because it is much easier to review
- the real changes that go on top of the restructuring.
- * Preparatory refactorings that are functionally equivalent: own commit.
- * Project or package wide file renamings should be in their own commit.
- * Ideally if a new package/struct/sub-system is added in a PR, there should
-   be a single commit which adds the new functionality, with follow up
-   individual commits that begin to integrate the functionality within the
-   codebase.
-
-
-
-#### 4.6. Code Spacing
-
-Blocks of code within `lnd` should be segmented into logical stanzas of
-operation. Such spacing makes the code easier to follow at a skim, and reduces
-unnecessary line noise. Coupled with the commenting scheme specified above,
-proper spacing allows readers to quickly scan code, extracting semantics quickly.
-Functions should _not_ just be laid out as a bare contiguous block of code.
-
-**WRONG**
-```go
- witness := make([][]byte, 4)
- witness[0] = nil
- if bytes.Compare(pubA, pubB) == -1 {
- witness[1] = sigB
- witness[2] = sigA
- } else {
- witness[1] = sigA
- witness[2] = sigB
- }
- witness[3] = witnessScript
- return witness
-```
-**RIGHT**
-```go
- witness := make([][]byte, 4)
-
- // When spending a p2wsh multi-sig script, rather than an OP_0, we add
- // a nil stack element to eat the extra pop.
- witness[0] = nil
-
- // When initially generating the witnessScript, we sorted the serialized
- // public keys in descending order. So we do a quick comparison in order
- // to ensure the signatures appear on the Script Virtual Machine stack in
- // the correct order.
- if bytes.Compare(pubA, pubB) == -1 {
- witness[1] = sigB
- witness[2] = sigA
- } else {
- witness[1] = sigA
- witness[2] = sigB
- }
-
- // Finally, add the preimage as the last witness element.
- witness[3] = witnessScript
-
- return witness
-```
-
-Additionally, we favor spacing between stanzas within syntax like: switch case
-statements and select statements.
-
-**WRONG**
-```go
- switch {
- case a:
-
- case b:
-
- case c:
-
- case d:
-
- default:
-
- }
-```
-**RIGHT**
-```go
- switch {
- // Brief comment detailing instances of this case (repeat below).
- case a:
-
-
- case b:
-
-
- case c:
-
-
- case d:
-
-
- default:
-
- }
-```
-
-If one is forced to wrap lines of function arguments that exceed the 80
-character limit, then a new line should be inserted before the first stanza in
-the comment body.
-
-**WRONG**
-```go
- func foo(a, b, c,
- d, e) er.R {
- var a int
- }
-```
-**RIGHT**
-```go
- func foo(a, b, c,
- d, e) er.R {
-
- var a int
- }
-```
-
-
-
-#### 4.7. Protobuf Compilation
-
-The `lnd` project uses `protobuf`, and its extension [`gRPC`](https://www.grpc.io) in
-several areas and as the primary RPC interface. In order to ensure uniformity
-of all protos checked in, we require that all contributors pin against the
-_exact same_ version of `protoc`. As of the writing of this article, the `lnd`
-project uses [v3.4.0](https://github.com/google/protobuf/releases/tag/v3.4.0)
-of `protoc`.
-
-The following two libraries must be installed with the exact commit hash as
-described in [lnrpc README](https://github.com/lightningnetwork/lnd/blob/master/lnrpc/README.md)
-otherwise the CI pipeline on Travis will fail:
-- grpc-ecosystem/grpc-gateway
-- golang/protobuf
-
-For detailed instructions on how to compile modifications to `lnd`'s `protobuf`
-definitions, check out the [lnrpc README](https://github.com/lightningnetwork/lnd/blob/master/lnrpc/README.md).
-
-
-
-#### 4.8. Additional Style Constraints On Top of `gofmt`
-
-Before a PR is submitted, the proposer should ensure that the file passes the
-set of linting scripts run by `make lint`. These include `gofmt`. In addition
-to `gofmt` we've opted to enforce the following style guidelines.
-
- * ALL columns (on a best effort basis) should be wrapped to 80 line columns.
- Editors should be set to treat a tab as 8 spaces.
- * When wrapping a line that contains a function call as the unwrapped line
- exceeds the column limit, the close paren should be placed on its own
- line. Additionally, all arguments should begin in a new line after the
- open paren.
-
- **WRONG**
- ```go
- value, err := bar(a,
- a, b, c)
- ```
-
- **RIGHT**
- ```go
- value, err := bar(
- a, a, b, c,
- )
- ```
-
-Note that the above guidelines don't apply to log messages. For log messages,
-committers should attempt to minimize the number of lines utilized, while still
-adhering to the 80-character column limit.
-
-
-
-#### 4.9 Pointing to Remote Dependant Branches in Go Modules
-
-It's common that a developer may need to make a change in a dependent project
-of `lnd` such as `btcd`, `neutrino`, `btcwallet`, etc. In order to test changes
-with our testing infrastructure, or simply make a PR into `lnd` that will build
-without any further work, the `go.mod` and `go.sum` files will need to be
-updated. Luckily, the `go mod` command has a handy tool to do this
-automatically so developers don't need to manually edit the `go.mod` file:
-```
- go mod edit -replace=IMPORT-PATH-IN-LND@LND-VERSION=DEV-FORK-IMPORT-PATH@DEV-FORK-VERSION
-```
-
-Here's an example replacing the `lightning-onion` version checked into `lnd` with a version in roasbeef's fork:
-```
- go mod edit -replace=github.com/lightningnetwork/lightning-onion@v0.0.0-20180605012408-ac4d9da8f1d6=github.com/roasbeef/lightning-onion@2e5ae87696046298365ab43bcd1cf3a7a1d69695
-```
-
-
-
-#### 4.10 Use of Log Levels
-
-There are six log levels available: `trace`, `debug`, `info`, `warn`, `error` and `critical`.
-
-Only use `error` for internal errors that are never expected to happen during
-normal operation. No event triggered by external sources (rpc, chain backend,
-etc) should lead to an `error` log.
-
-
-
-### 5. Code Approval Process
-
-This section describes the code approval process that is used for code
-contributions. This is how to get your changes into `lnd`.
-
-
-
-#### 5.1. Code Review
-
-All code which is submitted will need to be reviewed before inclusion into the
-master branch. This process is performed by the project maintainers and usually
-other committers who are interested in the area you are working in as well.
-
-##### Code Review Timeframe
-
-The timeframe for a code review will vary greatly depending on factors such as
-the number of other pull requests which need to be reviewed, the size and
-complexity of the contribution, how well you followed the guidelines presented
-on this page, and how easy it is for the reviewers to digest your commits. For
-example, if you make one monolithic commit that makes sweeping changes to things
-in multiple subsystems, it will obviously take much longer to review. You will
-also likely be asked to split the commit into several smaller, and hence more
-manageable, commits.
-
-Keeping the above in mind, most small changes will be reviewed within a few
-days, while large or far reaching changes may take weeks. This is a good reason
-to stick with the [Share Early, Share Often](#ShareEarly) development practice
-outlined above.
-
-##### What is the review looking for?
-
-The review is mainly ensuring the code follows the [Development Practices](#DevelopmentPractices)
-and [Code Contribution Standards](#Standards). However, there are a few other
-checks which are generally performed as follows:
-
-- The code is stable and has no stability or security concerns
-- The code is properly using existing APIs and generally fits well into the
- overall architecture
-- The change is not something which is deemed inappropriate by community
- consensus
-
-
-
-#### 5.2. Rework Code (if needed)
-
-After the code review, the change will be accepted immediately if no issues are
-found. If there are any concerns or questions, you will be provided with
-feedback along with the next steps needed to get your contribution merged with
-master. In certain cases the code reviewer(s) or interested committers may help
-you rework the code, but generally you will simply be given feedback for you to
-make the necessary changes.
-
-During the process of responding to review comments, we prefer that changes be
-made with [fixup commits](https://robots.thoughtbot.com/autosquashing-git-commits).
-The reason for this is twofold: it makes it easier for the reviewer to see
-what changes have been made between versions (since Github doesn't easily show
-prior versions like Critique) and it makes it easier on the PR author as they
-can set it to auto squash the fix up commits on rebase.
-
-This process will continue until the code is finally accepted.
-
-
-
-#### 5.3. Acceptance
-
-Once your code is accepted, it will be integrated with the master branch. After
-2+ (sometimes 1) LGTM's (approvals) are given on a PR, it's eligible to land in
-master. At this final phase, it may be necessary to rebase the PR in order to
-resolve any conflicts and also squash fix up commits. Ideally, the set of
-[commits by new contributors are PGP signed](https://git-scm.com/book/en/v2/Git-Tools-Signing-Your-Work),
-although this isn't a strong requirement (but we prefer it!). In order to keep
-these signatures intact, we prefer using merge commits. PR proposers can use
-`git rebase --signoff` to sign and rebase at the same time as a final step.
-
-Rejoice as you will now be listed as a [contributor](https://github.com/lightningnetwork/lnd/graphs/contributors)!
-
-
-
-### 6. Contribution Standards
-
-
-
-#### 6.1. Contribution Checklist
-
-- [ ] All changes are Go version 1.12 compliant
-- [ ] The code being submitted is commented according to [Code Documentation and Commenting](#CodeDocumentation)
-- [ ] For new code: Code is accompanied by tests which exercise both
- the positive and negative (error paths) conditions (if applicable)
-- [ ] For bug fixes: Code is accompanied by new tests which trigger
- the bug being fixed to prevent regressions
-- [ ] Any new logging statements use an appropriate subsystem and
- logging level
-- [ ] Code has been formatted with `go fmt`
-- [ ] For code and documentation: lines are wrapped at 80 characters
- (the tab character should be counted as 8 characters, not 4, as some IDEs do
- per default)
-- [ ] Running `make check` does not fail any tests
-- [ ] Running `go vet` does not report any issues
-- [ ] Running `make lint` does not report any **new** issues that
- did not already exist
-- [ ] All commits build properly and pass tests. Only in exceptional
- cases it can be justifiable to violate this condition. In that case, the
- reason should be stated in the commit message.
-- [ ] Commits have a logical structure according to [Ideal Git Commit Structure](#IdealGitCommitStructure).
-
-
-
-#### 6.2. Licensing of Contributions
-
-All contributions must be licensed with the
-[MIT license](https://github.com/lightningnetwork/lnd/blob/master/LICENSE). This is
-the same license as all of the code found within lnd.
-
-
-## Acknowledgements
-This document was heavily inspired by a [similar document outlining the code
-contribution](https://github.com/btcsuite/btcd/blob/master/docs/code_contribution_guidelines.md)
-guidelines for btcd.
diff --git a/lnd/docs/configuring_tor.md b/lnd/docs/configuring_tor.md
deleted file mode 100644
index f6372f59..00000000
--- a/lnd/docs/configuring_tor.md
+++ /dev/null
@@ -1,184 +0,0 @@
-# Table of Contents
-1. [Overview](#overview)
-2. [Getting Started](#getting-started)
-3. [Tor Stream Isolation](#tor-stream-isolation)
-4. [Authentication](#authentication)
-5. [Listening for Inbound Connections](#listening-for-inbound-connections)
-
-## Overview
-
-`lnd` currently has complete support for using Lightning over
-[Tor](https://www.torproject.org/). Usage of Lightning over Tor is valuable as
-routing nodes no longer need to potentially expose their location via their
-advertised IP address. Additionally, leaf nodes can also protect their location
-by using Tor for anonymous networking to establish connections.
-
-With widespread usage of Onion Services within the network, concerns about the
-difficulty of proper NAT traversal are alleviated, as usage of onion services
-allows nodes to accept inbound connections even if they're behind a NAT. At the
-time of writing this documentation, `lnd` supports both types of onion services:
-v2 and v3.
-
-Before following the remainder of this documentation, you should ensure that you
-already have Tor installed locally. **If you want to run v3 Onion Services, make
-sure that you run at least version 0.3.3.6.**
-Official instructions to install the latest release of Tor can be found
-[here](https://www.torproject.org/docs/tor-doc-unix.html.en).
-
-**NOTE**: This documentation covers how to ensure that `lnd`'s _Lightning
-protocol traffic_ is tunneled over Tor. Users must ensure that when also running
-a Bitcoin full-node, that it is also proxying all traffic over Tor. If using the
-`neutrino` backend for `lnd`, then it will automatically also default to Tor
-usage if active within `lnd`.
-
-## Getting Started
-
-First, you'll want to run `tor` locally before starting up `lnd`. Depending on
-how you installed Tor, you'll find the configuration file at
-`/usr/local/etc/tor/torrc`. Here's an example configuration file that we'll be
-using for the remainder of the tutorial:
-```
-SOCKSPort 9050
-Log notice stdout
-ControlPort 9051
-CookieAuthentication 1
-```
-
-With the configuration file created, you'll then want to start the Tor daemon:
-```
-⛰ tor
-Feb 05 17:02:06.501 [notice] Tor 0.3.1.8 (git-ad5027f7dc790624) running on Darwin with Libevent 2.1.8-stable, OpenSSL 1.0.2l, Zlib 1.2.8, Liblzma N/A, and Libzstd N/A.
-Feb 05 17:02:06.502 [notice] Tor can't help you if you use it wrong! Learn how to be safe at https://www.torproject.org/download/download#warning
-Feb 05 17:02:06.502 [notice] Read configuration file "/usr/local/etc/tor/torrc".
-Feb 05 17:02:06.506 [notice] Opening Socks listener on 127.0.0.1:9050
-Feb 05 17:02:06.506 [notice] Opening Control listener on 127.0.0.1:9051
-```
-
-Once the `tor` daemon has started and it has finished bootstrapping, you'll see this in the logs:
-```
-Feb 05 17:02:06.000 [notice] Bootstrapped 0%: Starting
-Feb 05 17:02:07.000 [notice] Starting with guard context "default"
-Feb 05 17:02:07.000 [notice] Bootstrapped 80%: Connecting to the Tor network
-Feb 05 17:02:07.000 [notice] Bootstrapped 85%: Finishing handshake with first hop
-Feb 05 17:02:08.000 [notice] Bootstrapped 90%: Establishing a Tor circuit
-Feb 05 17:02:11.000 [notice] Tor has successfully opened a circuit. Looks like client functionality is working.
-Feb 05 17:02:11.000 [notice] Bootstrapped 100%: Done
-```
-
-This indicates the daemon is fully bootstrapped and ready to proxy connections.
-At this point, we can now start `lnd` with the relevant arguments:
-
-```
-⛰ ./lnd -h
-
-
-
-Tor:
- --tor.active Allow outbound and inbound connections to be routed through Tor
- --tor.socks= The host:port that Tor's exposed SOCKS5 proxy is listening on (default: localhost:9050)
- --tor.dns= The DNS server as host:port that Tor will use for SRV queries - NOTE must have TCP resolution enabled (default: soa.nodes.lightning.directory:53)
- --tor.streamisolation Enable Tor stream isolation by randomizing user credentials for each connection.
- --tor.control= The host:port that Tor is listening on for Tor control connections (default: localhost:9051)
- --tor.targetipaddress= IP address that Tor should use as the target of the hidden service
- --tor.password= The password used to arrive at the HashedControlPassword for the control port. If provided, the HASHEDPASSWORD authentication method will be used instead of the SAFECOOKIE one.
- --tor.v2 Automatically set up a v2 onion service to listen for inbound connections
- --tor.v3 Automatically set up a v3 onion service to listen for inbound connections
- --tor.privatekeypath= The path to the private key of the onion service being created
-```
-
-There are a couple things here, so let's dissect them. The `--tor.active` flag
-allows `lnd` to route all outbound and inbound connections through Tor.
-
-Outbound connections are possible with the use of the `--tor.socks` and
-`--tor.dns` arguments. The `--tor.socks` argument should point to the interface
-that the `Tor` daemon is listening on to proxy connections. The `--tor.dns` flag
-is required in order to be able to properly automatically bootstrap a set of
-peer connections. The `tor` daemon doesn't currently support proxying `SRV`
-queries over Tor. So instead, we need to connect directly to the authoritative
-DNS server over TCP, in order to query for `SRV` records that we can use to
-bootstrap our connections.
-
-Inbound connections are possible due to `lnd` automatically creating an onion
-service. A path to save the onion service's private key can be specified with
-the `--tor.privatekeypath` flag.
-
-Most of these arguments have defaults, so as long as they apply to you, routing
-all outbound and inbound connections through Tor can simply be done with either
-v2 or v3 onion services:
-```shell
-⛰ ./lnd --tor.active --tor.v2
-```
-```shell
-⛰ ./lnd --tor.active --tor.v3
-```
-See [Listening for Inbound Connections](#listening-for-inbound-connections) for
-more info about allowing inbound connections via Tor.
-
-Outbound support only can also be used with:
-```shell
-⛰ ./lnd --tor.active
-```
-
-This will allow you to make all outgoing connections over Tor. Listening is
-disabled to prevent inadvertent leaks.
-
-## Tor Stream Isolation
-
-Our support for Tor also has an additional privacy enhancing mode: stream
-isolation. Usage of this mode means that Tor will always use a _new circuit_ for
-each connection. This added feature means that it's harder to correlate
-connections; otherwise, several applications using Tor might share the same
-circuit.
-
-Activating stream isolation is very straightforward, we only require the
-specification of an additional argument:
-```
-⛰ ./lnd --tor.active --tor.streamisolation
-```
-
-## Authentication
-
-In order for `lnd` to communicate with the Tor daemon securely, it must first
-establish an authenticated connection. `lnd` supports the following Tor control
-authentication methods (arguably, from most to least secure):
-
-* `SAFECOOKIE`: This authentication method relies on a cookie created and
- stored by the Tor daemon and is the default assuming the Tor daemon supports
- it by specifying `CookieAuthentication 1` in its configuration file.
-* `HASHEDPASSWORD`: This authentication method is stateless as it relies on a
- password hash scheme and may be useful if the Tor daemon is operating under a
- separate host from the `lnd` node. The password hash can be obtained through
- the Tor daemon with `tor --hash-password PASSWORD`, which should then be
- specified in Tor's configuration file with `HashedControlPassword
- PASSWORD_HASH`. Finally, to use it within `lnd`, the `--tor.password` flag
- should be provided with the corresponding password.
-* `NULL`: To bypass any authentication at all, this scheme can be used instead.
- It doesn't require any additional flags to `lnd` or configuration options to
- the Tor daemon.
-
-## Listening for Inbound Connections
-
-In order to listen for inbound connections through Tor, an onion service must be
-created. There are two types of onion services: v2 and v3. v3 onion services
-are the latest generation of onion services and they provide a number of
-advantages over the legacy v2 onion services. To learn more about these
-benefits, see [Intro to Next Gen Onion Services](https://trac.torproject.org/projects/tor/wiki/doc/NextGenOnions).
-
-Both types can be created and used automatically by `lnd`. Specifying which type
-should be used can easily be done by either using the `tor.v2` or `tor.v3` flag.
-To prevent unintentional leaking of identifying information, it is also necessary
-to add the flag `listen=localhost`.
-
-For example, v3 onion services can be used with the following flags:
-```
-⛰ ./lnd --tor.active --tor.v3 --listen=localhost
-```
-
-This will automatically create a hidden service for your node to use to listen
-for inbound connections and advertise itself to the network. The onion service's
-private key is saved to a file named `v2_onion_private_key` or
-`v3_onion_private_key` depending on the type of onion service used in `lnd`'s
-base directory. This will allow `lnd` to recreate the same hidden service upon
-restart. If you wish to generate a new onion service, you can simply delete this
-file. The path to this private key file can also be modified with the
-`--tor.privatekeypath` argument.
diff --git a/lnd/docs/debugging_lnd.md b/lnd/docs/debugging_lnd.md
deleted file mode 100644
index 44a07d92..00000000
--- a/lnd/docs/debugging_lnd.md
+++ /dev/null
@@ -1,47 +0,0 @@
-# Table of Contents
-1. [Overview](#overview)
-1. [Debug Logging](#debug-logging)
-1. [Capturing pprof data with `lnd`](#capturing-pprof-data-with-lnd)
-
-## Overview
-
-`lnd` ships with a few useful features for debugging, such as a built-in
-profiler and tunable logging levels. If you need to submit a bug report
-for `lnd`, it may be helpful to capture debug logging and performance
-data ahead of time.
-
-## Debug Logging
-
-You can enable debug logging in `lnd` by passing the `--debuglevel` flag. For
-example, to increase the log level from `info` to `debug`:
-
-```
-$ lnd --debuglevel=debug
-```
-
-You may also specify logging per-subsystem, like this:
-
-```
-$ lnd --debuglevel==,=,...
-```
-
-## Capturing pprof data with `lnd`
-
-`lnd` has a built-in feature which allows you to capture profiling data at
-runtime using [pprof](https://golang.org/pkg/runtime/pprof/), a profiler for
-Go. The profiler has negligible performance overhead during normal operations
-(unless you have explicitly enabled CPU profiling).
-
-To enable this ability, start `lnd` with the `--profile` option using a free port.
-
-```
-$ lnd --profile=9736
-```
-
-Now, with `lnd` running, you can use the pprof endpoint on port 9736 to collect
-runtime profiling data. You can fetch this data using `curl` like so:
-
-```
-$ curl http://localhost:9736/debug/pprof/goroutine?debug=1
-...
-```
diff --git a/lnd/docs/etcd.md b/lnd/docs/etcd.md
deleted file mode 100644
index cc107639..00000000
--- a/lnd/docs/etcd.md
+++ /dev/null
@@ -1,84 +0,0 @@
-# Experimental etcd support in LND
-
-With the recent introduction of the `kvdb` interface LND can support multiple
-database backends allowing experimentation with the storage model as well as
-improving robustness through e.g. replicating essential data.
-
-Building on `kvdb` in v0.11.0 we're adding experimental [etcd](https://etcd.io)
-support to LND. As this is an unstable feature heavily in development, it still
-has *many* rough edges for the time being. It is therefore highly recommended to
-not use LND on `etcd` in any kind of production environment especially not
-on bitcoin mainnet.
-
-## Building LND with etcd support
-
-To create a dev build of LND with etcd support use the following command:
-
-```
-make tags="kvdb_etcd"
-```
-
-The important tag is the `kvdb_etcd`, without which the binary is built without
-the etcd driver.
-
-For development it is advised to set the `GOFLAGS` environment variable to
-`"-tags=test"` otherwise `gopls` won't work on code in `channeldb/kvdb/etcd`
-directory.
-
-## Running a local etcd instance for testing
-
-To start your local etcd instance for testing run:
-
-```
-./etcd \
- --auto-tls \
- --advertise-client-urls=https://127.0.0.1:2379 \
- --listen-client-urls=https://0.0.0.0:2379 \
- --max-txn-ops=16384 \
- --max-request-bytes=104857600
-```
-
-The large `max-txn-ops` and `max-request-bytes` values are currently required in
-case of running LND with the full graph in etcd. Upcoming versions will split
-the database to local and replicated parts and only essential parts will remain
-in the replicated database, removing the requirement for these additional
-settings. These parameters have been tested to work with testnet LND.
-
-## Configuring LND to run on etcd
-
-To run LND with etcd, additional configuration is needed, specified either
-through command line flags or in `lnd.conf`.
-
-Sample command line:
-
-```
-./lnd-debug \
- --db.backend=etcd \
- --db.etcd.host=127.0.0.1:2379 \
- --db.etcd.certfile=/home/user/etcd/bin/default.etcd/fixtures/client/cert.pem \
- --db.etcd.keyfile=/home/user/etcd/bin/default.etcd/fixtures/client/key.pem \
- --db.etcd.insecure_skip_verify
-```
-
-Sample `lnd.conf` (with other setting omitted):
-
-```
-[db]
-backend=etcd
-etcd.host=127.0.0.1:2379
-etcd.certfile=/home/user/etcd/bin/default.etcd/fixtures/client/cert.pem
-etcd.keyfile=/home/user/etcd/bin/default.etcd/fixtures/client/key.pem
-etcd.insecure_skip_verify=true
-```
-
-Optionally users can specify `db.etcd.user` and `db.etcd.pass` for db user
-authentication.
-
-## Migrating existing channel.db to etcd
-
-This is currently not supported.
-
-## Disclaimer
-
-As mentioned before this is an experimental feature, and with that your data
-may be lost. Use at your own risk!
diff --git a/lnd/docs/fuzz.md b/lnd/docs/fuzz.md
deleted file mode 100644
index 17634462..00000000
--- a/lnd/docs/fuzz.md
+++ /dev/null
@@ -1,54 +0,0 @@
-# Fuzzing LND #
-
-The `fuzz` package is organized into subpackages which are named after the `lnd` package they test. Each subpackage has its own set of fuzz targets.
-
-### Setup and Installation ###
-This section will cover setup and installation of `go-fuzz` and fuzzing binaries.
-
-* First, we must get `go-fuzz`.
-```
-$ go get -u github.com/dvyukov/go-fuzz/...
-```
-* The following is a command to build all fuzzing harnesses for a specific package.
-```
-$ cd fuzz/
-$ find * -maxdepth 1 -regex '[A-Za-z0-9\-_.]'* -not -name fuzz_utils.go | sed 's/\.go$//1' | xargs -I % sh -c 'go-fuzz-build -func Fuzz_% -o -%-fuzz.zip github.com/lightningnetwork/lnd/fuzz/'
-```
-
-* This may take a while since this will create zip files associated with each fuzzing target.
-
-* Now, run `go-fuzz` with `workdir` set as below!
-```
-$ go-fuzz -bin=<.zip archive here> -workdir= -procs=
-```
-
-`go-fuzz` will print out log lines every couple of seconds. Example output:
-```
-2017/09/19 17:44:23 workers: 8, corpus: 23 (3s ago), crashers: 1, restarts: 1/748, execs: 400690 (16694/sec), cover: 394, uptime: 24s
-```
-Corpus is the number of items in the corpus. `go-fuzz` may add valid inputs to
-the corpus in an attempt to gain more coverage. Crashers is the number of inputs
-resulting in a crash. The inputs, and their outputs are logged in:
-`fuzz///crashers`. `go-fuzz` also creates a `suppressions` directory
-of stacktraces to ignore so that it doesn't create duplicate stacktraces.
-Cover is a number representing edge coverage of the program being fuzzed.
-
-### Brontide ###
-The brontide fuzzers need to be run with a `-timeout` flag of 20 seconds or greater since there is a lot of machine state that must be printed on panic.
-
-### Corpus ###
-Fuzzing generally works best with a corpus that is of minimal size while achieving the maximum coverage. However, `go-fuzz` automatically minimizes the corpus in-memory before fuzzing so a large corpus shouldn't make a difference - edge coverage is all that really matters.
-
-### Test Harness ###
-If you take a look at the test harnesses that are used, you will see that they all consist of one function:
-```
-func Fuzz(data []byte) int
-```
-If:
-
-- `-1` is returned, the fuzzing input is ignored
-- `0` is returned, `go-fuzz` will add the input to the corpus and deprioritize it in future mutations.
-- `1` is returned, `go-fuzz` will add the input to the corpus and prioritize it in future mutations.
-
-### Conclusion ###
-Citizens, do your part and `go-fuzz` `lnd` today!
diff --git a/lnd/docs/grpc/c#.md b/lnd/docs/grpc/c#.md
deleted file mode 100644
index 913a333e..00000000
--- a/lnd/docs/grpc/c#.md
+++ /dev/null
@@ -1,198 +0,0 @@
-# How to write a C# gRPC client for the Lightning Network Daemon
-
-This section enumerates what you need to do to write a client that communicates with `lnd` in C#.
-
-
-### Prerequisites
-
-* .Net Core [SDK](https://dotnet.microsoft.com/download)
-* If using Windows, a unix terminal such as [Cygwin](https://www.cygwin.com/)
-
-
-### Setup and Installation
-
-`lnd` uses the `gRPC` protocol for communication with clients like `lncli`.
-
-.NET natively supports gRPC proto files and generates the necessary C# classes. You can see the official Microsoft gRPC documentation [here](https://docs.microsoft.com/en-gb/aspnet/core/grpc/?view=aspnetcore-3.1)
-
-This assumes you are using a Windows machine, but it applies equally to Mac and Linux.
-
-Create a new `.net core` console application called `lndclient` at your root directory (On Windows : `C:/`).
-
-Create a folder `Grpc` in the root of your project and fetch the lnd proto files
-
-```bash
-mkdir Grpc
-curl -o Grpc/rpc.proto -s https://raw.githubusercontent.com/lightningnetwork/lnd/master/lnrpc/rpc.proto
-```
-
-Install `Grpc.Tools`, `Google.Protobuf`, `Grpc.Core` using NuGet or manually with `dotnet add`:
-
-```bash
-dotnet add package Grpc.Tools
-dotnet add package Google.Protobuf
-dotnet add package Grpc.Core
-```
-
-Add the `rpc.proto` file to the `.csproj` file in an ItemGroup. (In Visual Studio you can do this by unloading the project, editing the `.csproj` file and then reloading it)
-
-```
-
-
-
-```
-
-You're done! Build the project and verify that it works.
-
-#### Imports and Client
-
-Use the code below to set up a channel and client to connect to your `lnd` node:
-
-```c#
-
-using System.Collections.Generic;
-using System.IO;
-using System.Threading.Tasks;
-using Grpc.Core;
-using Lnrpc;
-...
-
-// Due to updated ECDSA generated tls.cert we need to let gprc know that
-// we need to use that cipher suite otherwise there will be a handshake
-// error when we communicate with the lnd rpc server.
-System.Environment.SetEnvironmentVariable("GRPC_SSL_CIPHER_SUITES", "HIGH+ECDSA");
-
-// Lnd cert is at AppData/Local/Lnd/tls.cert on Windows
-// ~/.lnd/tls.cert on Linux and ~/Library/Application Support/Lnd/tls.cert on Mac
-var cert = File.ReadAllText();
-
-var sslCreds = new SslCredentials(cert);
-var channel = new Grpc.Core.Channel("localhost:10009", sslCreds);
-var client = new Lnrpc.Lightning.LightningClient(channel);
-
-```
-
-### Examples
-
-Let's walk through some examples of C# `gRPC` clients. These examples assume that you have at least two `lnd` nodes running, the RPC location of one of which is at the default `localhost:10009`, with an open channel between the two nodes.
-
-#### Simple RPC
-
-```c#
-// Retrieve and display the wallet balance
-// Use "WalletBalanceAsync" if in async context
-var response = client.WalletBalance(new WalletBalanceRequest());
-Console.WriteLine(response);
-```
-
-#### Response-streaming RPC
-
-```c#
-var request = new InvoiceSubscription();
-using (var call = client.SubscribeInvoices(request))
-{
- while (await call.ResponseStream.MoveNext())
- {
- var invoice = call.ResponseStream.Current;
- Console.WriteLine(invoice.ToString());
- }
-}
-```
-
-Now, create an invoice for your node at `localhost:10009` and send a payment to it from another node.
-```bash
-$ lncli addinvoice --amt=100
-{
- "r_hash": ,
- "pay_req":
-}
-$ lncli sendpayment --pay_req=
-```
-
-Your console should now display the details of the recently satisfied invoice.
-
-#### Bidirectional-streaming RPC
-
-```c#
-using (var call = client.SendPayment())
-{
- var responseReaderTask = Task.Run(async () =>
- {
- while (await call.ResponseStream.MoveNext())
- {
- var payment = call.ResponseStream.Current;
- Console.WriteLine(payment.ToString());
- }
- });
-
- foreach (SendRequest sendRequest in SendPayment())
- {
- await call.RequestStream.WriteAsync(sendRequest);
- }
- await call.RequestStream.CompleteAsync();
- await responseReaderTask;
-}
-
-
-IEnumerable SendPayment()
-{
- while (true)
- {
- SendRequest req = new SendRequest() {
- DestString = ,
- Amt = 100,
- PaymentHashString = ,
- FinalCltvDelta = 144
- };
- yield return req;
- System.Threading.Thread.Sleep(2000);
- }
-}
-```
-This example will send a payment of 100 satoshis every 2 seconds.
-
-#### Using Macaroons
-
-To authenticate using macaroons you need to include the macaroon in the metadata of the request.
-
-```c#
-// Lnd admin macaroon is at /data/chain/bitcoin/simnet/admin.macaroon on Windows
-// ~/.lnd/data/chain/bitcoin/simnet/admin.macaroon on Linux and ~/Library/Application Support/Lnd/data/chain/bitcoin/simnet/admin.macaroon on Mac
-byte[] macaroonBytes = File.ReadAllBytes("/data/chain/bitcoin/simnet/admin.macaroon");
-var macaroon = BitConverter.ToString(macaroonBytes).Replace("-", ""); // hex format stripped of "-" chars
-```
-
-The simplest approach to use the macaroon is to include the metadata in each request as shown below.
-
-```c#
-client.GetInfo(new GetInfoRequest(), new Metadata() { new Metadata.Entry("macaroon", macaroon) });
-```
-
-However, this can get tiresome to do for each request, so to avoid explicitly including the macaroon we can update the credentials to include it automatically.
-
-```c#
-// build ssl credentials using the cert the same as before
-var sslCreds = new SslCredentials(cert);
-
-// combine the cert credentials and the macaroon auth credentials using interceptors
-// so every call is properly encrypted and authenticated
-Task AddMacaroon(AuthInterceptorContext context, Metadata metadata)
-{
- metadata.Add(new Metadata.Entry("macaroon", macaroon));
- return Task.CompletedTask;
-}
-var macaroonInterceptor = new AsyncAuthInterceptor(AddMacaroon);
-var combinedCreds = ChannelCredentials.Create(sslCreds, CallCredentials.FromInterceptor(macaroonInterceptor));
-
-// finally pass in the combined credentials when creating a channel
-var channel = new Grpc.Core.Channel("localhost:10009", combinedCreds);
-var client = new Lnrpc.Lightning.LightningClient(channel);
-
-// now every call will be made with the macaroon already included
-client.GetInfo(new GetInfoRequest());
-```
-
-
-### Conclusion
-
-With the above, you should have all the `lnd` related `gRPC` dependencies installed locally in your project. In order to get up to speed with `protobuf` usage from C#, see [this official `protobuf` tutorial for C#](https://developers.google.com/protocol-buffers/docs/csharptutorial). Additionally, [this official gRPC resource](http://www.grpc.io/docs/tutorials/basic/csharp.html) provides more details around how to drive `gRPC` from C#.
\ No newline at end of file
diff --git a/lnd/docs/grpc/java.md b/lnd/docs/grpc/java.md
deleted file mode 100644
index 83ab7025..00000000
--- a/lnd/docs/grpc/java.md
+++ /dev/null
@@ -1,240 +0,0 @@
-
-# How to write a Java gRPC client for the Lightning Network Daemon
-
-This section enumerates what you need to do to write a client that communicates
-with lnd in Java. We'll be using Maven as our build tool.
-
-### Prerequisites
- - Maven
- - running lnd
- - running btcd
-
-### Setup and Installation
-#### Project Structure
-```
-.
-├── pom.xml
-└── src
- ├── main
- ├── java
- │ └── Main.java
- ├── proto
- ├── google
- │ └── api
- │ ├── annotations.proto
- │ └── http.proto
- └── lnrpc
- └── rpc.proto
-
-```
-Note the ***proto*** folder, where all the proto files are kept.
-
- - [rpc.proto](https://github.com/lightningnetwork/lnd/blob/master/lnrpc/rpc.proto)
- - [annotations.proto](https://github.com/grpc-ecosystem/grpc-gateway/blob/master/third_party/googleapis/google/api/annotations.proto)
- - [http.proto](https://github.com/grpc-ecosystem/grpc-gateway/blob/master/third_party/googleapis/google/api/http.proto)
-
-#### pom.xml
-```
-
- 1.8.0
-
-```
-The following dependencies are required.
-```
-
-
- io.grpc
- grpc-netty
- ${grpc.version}
-
-
- io.grpc
- grpc-protobuf
- ${grpc.version}
-
-
- io.grpc
- grpc-stub
- ${grpc.version}
-
-
- io.netty
- netty-tcnative-boringssl-static
- 2.0.7.Final
-
-
- commons-codec
- commons-codec
- 1.11
-
-
-```
-In the build section, we'll need to configure the following things :
-```
-
-
-
- kr.motd.maven
- os-maven-plugin
- 1.5.0.Final
-
-
-
-
- org.xolstice.maven.plugins
- protobuf-maven-plugin
- 0.5.0
-
- com.google.protobuf:protoc:3.4.0:exe:${os.detected.classifier}
- grpc-java
- io.grpc:protoc-gen-grpc-java:${grpc.version}:exe:${os.detected.classifier}
-
-
-
-
- compile
- compile-custom
-
-
-
-
-
-
-```
-#### Main.java
-```java
-import io.grpc.Attributes;
-import io.grpc.CallCredentials;
-import io.grpc.ManagedChannel;
-import io.grpc.Metadata;
-import io.grpc.MethodDescriptor;
-import io.grpc.Status;
-import io.grpc.netty.GrpcSslContexts;
-import io.grpc.netty.NettyChannelBuilder;
-import io.netty.handler.ssl.SslContext;
-import lnrpc.LightningGrpc;
-import lnrpc.LightningGrpc.LightningBlockingStub;
-import lnrpc.Rpc.GetInfoRequest;
-import lnrpc.Rpc.GetInfoResponse;
-import org.apache.commons.codec.binary.Hex;
-
-import java.io.File;
-import java.io.IOException;
-import java.nio.file.Files;
-import java.nio.file.Paths;
-import java.util.concurrent.Executor;
-
-public class Main {
- static class MacaroonCallCredential implements CallCredentials {
- private final String macaroon;
-
- MacaroonCallCredential(String macaroon) {
- this.macaroon = macaroon;
- }
-
- public void thisUsesUnstableApi() {}
-
- public void applyRequestMetadata(
- MethodDescriptor < ? , ? > methodDescriptor,
- Attributes attributes,
- Executor executor,
- final MetadataApplier metadataApplier
- ) {
- String authority = attributes.get(ATTR_AUTHORITY);
- System.out.println(authority);
- executor.execute(new Runnable() {
- public void run() {
- try {
- Metadata headers = new Metadata();
- Metadata.Key < String > macaroonKey = Metadata.Key.of("macaroon", Metadata.ASCII_STRING_MARSHALLER);
- headers.put(macaroonKey, macaroon);
- metadataApplier.apply(headers);
- } catch (Throwable e) {
- metadataApplier.fail(Status.UNAUTHENTICATED.withCause(e));
- }
- }
- });
- }
- }
-
- private static final String CERT_PATH = "/Users/user/Library/Application Support/Lnd/tls.cert";
- private static final String MACAROON_PATH = "/Users/user/Library/Application Support/Lnd/data/chain/bitcoin/simnet/admin.macaroon";
- private static final String HOST = "localhost";
- private static final int PORT = 10009;
-
- public static void main(String...args) throws IOException {
- SslContext sslContext = GrpcSslContexts.forClient().trustManager(new File(CERT_PATH)).build();
- NettyChannelBuilder channelBuilder = NettyChannelBuilder.forAddress(HOST, PORT);
- ManagedChannel channel = channelBuilder.sslContext(sslContext).build();
-
- String macaroon =
- Hex.encodeHexString(
- Files.readAllBytes(Paths.get(MACAROON_PATH))
- );
-
- LightningBlockingStub stub = LightningGrpc
- .newBlockingStub(channel)
- .withCallCredentials(new MacaroonCallCredential(macaroon));
-
-
- GetInfoResponse response = stub.getInfo(GetInfoRequest.getDefaultInstance());
- System.out.println(response.getIdentityPubkey());
- }
-}
-```
-#### Running the example
-Execute the following command in the directory where the **pom.xml** file is located.
-```
-mvn compile exec:java -Dexec.mainClass="Main" -Dexec.cleanupDaemonThreads=false
-```
-##### Sample output
-```
-[INFO] Scanning for projects...
-[INFO] ------------------------------------------------------------------------
-[INFO] Detecting the operating system and CPU architecture
-[INFO] ------------------------------------------------------------------------
-[INFO] os.detected.name: osx
-[INFO] os.detected.arch: x86_64
-[INFO] os.detected.version: 10.13
-[INFO] os.detected.version.major: 10
-[INFO] os.detected.version.minor: 13
-[INFO] os.detected.classifier: osx-x86_64
-[INFO]
-[INFO] ------------------------------------------------------------------------
-[INFO] Building lightning-client 0.0.1-SNAPSHOT
-[INFO] ------------------------------------------------------------------------
-[INFO]
-[INFO] --- protobuf-maven-plugin:0.5.0:compile (default) @ lightning-client ---
-[INFO] Compiling 3 proto file(s) to /Users/user/Documents/Projects/lightningclient/target/generated-sources/protobuf/java
-[INFO]
-[INFO] --- protobuf-maven-plugin:0.5.0:compile-custom (default) @ lightning-client ---
-[INFO] Compiling 3 proto file(s) to /Users/user/Documents/Projects/lightningclient/target/generated-sources/protobuf/grpc-java
-[INFO]
-[INFO] --- maven-resources-plugin:2.6:resources (default-resources) @ lightning-client ---
-[INFO] Using 'UTF-8' encoding to copy filtered resources.
-[INFO] Copying 0 resource
-[INFO] Copying 3 resources
-[INFO] Copying 3 resources
-[INFO]
-[INFO] --- maven-compiler-plugin:3.1:compile (default-compile) @ lightning-client ---
-[INFO] Changes detected - recompiling the module!
-[INFO] Compiling 12 source files to /Users/user/Documents/Projects/lightningclient/target/classes
-[INFO]
-[INFO] --- exec-maven-plugin:1.6.0:java (default-cli) @ lightning-client ---
-032562215c38dede6f1f2f262ff4c8db58a38ecf889e8e907eee8e4c320e0b5e81
-[INFO] ------------------------------------------------------------------------
-[INFO] BUILD SUCCESS
-[INFO] ------------------------------------------------------------------------
-[INFO] Total time: 7.408 s
-[INFO] Finished at: 2018-01-13T19:05:49+01:00
-[INFO] Final Memory: 30M/589M
-[INFO] ------------------------------------------------------------------------
-```
-
-### Java proto options
-
-There are 2 options available that can be used in the *rpc.proto* file :
-
-* option java_multiple_files = true;
-* option java_package = "network.lightning.rpc";
->The package you want to use for your generated Java classes. If no explicit java_package option is given in the .proto file, then by default the proto package (specified using the "package" keyword in the .proto file) will be used. However, proto packages generally do not make good Java packages since proto packages are not expected to start with reverse domain names. If not generating Java code, this option has no effect.
diff --git a/lnd/docs/grpc/javascript.md b/lnd/docs/grpc/javascript.md
deleted file mode 100644
index d4dc0a3a..00000000
--- a/lnd/docs/grpc/javascript.md
+++ /dev/null
@@ -1,246 +0,0 @@
-# How to write a simple `lnd` client in Javascript using `node.js`
-
-## Setup and Installation
-
-First, you'll need to initialize a simple nodejs project:
-```
-npm init (or npm init -f if you want to use the default values without prompt)
-```
-
-Then you need to install the Javascript grpc and proto loader library
-dependencies:
-```
-npm install grpc @grpc/proto-loader --save
-```
-
-You also need to copy the `lnd` `rpc.proto` file in your project directory (or
-at least somewhere reachable by your Javascript code).
-
-The `rpc.proto` file is [located in the `lnrpc` directory of the `lnd`
-sources](https://github.com/lightningnetwork/lnd/blob/master/lnrpc/rpc.proto).
-
-### Imports and Client
-
-Every time you work with Javascript gRPC, you will have to import `grpc`, load
-`rpc.proto`, and create a connection to your client like so:
-
-```js
-const grpc = require('grpc');
-const protoLoader = require('@grpc/proto-loader');
-const fs = require("fs");
-
-// Due to updated ECDSA generated tls.cert we need to let gprc know that
-// we need to use that cipher suite otherwise there will be a handhsake
-// error when we communicate with the lnd rpc server.
-process.env.GRPC_SSL_CIPHER_SUITES = 'HIGH+ECDSA'
-
-// We need to give the proto loader some extra options, otherwise the code won't
-// fully work with lnd.
-const loaderOptions = {
- keepCase: true,
- longs: String,
- enums: String,
- defaults: true,
- oneofs: true
-};
-const packageDefinition = protoLoader.loadSync('rpc.proto', loaderOptions);
-
-// Lnd cert is at ~/.lnd/tls.cert on Linux and
-// ~/Library/Application Support/Lnd/tls.cert on Mac
-let lndCert = fs.readFileSync("~/.lnd/tls.cert");
-let credentials = grpc.credentials.createSsl(lndCert);
-let lnrpcDescriptor = grpc.loadPackageDefinition(packageDefinition);
-let lnrpc = lnrpcDescriptor.lnrpc;
-let lightning = new lnrpc.Lightning('localhost:10009', credentials);
-```
-
-## Examples
-
-Let's walk through some examples of Javascript gRPC clients. These examples
-assume that you have at least two `lnd` nodes running, the RPC location of one
-of which is at the default `localhost:10009`, with an open channel between the
-two nodes.
-
-### Simple RPC
-
-```js
-lightning.getInfo({}, function(err, response) {
- if (err) {
- console.log('Error: ' + err);
- }
- console.log('GetInfo:', response);
-});
-```
-
-You should get something like this in your console:
-
-```
-GetInfo: { identity_pubkey: '03c892e3f3f077ea1e381c081abb36491a2502bc43ed37ffb82e264224f325ff27',
- alias: '',
- num_pending_channels: 0,
- num_active_channels: 1,
- num_inactive_channels: 0,
- num_peers: 1,
- block_height: 1006,
- block_hash: '198ba1dc43b4190e507fa5c7aea07a74ec0009a9ab308e1736dbdab5c767ff8e',
- synced_to_chain: false,
- testnet: false,
- chains: [ 'bitcoin' ] }
-```
-
-### Response-streaming RPC
-
-```js
-let call = lightning.subscribeInvoices({});
-call.on('data', function(invoice) {
- console.log(invoice);
-})
-.on('end', function() {
- // The server has finished sending
-})
-.on('status', function(status) {
- // Process status
- console.log("Current status" + status);
-});
-```
-
-Now, create an invoice for your node at `localhost:10009`and send a payment to
-it from another node.
-```bash
-$ lncli addinvoice --amt=100
-{
- "r_hash": ,
- "pay_req":
-}
-$ lncli sendpayment --pay_req=
-```
-Your Javascript console should now display the details of the recently satisfied
-invoice.
-
-### Bidirectional-streaming RPC
-
-This example has a few dependencies:
-```shell
-npm install --save async lodash bytebuffer
-```
-
-You can run the following in your shell or put it in a program and run it like
-`node script.js`
-
-```js
-// Load some libraries specific to this example
-const async = require('async');
-const _ = require('lodash');
-const ByteBuffer = require('bytebuffer');
-
-let dest_pubkey = ;
-let dest_pubkey_bytes = ByteBuffer.fromHex(dest_pubkey);
-
-// Set a listener on the bidirectional stream
-let call = lightning.sendPayment();
-call.on('data', function(payment) {
- console.log("Payment sent:");
- console.log(payment);
-});
-call.on('end', function() {
- // The server has finished
- console.log("END");
-});
-
-// You can send single payments like this
-call.write({ dest: dest_pubkey_bytes, amt: 6969 });
-
-// Or send a bunch of them like this
-function paymentSender(destination, amount) {
- return function(callback) {
- console.log("Sending " + amount + " satoshis");
- console.log("To: " + destination);
- call.write({
- dest: destination,
- amt: amount
- });
- _.delay(callback, 2000);
- };
-}
-let payment_senders = [];
-for (let i = 0; i < 10; i++) {
- payment_senders[i] = paymentSender(dest_pubkey_bytes, 100);
-}
-async.series(payment_senders, function() {
- call.end();
-});
-
-```
-This example will send a payment of 100 satoshis every 2 seconds.
-
-
-### Using Macaroons
-
-To authenticate using macaroons you need to include the macaroon in the metadata
-of each request.
-
-The following snippet will add the macaroon to every request automatically:
-
-```js
-const fs = require('fs');
-const grpc = require('grpc');
-const protoLoader = require('@grpc/proto-loader');
-const loaderOptions = {
- keepCase: true,
- longs: String,
- enums: String,
- defaults: true,
- oneofs: true
-};
-const packageDefinition = protoLoader.loadSync('rpc.proto', loaderOptions);
-
-process.env.GRPC_SSL_CIPHER_SUITES = 'HIGH+ECDSA'
-
-// Lnd admin macaroon is at ~/.lnd/data/chain/bitcoin/simnet/admin.macaroon on Linux and
-// ~/Library/Application Support/Lnd/data/chain/bitcoin/simnet/admin.macaroon on Mac
-let m = fs.readFileSync('~/.lnd/data/chain/bitcoin/simnet/admin.macaroon');
-let macaroon = m.toString('hex');
-
-// build meta data credentials
-let metadata = new grpc.Metadata()
-metadata.add('macaroon', macaroon)
-let macaroonCreds = grpc.credentials.createFromMetadataGenerator((_args, callback) => {
- callback(null, metadata);
-});
-
-// build ssl credentials using the cert the same as before
-let lndCert = fs.readFileSync("~/.lnd/tls.cert");
-let sslCreds = grpc.credentials.createSsl(lndCert);
-
-// combine the cert credentials and the macaroon auth credentials
-// such that every call is properly encrypted and authenticated
-let credentials = grpc.credentials.combineChannelCredentials(sslCreds, macaroonCreds);
-
-// Pass the crendentials when creating a channel
-let lnrpcDescriptor = grpc.loadPackageDefinition(packageDefinition);
-let lnrpc = lnrpcDescriptor.lnrpc;
-let client = new lnrpc.Lightning('some.address:10009', credentials);
-
-client.getInfo({}, (err, response) => {
- if (err) {
- console.log('Error: ' + err);
- }
- console.log('GetInfo:', response);
-});
-```
-
-## Conclusion
-
-With the above, you should have all the `lnd` related `gRPC` dependencies
-installed locally in your project. In order to get up to speed with `protofbuf`
-usage from Javascript, see [this official `protobuf` reference for
-Javascript](https://developers.google.com/protocol-buffers/docs/reference/javascript-generated).
-Additionally, [this official gRPC
-resource](http://www.grpc.io/docs/tutorials/basic/node.html) provides more
-details around how to drive `gRPC` from `node.js`.
-
-## API documentation
-
-There is an [online API documentation](https://api.lightning.community?javascript)
-available that shows all currently existing RPC methods, including code snippets
-on how to use them.
diff --git a/lnd/docs/grpc/python.md b/lnd/docs/grpc/python.md
deleted file mode 100644
index 55452f28..00000000
--- a/lnd/docs/grpc/python.md
+++ /dev/null
@@ -1,212 +0,0 @@
-# How to write a Python gRPC client for the Lightning Network Daemon
-
-This section enumerates what you need to do to write a client that communicates
-with `lnd` in Python.
-
-## Setup and Installation
-
-Lnd uses the gRPC protocol for communication with clients like lncli. gRPC is
-based on protocol buffers and as such, you will need to compile the lnd proto
-file in Python before you can use it to communicate with lnd.
-
-1. Create a virtual environment for your project
- ```
- $ virtualenv lnd
- ```
-2. Activate the virtual environment
- ```
- $ source lnd/bin/activate
- ```
-3. Install dependencies (googleapis-common-protos is required due to the use of
- google/api/annotations.proto)
- ```
- (lnd)$ pip install grpcio grpcio-tools googleapis-common-protos
- ```
-4. Clone the google api's repository (required due to the use of
- google/api/annotations.proto)
- ```
- (lnd)$ git clone https://github.com/googleapis/googleapis.git
- ```
-5. Copy the lnd rpc.proto file (you'll find this at
- [lnrpc/rpc.proto](https://github.com/lightningnetwork/lnd/blob/master/lnrpc/rpc.proto))
- or just download it
- ```
- (lnd)$ curl -o rpc.proto -s https://raw.githubusercontent.com/lightningnetwork/lnd/master/lnrpc/rpc.proto
- ```
-6. Compile the proto file
- ```
- (lnd)$ python -m grpc_tools.protoc --proto_path=googleapis:. --python_out=. --grpc_python_out=. rpc.proto
- ```
-
-After following these steps, two files `rpc_pb2.py` and `rpc_pb2_grpc.py` will
-be generated. These files will be imported in your project anytime you use
-Python gRPC.
-
-### Generating RPC modules for subservers
-
-If you want to use any of the subservers' functionality, you also need to
-generate the python modules for them.
-
-For example, if you want to generate the RPC modules for the `Router` subserver
-(located/defined in `routerrpc/router.proto`), you need to run the following two
-extra steps (after completing all 6 step described above) to get the
-`router_pb2.py` and `router_pb2_grpc.py`:
-
-```
-(lnd)$ curl -o router.proto -s https://raw.githubusercontent.com/lightningnetwork/lnd/master/lnrpc/routerrpc/router.proto
-(lnd)$ python -m grpc_tools.protoc --proto_path=googleapis:. --python_out=. --grpc_python_out=. router.proto
-```
-
-### Imports and Client
-
-Every time you use Python gRPC, you will have to import the generated rpc modules
-and set up a channel and stub to your connect to your `lnd` node:
-
-```python
-import rpc_pb2 as ln
-import rpc_pb2_grpc as lnrpc
-import grpc
-import os
-
-# Due to updated ECDSA generated tls.cert we need to let gprc know that
-# we need to use that cipher suite otherwise there will be a handhsake
-# error when we communicate with the lnd rpc server.
-os.environ["GRPC_SSL_CIPHER_SUITES"] = 'HIGH+ECDSA'
-
-# Lnd cert is at ~/.lnd/tls.cert on Linux and
-# ~/Library/Application Support/Lnd/tls.cert on Mac
-cert = open(os.path.expanduser('~/.lnd/tls.cert'), 'rb').read()
-creds = grpc.ssl_channel_credentials(cert)
-channel = grpc.secure_channel('localhost:10009', creds)
-stub = lnrpc.LightningStub(channel)
-```
-
-## Examples
-
-Let's walk through some examples of Python gRPC clients. These examples assume
-that you have at least two `lnd` nodes running, the RPC location of one of which
-is at the default `localhost:10009`, with an open channel between the two nodes.
-
-### Simple RPC
-
-```python
-# Retrieve and display the wallet balance
-response = stub.WalletBalance(ln.WalletBalanceRequest())
-print(response.total_balance)
-```
-
-### Response-streaming RPC
-
-```python
-request = ln.InvoiceSubscription()
-for invoice in stub.SubscribeInvoices(request):
- print(invoice)
-```
-
-Now, create an invoice for your node at `localhost:10009`and send a payment to
-it from another node.
-```bash
-$ lncli addinvoice --amt=100
-{
- "r_hash": ,
- "pay_req":
-}
-$ lncli sendpayment --pay_req=
-```
-
-Your Python console should now display the details of the recently satisfied
-invoice.
-
-### Bidirectional-streaming RPC
-
-```python
-from time import sleep
-import codecs
-
-def request_generator(dest, amt):
- # Initialization code here
- counter = 0
- print("Starting up")
- while True:
- request = ln.SendRequest(
- dest=dest,
- amt=amt,
- )
- yield request
- # Alter parameters here
- counter += 1
- sleep(2)
-
-# Outputs from lncli are hex-encoded
-dest_hex =
-dest_bytes = codecs.decode(dest_hex, 'hex')
-
-request_iterable = request_generator(dest=dest_bytes, amt=100)
-
-for payment in stub.SendPayment(request_iterable):
- print(payment)
-```
-This example will send a payment of 100 satoshis every 2 seconds.
-
-### Using Macaroons
-
-To authenticate using macaroons you need to include the macaroon in the metadata of the request.
-
-```python
-import codecs
-
-# Lnd admin macaroon is at ~/.lnd/data/chain/bitcoin/simnet/admin.macaroon on Linux and
-# ~/Library/Application Support/Lnd/data/chain/bitcoin/simnet/admin.macaroon on Mac
-with open(os.path.expanduser('~/.lnd/data/chain/bitcoin/simnet/admin.macaroon'), 'rb') as f:
- macaroon_bytes = f.read()
- macaroon = codecs.encode(macaroon_bytes, 'hex')
-```
-
-The simplest approach to use the macaroon is to include the metadata in each request as shown below.
-
-```python
-stub.GetInfo(ln.GetInfoRequest(), metadata=[('macaroon', macaroon)])
-```
-
-However, this can get tiresome to do for each request, so to avoid explicitly including the macaroon we can update the credentials to include it automatically.
-
-```python
-def metadata_callback(context, callback):
- # for more info see grpc docs
- callback([('macaroon', macaroon)], None)
-
-
-# build ssl credentials using the cert the same as before
-cert_creds = grpc.ssl_channel_credentials(cert)
-
-# now build meta data credentials
-auth_creds = grpc.metadata_call_credentials(metadata_callback)
-
-# combine the cert credentials and the macaroon auth credentials
-# such that every call is properly encrypted and authenticated
-combined_creds = grpc.composite_channel_credentials(cert_creds, auth_creds)
-
-# finally pass in the combined credentials when creating a channel
-channel = grpc.secure_channel('localhost:10009', combined_creds)
-stub = lnrpc.LightningStub(channel)
-
-# now every call will be made with the macaroon already included
-stub.GetInfo(ln.GetInfoRequest())
-```
-
-
-## Conclusion
-
-With the above, you should have all the `lnd` related `gRPC` dependencies
-installed locally into your virtual environment. In order to get up to speed
-with `protofbuf` usage from Python, see [this official `protobuf` tutorial for
-Python](https://developers.google.com/protocol-buffers/docs/pythontutorial).
-Additionally, [this official gRPC
-resource](http://www.grpc.io/docs/tutorials/basic/python.html) provides more
-details around how to drive `gRPC` from Python.
-
-## API documentation
-
-There is an [online API documentation](https://api.lightning.community?python)
-available that shows all currently existing RPC methods, including code snippets
-on how to use them.
diff --git a/lnd/docs/grpc/ruby.md b/lnd/docs/grpc/ruby.md
deleted file mode 100644
index 867d2ce7..00000000
--- a/lnd/docs/grpc/ruby.md
+++ /dev/null
@@ -1,185 +0,0 @@
-# How to write a Ruby gRPC client for the Lightning Network Daemon
-
-This section enumerates what you need to do to write a client that communicates
-with `lnd` in Ruby.
-
-### Introduction
-
-`lnd` uses the `gRPC` protocol for communication with clients like `lncli`.
-
-`gRPC` is based on protocol buffers and as such, you will need to compile
-the `lnd` proto file in Ruby before you can use it to communicate with `lnd`.
-
-### Setup
-
-Install gRPC rubygems:
-
-```
-$ gem install grpc
-$ gem install grpc-tools
-```
-
-Clone the Google APIs repository:
-
-```
-$ git clone https://github.com/googleapis/googleapis.git
-```
-
-Fetch the `rpc.proto` file (or copy it from your local source directory):
-
-```
-$ curl -o rpc.proto -s https://raw.githubusercontent.com/lightningnetwork/lnd/master/lnrpc/rpc.proto
-```
-
-Compile the proto file:
-
-```
-$ grpc_tools_ruby_protoc --proto_path googleapis:. --ruby_out=. --grpc_out=. rpc.proto
-```
-
-Two files will be generated in the current directory:
-
-* `rpc_pb.rb`
-* `rpc_services_pb.rb`
-
-### Examples
-
-#### Simple client to display wallet balance
-
-Every time you use the Ruby gRPC you need to require the `rpc_services_pb` file.
-
-We assume that `lnd` runs on the default `localhost:10009`.
-
-We further assume you run `lnd` with `--no-macaroons`.
-
-```ruby
-#!/usr/bin/env ruby
-
-$:.unshift(File.dirname(__FILE__))
-
-require 'grpc'
-require 'rpc_services_pb'
-
-# Due to updated ECDSA generated tls.cert we need to let gprc know that
-# we need to use that cipher suite otherwise there will be a handhsake
-# error when we communicate with the lnd rpc server.
-ENV['GRPC_SSL_CIPHER_SUITES'] = "HIGH+ECDSA"
-
-certificate = File.read(File.expand_path("~/.lnd/tls.cert"))
-credentials = GRPC::Core::ChannelCredentials.new(certificate)
-stub = Lnrpc::Lightning::Stub.new('127.0.0.1:10009', credentials)
-
-response = stub.wallet_balance(Lnrpc::WalletBalanceRequest.new())
-puts "Total balance: #{response.total_balance}"
-```
-
-This will show the `total_balance` of the wallet.
-
-#### Streaming client for invoice payment updates
-
-```ruby
-#!/usr/bin/env ruby
-
-$:.unshift(File.dirname(__FILE__))
-
-require 'grpc'
-require 'rpc_services_pb'
-
-ENV['GRPC_SSL_CIPHER_SUITES'] = "HIGH+ECDSA"
-
-certificate = File.read(File.expand_path("~/.lnd/tls.cert"))
-credentials = GRPC::Core::ChannelCredentials.new(certificate)
-stub = Lnrpc::Lightning::Stub.new('127.0.0.1:10009', credentials)
-
-stub.subscribe_invoices(Lnrpc::InvoiceSubscription.new) do |invoice|
- puts invoice.inspect
-end
-```
-
-Now, create an invoice on your node:
-
-```bash
-$ lncli addinvoice --amt=590
-{
- "r_hash": ,
- "pay_req":
-}
-```
-
-Next send a payment to it from another node:
-
-```
-$ lncli sendpayment --pay_req=
-```
-
-You should now see the details of the settled invoice appear.
-
-#### Using Macaroons
-
-To authenticate using macaroons you need to include the macaroon in the metadata of the request.
-
-```ruby
-# Lnd admin macaroon is at ~/.lnd/data/chain/bitcoin/simnet/admin.macaroon on Linux and
-# ~/Library/Application Support/Lnd/data/chain/bitcoin/simnet/admin.macaroon on Mac
-macaroon_binary = File.read(File.expand_path("~/.lnd/data/chain/bitcoin/simnet/admin.macaroon"))
-macaroon = macaroon_binary.each_byte.map { |b| b.to_s(16).rjust(2,'0') }.join
-```
-
-The simplest approach to use the macaroon is to include the metadata in each request as shown below.
-
-```ruby
-stub.get_info(Lnrpc::GetInfoRequest.new, metadata: {macaroon: macaroon})
-```
-
-However, this can get tiresome to do for each request. We can use gRPC interceptors to add this metadata to each request automatically. Our interceptor class would look like this.
-
-```ruby
-class MacaroonInterceptor < GRPC::ClientInterceptor
- attr_reader :macaroon
-
- def initialize(macaroon)
- @macaroon = macaroon
- super
- end
-
- def request_response(request:, call:, method:, metadata:)
- metadata['macaroon'] = macaroon
- yield
- end
-
- def server_streamer(request:, call:, method:, metadata:)
- metadata['macaroon'] = macaroon
- yield
- end
-end
-```
-
-And then we would include it when we create our stub like so.
-
-```ruby
-certificate = File.read(File.expand_path("~/.lnd/tls.cert"))
-credentials = GRPC::Core::ChannelCredentials.new(certificate)
-macaroon_binary = File.read(File.expand_path("~/.lnd/data/chain/bitcoin/simnet/admin.macaroon"))
-macaroon = macaroon_binary.each_byte.map { |b| b.to_s(16).rjust(2,'0') }.join
-
-stub = Lnrpc::Lightning::Stub.new(
- 'localhost:10009',
- credentials,
- interceptors: [MacaroonInterceptor.new(macaroon)]
-)
-
-# Now we don't need to pass the metadata on a request level
-p stub.get_info(Lnrpc::GetInfoRequest.new)
-```
-
-#### Receive Large Responses
-
-A GRPC::ResourceExhausted exception is raised when a server response is too large. In particular, this will happen with mainnet DescribeGraph calls. The solution is to raise the default limits by including a channel_args hash when creating our stub.
-
-```ruby
-stub = Lnrpc::Lightning::Stub.new(
- 'localhost:10009',
- credentials,
- channel_args: {"grpc.max_receive_message_length" => 1024 * 1024 * 50}
-)
-```
\ No newline at end of file
diff --git a/lnd/docs/macaroons.md b/lnd/docs/macaroons.md
deleted file mode 100644
index 13d8a6fd..00000000
--- a/lnd/docs/macaroons.md
+++ /dev/null
@@ -1,201 +0,0 @@
-As part of [the `lnd` 0.3-alpha
-release](https://github.com/lightningnetwork/lnd/releases/tag/v0.3-alpha), we
-have addressed [issue 20](https://github.com/lightningnetwork/lnd/issues/20),
-which is RPC authentication. Until this was implemented, all RPC calls to `lnd`
-were unauthenticated. To fix this, we've utilized
-[macaroons](https://research.google.com/pubs/pub41892.html), which are similar
-to cookies but more capable. This brief overview explains, at a basic level,
-how they work, how we use them for `lnd` authentication, and our future plans.
-
-## What are macaroons?
-
-You can think of a macaroon as a cookie, in a way. Cookies are small bits of
-data that your browser stores and sends to a particular website when it makes a
-request to that website. If you're logged into a website, that cookie can store
-a session ID, which the site can look up in its own database to check who you
-are and give you the appropriate content.
-
-A macaroon is similar: it's a small bit of data that a client (like `lncli`)
-can send to a service (like `lnd`) to assert that it's allowed to perform an
-action. The service looks up the macaroon ID and verifies that the macaroon was
-initially signed with the service's root key. However, unlike a cookie, you can
-*delegate* a macaroon, or create a version of it that has more limited
-capabilities, and then send it to someone else to use.
-
-Just like a cookie, a macaroon should be sent over a secure channel (such as a
-TLS-encrypted connection), which is why we've also begun enforcing TLS for RPC
-requests in this release. Before SSL was enforced on websites such as Facebook
-and Google, listening to HTTP sessions on wireless networks was one way to
-hijack the session and log in as that user, gaining access to the user's
-account. Macaroons are similar in that intercepting a macaroon in transit
-allows the interceptor to use the macaroon to gain all the privileges of the
-legitimate user.
-
-## Macaroon delegation
-
-A macaroon is delegated by adding restrictions (called caveats) and an
-authentication code similar to a signature (technically an HMAC) to it. The
-technical method of doing this is outside the scope of this overview
-documentation, but the [README in the macaroons package](../macaroons/README.md)
-or the macaroon paper linked above describe it in more detail. The
-user must remember several things:
-
-* Sharing a macaroon allows anyone in possession of that macaroon to use it to
- access the service (in our case, `lnd`) to do anything permitted by the
- macaroon. There is a specific type of restriction, called a "third party
- caveat," that requires an external service to verify the request; however,
- `lnd` doesn't currently implement those.
-
-* If you add a caveat to a macaroon and share the resulting macaroon, the
- person receiving it cannot remove the caveat.
-
-This is used in `lnd` in an interesting way. By default, when `lnd` starts, it
-creates three files which contain macaroons: a file called `admin.macaroon`,
-which contains a macaroon with no caveats, a file called `readonly.macaroon`,
-which is the *same* macaroon but with an additional caveat, that permits only
-methods that don't change the state of `lnd`, and `invoice.macaroon`, which
-only has access to invoice related methods.
-
-## How macaroons are used by `lnd` and `lncli`.
-
-On startup, `lnd` checks to see if the `admin.macaroon`, `readonly.macaroon`
-and `invoice.macaroon` files exist. If they don't exist, `lnd` updates its
-database with a new macaroon ID, generates the three files `admin.macaroon`,
-`readonly.macaroon` and `invoice.macaroon`, all with the same ID. The
-`readonly.macaroon` file has an additional caveat which restricts the caller
-to using only read-only methods and the `invoice.macaroon` also has an
-additional caveat which restricts the caller to using only invoice related
-methods. This means a few important things:
-
-* You can delete the `admin.macaroon` and be left with only the
- `readonly.macaroon`, which can sometimes be useful (for example, if you want
- your `lnd` instance to run in autopilot mode and don't want to accidentally
- change its state).
-
-* If you delete the data directory which contains the `macaroons.db` file, this
- invalidates the `admin.macaroon`, `readonly.macaroon` and `invoice.macaroon`
- files. Invalid macaroon files give you errors like `cannot get macaroon: root
- key with id 0 doesn't exist` or `verification failed: signature mismatch
- after caveat verification`.
-
-You can also run `lnd` with the `--no-macaroons` option, which skips the
-creation of the macaroon files and all macaroon checks within the RPC server.
-This means you can still pass a macaroon to the RPC server with a client, but
-it won't be checked for validity. Note that disabling authentication of a server
-that's listening on a public interface is not allowed. This means the
-`--no-macaroons` option is only permitted when the RPC server is in a private
-network. In CIDR notation, the following IPs are considered private:
-- [`169.254.0.0/16` and `fe80::/10`](https://en.wikipedia.org/wiki/Link-local_address).
-- [`224.0.0.0/4` and `ff00::/8`](https://en.wikipedia.org/wiki/Multicast_address).
-- [`10.0.0.0/8`, `172.16.0.0/12` and `192.168.0.0/16`](https://tools.ietf.org/html/rfc1918).
-- [`fc00::/7`](https://tools.ietf.org/html/rfc4193).
-
-Since `lnd` requires macaroons by default in order to call RPC methods, `lncli`
-now reads a macaroon and provides it in the RPC call. Unless the path is
-changed by the `--macaroonpath` option, `lncli` tries to read the macaroon from
-the network directory of `lnd`'s currently active network (e.g. for simnet
-`lnddir/data/chain/bitcoin/simnet/admin.macaroon`) by default and will error if
-that file doesn't exist unless provided the `--no-macaroons` option. Keep this
-in mind when running `lnd` with `--no-macaroons`, as `lncli` will error out
-unless called the same way **or** `lnd` has generated a macaroon on a previous
-run without this option.
-
-`lncli` also adds a caveat which makes it valid for only 60 seconds by default
-to help prevent replay in case the macaroon is somehow intercepted in
-transmission. This is unlikely with TLS, but can happen e.g. when using a PKI
-and network setup which allows inspection of encrypted traffic, and an attacker
-gets access to the traffic logs after interception. The default 60 second
-timeout can be changed with the `--macaroontimeout` option; this can be
-increased for making RPC calls between systems whose clocks are more than 60s
-apart.
-
-## Stateless initialization
-
-As mentioned above, by default `lnd` creates several macaroon files in its
-directory. These are unencrypted and in case of the `admin.macaroon` provide
-full access to the daemon. This can be seen as quite a big security risk if
-the `lnd` daemon runs in an environment that is not fully trusted.
-
-The macaroon files are the only files with highly sensitive information that
-are not encrypted (unlike the wallet file and the macaroon database file that
-contains the [root key](../macaroons/README.md), these are always encrypted,
-even if no password is used).
-
-To avoid leaking the macaroon information, `lnd` supports the so called
-`stateless initialization` mode:
-* The three startup commands `create`, `unlock` and `changepassword` of `lncli`
- all have a flag called `--stateless_init` that instructs the daemon **not**
- to create `*.macaroon` files.
-* The two operations `create` and `changepassword` that actually create/update
- the macaroon database will return the admin macaroon in the RPC call.
- Assuming the daemon and the `lncli` are not used on the same machine, this
- will leave no unencrypted information on the machine where `lnd` runs on.
- * To be more precise: By default, when using the `changepassword` command, the
- macaroon root key in the macaroon DB is just re-encrypted with the new
- password. But the key remains the same and therefore the macaroons issued
- before the `changepassword` command still remain valid. If a user wants to
- invalidate all previously created macaroons, the `--new_mac_root_key` flag
- of the `changepassword` command should be used!
-* A user of `lncli` will see the returned admin macaroon printed to the screen
- or saved to a file if the parameter `--save_to=some_file.macaroon` is used.
-* **Important:** By default, `lnd` will create the macaroon files during the
- `unlock` phase, if the `--stateless_init` flag is not used. So to avoid
- leakage of the macaroon information, use the stateless initialization flag
- for all three startup commands of the wallet unlocker service!
-
-Examples:
-
-* Create a new wallet stateless (first run):
- * `lncli create --stateless_init --save_to=/safe/location/admin.macaroon`
-* Unlock a wallet that has previously been initialized stateless:
- * `lncli unlock --stateless_init`
-* Use the created macaroon:
- * `lncli --macaroonpath=/safe/location/admin.macaroon getinfo`
-
-## Using Macaroons with GRPC clients
-
-When interacting with `lnd` using the GRPC interface, the macaroons are encoded
-as a hex string over the wire and can be passed to `lnd` by specifying the
-hex-encoded macaroon as GRPC metadata:
-
- GET https://localhost:8080/v1/getinfo
-    Grpc-Metadata-macaroon: <macaroon>
-
-Where `<macaroon>` is the hex encoded binary data from the macaroon file itself.
-
-A very simple example using `curl` may look something like this:
-
- curl --insecure --header "Grpc-Metadata-macaroon: $(xxd -ps -u -c 1000 $HOME/.lnd/data/chain/bitcoin/simnet/admin.macaroon)" https://localhost:8080/v1/getinfo
-
-Have a look at the [Java GRPC example](/docs/grpc/java.md) for programmatic usage details.
-
-## Creating macaroons with custom permissions
-
-The macaroon bakery is described in more detail in the
-[README in the macaroons package](../macaroons/README.md).
-
-## Future improvements to the `lnd` macaroon implementation
-
-The existing macaroon implementation in `lnd` and `lncli` lays the groundwork
-for future improvements in functionality and security. We will add features
-such as:
-
-* Improved replay protection for securing RPC calls
-
-* Macaroon database encryption
-
-* Root key rotation and possibly macaroon invalidation/rotation
-
-* Additional restrictions, such as limiting payments to use (or not use)
- specific routes, channels, nodes, etc.
-
-* Accounting-based macaroons, which can make an instance of `lnd` act almost
- like a bank for apps: for example, an app that pays to consume APIs whose
- budget is limited to the money it receives by providing an API/service
-
-* Support for third-party caveats, which allows external plugins for
- authorization and authentication
-
-With this new feature, we've started laying the groundwork for flexible
-authentication and authorization for RPC calls to `lnd`. We look forward to
-expanding its functionality to make it easy to develop secure apps.
diff --git a/lnd/docs/nat_traversal.md b/lnd/docs/nat_traversal.md
deleted file mode 100644
index dd48fcfc..00000000
--- a/lnd/docs/nat_traversal.md
+++ /dev/null
@@ -1,23 +0,0 @@
-# NAT Traversal
-
-`lnd` has support for NAT traversal using a number of different techniques. At
-the time of writing this documentation, UPnP and NAT-PMP are supported. NAT
-traversal can be enabled through `lnd`'s `--nat` flag.
-
-```shell
-$ lnd ... --nat
-```
-
-On startup, `lnd` will try the different techniques until one is found that's
-supported by your hardware. The underlying dependencies used for these
-techniques rely on using system-specific binaries in order to detect your
-gateway device's address. This is needed because we need to be able to reach the
-gateway device to determine if it supports the specific NAT traversal technique
-currently being tried. Because of this, due to uncommon setups, it is possible
-that these binaries are not found in your system. If this is the case, `lnd` will
-exit stating such error.
-
-As a bonus, `lnd` spawns a background thread that automatically detects IP
-address changes and propagates the new address update to the rest of the
-network. This is especially beneficial for users who were provided dynamic IP
-addresses from their internet service provider.
diff --git a/lnd/docs/psbt.md b/lnd/docs/psbt.md
deleted file mode 100644
index 059af124..00000000
--- a/lnd/docs/psbt.md
+++ /dev/null
@@ -1,598 +0,0 @@
-# PSBT
-
-This document describes various use cases around the topic of Partially Signed
-Bitcoin Transactions (PSBTs). `lnd`'s wallet now features a full set of PSBT
-functionality, including creating, signing and funding channels with PSBTs.
-
-See [BIP174](https://github.com/bitcoin/bips/blob/master/bip-0174.mediawiki) for
-a full description of the PSBT format and the different _roles_ that a
-participant in a PSBT can have.
-
-## Creating/funding a PSBT
-
-The first step for every transaction that is constructed using a PSBT flow is to
-select inputs (UTXOs) to fund the desired output and to add a change output that
-sends the remaining funds back to the own wallet.
-
-This `wallet psbt fund` command is very similar to `bitcoind`'s
-`walletcreatefundedpsbt` command. One main difference is that you can specify a
-template PSBT in the `lncli` variant that contains the output(s) and optional
-inputs. Another difference is that for the `--outputs` flag, `lncli` expects the
-amounts to be in satoshis instead of fractions of a bitcoin.
-
-### Simple example: fund PSBT that sends to address
-
-Let's start with a very simple example and assume we want to send half a coin
-to the address `bcrt1qjrdns4f5zwkv29ln86plqzs092yd5fg6nsz8re`:
-
-```shell script
-$ lncli wallet psbt fund --outputs='{"bcrt1qjrdns4f5zwkv29ln86plqzs092yd5fg6nsz8re":50000000}'
-
-{
- "psbt": "cHNidP8BAHECAAAAAeJQY2VLRtutKgQYFUajEKpjFfl0Uyrm6x23OumDpe/4AQAAAAD/////AkxREgEAAAAAFgAUv6pTgbKHN60CZ+RQn5yOuH6c2WiA8PoCAAAAABYAFJDbOFU0E6zFF/M+g/AKDyqI2iUaAAAAAAABAOsCAAAAAAEBbxqXgEf9DlzcqqNM610s5pL1X258ra6+KJ22etb7HAcBAAAAAAAAAAACACT0AAAAAAAiACC7U1W0iJGhQ6o7CexDh5k36V6v3256xpA9/xmB2BybTFZdDQQAAAAAFgAUKp2ThzhswyM2QHlyvmMB6tQB7V0CSDBFAiEA4Md8RIZYqFdUPsgDyomlzMJL9bJ6Ho23JGTihXtEelgCIAeNXRLyt88SOuuWFVn3IodCE4U5D6DojIHesRmikF28ASEDHYFzMEAxfmfq98eSSnZtUwb1w7mAtHG65y8qiRFNnIkAAAAAAQEfVl0NBAAAAAAWABQqnZOHOGzDIzZAeXK+YwHq1AHtXQEDBAEAAAAAAAA=",
- "change_output_index": 0,
- "locks": [
- {
- "id": "ede19a92ed321a4705f8a1cccc1d4f6182545d4bb4fae08bd5937831b7e38f98",
- "outpoint": "f8efa583e93ab71debe62a5374f91563aa10a3461518042aaddb464b656350e2:1",
- "expiration": 1601553408
- }
- ]
-}
-```
-
-The first thing we notice in the response is that an outpoint was locked.
-That means, the UTXO that was chosen to fund the PSBT is currently locked and
-cannot be used by the internal wallet or any other RPC call. This lock will be
-released automatically either after 10 minutes (timeout) or once a transaction
-that spends the UTXO is published.
-
-If we inspect the PSBT that was created, we see that the locked input was indeed
-selected, the UTXO information was attached and a change output (at index 0) was
-created as well:
-
-```shell script
-$ bitcoin-cli decodepsbt cHNidP8BAHECAAAAAeJQY2VLRtutKgQYFUajEKpjFfl0Uyrm6x23OumDpe/4AQAAAAD/////AkxREgEAAAAAFgAUv6pTgbKHN60CZ+RQn5yOuH6c2WiA8PoCAAAAABYAFJDbOFU0E6zFF/M+g/AKDyqI2iUaAAAAAAABAOsCAAAAAAEBbxqXgEf9DlzcqqNM610s5pL1X258ra6+KJ22etb7HAcBAAAAAAAAAAACACT0AAAAAAAiACC7U1W0iJGhQ6o7CexDh5k36V6v3256xpA9/xmB2BybTFZdDQQAAAAAFgAUKp2ThzhswyM2QHlyvmMB6tQB7V0CSDBFAiEA4Md8RIZYqFdUPsgDyomlzMJL9bJ6Ho23JGTihXtEelgCIAeNXRLyt88SOuuWFVn3IodCE4U5D6DojIHesRmikF28ASEDHYFzMEAxfmfq98eSSnZtUwb1w7mAtHG65y8qiRFNnIkAAAAAAQEfVl0NBAAAAAAWABQqnZOHOGzDIzZAeXK+YwHq1AHtXQEDBAEAAAAAAAA=
-{
- "tx": {
- "txid": "33a316d62ddf74656967754d26ea83a3cb89e03ae44578d965156d4b71b1fce7",
- "hash": "33a316d62ddf74656967754d26ea83a3cb89e03ae44578d965156d4b71b1fce7",
- "version": 2,
- "size": 113,
- "vsize": 113,
- "weight": 452,
- "locktime": 0,
- "vin": [
- {
- "txid": "f8efa583e93ab71debe62a5374f91563aa10a3461518042aaddb464b656350e2",
- "vout": 1,
- "scriptSig": {
- "asm": "",
- "hex": ""
- },
- "sequence": 4294967295
- }
- ],
- "vout": [
- {
- "value": 0.17977676,
- "n": 0,
- "scriptPubKey": {
- "asm": "0 bfaa5381b28737ad0267e4509f9c8eb87e9cd968",
- "hex": "0014bfaa5381b28737ad0267e4509f9c8eb87e9cd968",
- "reqSigs": 1,
- "type": "witness_v0_keyhash",
- "addresses": [
- "bcrt1qh7498qdjsum66qn8u3gfl8ywhplfektg6mutfs"
- ]
- }
- },
- {
- "value": 0.50000000,
- "n": 1,
- "scriptPubKey": {
- "asm": "0 90db38553413acc517f33e83f00a0f2a88da251a",
- "hex": "001490db38553413acc517f33e83f00a0f2a88da251a",
- "reqSigs": 1,
- "type": "witness_v0_keyhash",
- "addresses": [
- "bcrt1qjrdns4f5zwkv29ln86plqzs092yd5fg6nsz8re"
- ]
- }
- }
- ]
- },
- "unknown": {
- },
- "inputs": [
- {
- "witness_utxo": {
-...
- },
- "non_witness_utxo": {
- ...
- },
- "sighash": "ALL"
- }
- ],
- "outputs": [
-...
- ],
- "fee": 0.00007050
-}
-```
-
-### Advanced example: fund PSBT with manual coin selection
-
-Let's now look at how we can implement manual coin selection by using the `fund`
-command. We again want to send half a coin to
-`bcrt1qjrdns4f5zwkv29ln86plqzs092yd5fg6nsz8re` but we want to select our inputs
-manually.
-
-The first step is to look at all available UTXOs and choose. To do so, we use
-the `listunspent` command:
-
-```shell script
-$ lncli listunspent
-
-{
- "utxos": [
- {
- "address_type": 0,
- "address": "bcrt1qmsq36rtc6ap3m0m6jryu0ez923et6kxrv46t4w",
- "amount_sat": 100000000,
- "pk_script": "0014dc011d0d78d7431dbf7a90c9c7e4455472bd58c3",
- "outpoint": "3597b451ff56bc901eb806e8c644a004e934b4c208679756b4cddc455c768c48:1",
- "confirmations": 6
- },
- {
- "address_type": 0,
- "address": "bcrt1q92we8pecdnpjxdjq09etuccpat2qrm2acu4256",
- "amount_sat": 67984726,
- "pk_script": "00142a9d9387386cc32336407972be6301ead401ed5d",
- "outpoint": "f8efa583e93ab71debe62a5374f91563aa10a3461518042aaddb464b656350e2:1",
- "confirmations": 24
- },
-...
- ]
-}
-```
-
-Next, we choose these two inputs and create the PSBT:
-
-```shell script
-$ lncli wallet psbt fund --outputs='{"bcrt1qjrdns4f5zwkv29ln86plqzs092yd5fg6nsz8re":50000000}' \
- --inputs='["3597b451ff56bc901eb806e8c644a004e934b4c208679756b4cddc455c768c48:1","f8efa583e93ab71debe62a5374f91563aa10a3461518042aaddb464b656350e2:1"]'
-
-{
- "psbt": "cHNidP8BAJoCAAAAAkiMdlxF3M20VpdnCMK0NOkEoETG6Aa4HpC8Vv9RtJc1AQAAAAAAAAAA4lBjZUtG260qBBgVRqMQqmMV+XRTKubrHbc66YOl7/gBAAAAAAAAAAACgPD6AgAAAAAWABSQ2zhVNBOsxRfzPoPwCg8qiNolGtIkCAcAAAAAFgAUuvRP5r7qAvj0egDxyX9/FH+vukgAAAAAAAEA3gIAAAAAAQEr9IZcho/gV/6fH8C8P+yhNRZP+l3YuxsyatdYcS0S6AEAAAAA/v///wLI/8+yAAAAABYAFDXoRFwgXNO5VVtVq2WpaENh6blAAOH1BQAAAAAWABTcAR0NeNdDHb96kMnH5EVUcr1YwwJHMEQCIDqugtYLp4ebJAZvOdieshLi1lLuPl2tHQG4jM4ybwEGAiBeMpCkbHBmzYvljxb1JBQyVAMuoco0xIfi+5OQdHuXaAEhAnH96NhTW09X0npE983YBsHUoMPI4U4xBtHenpZVTEqpVwAAAAEBHwDh9QUAAAAAFgAU3AEdDXjXQx2/epDJx+RFVHK9WMMBAwQBAAAAAAEA6wIAAAAAAQFvGpeAR/0OXNyqo0zrXSzmkvVfbnytrr4onbZ61vscBwEAAAAAAAAAAAIAJPQAAAAAACIAILtTVbSIkaFDqjsJ7EOHmTfpXq/fbnrGkD3/GYHYHJtMVl0NBAAAAAAWABQqnZOHOGzDIzZAeXK+YwHq1AHtXQJIMEUCIQDgx3xEhlioV1Q+yAPKiaXMwkv1snoejbckZOKFe0R6WAIgB41dEvK3zxI665YVWfcih0IThTkPoOiMgd6xGaKQXbwBIQMdgXMwQDF+Z+r3x5JKdm1TBvXDuYC0cbrnLyqJEU2ciQAAAAABAR9WXQ0EAAAAABYAFCqdk4c4bMMjNkB5cr5jAerUAe1dAQMEAQAAAAAAAA==",
- "change_output_index": 1,
- "locks": [
- {
- "id": "ede19a92ed321a4705f8a1cccc1d4f6182545d4bb4fae08bd5937831b7e38f98",
- "outpoint": "3597b451ff56bc901eb806e8c644a004e934b4c208679756b4cddc455c768c48:1",
- "expiration": 1601560626
- },
- {
- "id": "ede19a92ed321a4705f8a1cccc1d4f6182545d4bb4fae08bd5937831b7e38f98",
- "outpoint": "f8efa583e93ab71debe62a5374f91563aa10a3461518042aaddb464b656350e2:1",
- "expiration": 1601560626
- }
- ]
-}
-```
-
-Inspecting this PSBT, we notice that the two inputs were chosen and a large
-change output was added at index 1:
-
-```shell script
-$ bitcoin-cli decodepsbt cHNidP8BAJoCAAAAAkiMdlxF3M20VpdnCMK0NOkEoETG6Aa4HpC8Vv9RtJc1AQAAAAAAAAAA4lBjZUtG260qBBgVRqMQqmMV+XRTKubrHbc66YOl7/gBAAAAAAAAAAACgPD6AgAAAAAWABSQ2zhVNBOsxRfzPoPwCg8qiNolGtIkCAcAAAAAFgAUuvRP5r7qAvj0egDxyX9/FH+vukgAAAAAAAEA3gIAAAAAAQEr9IZcho/gV/6fH8C8P+yhNRZP+l3YuxsyatdYcS0S6AEAAAAA/v///wLI/8+yAAAAABYAFDXoRFwgXNO5VVtVq2WpaENh6blAAOH1BQAAAAAWABTcAR0NeNdDHb96kMnH5EVUcr1YwwJHMEQCIDqugtYLp4ebJAZvOdieshLi1lLuPl2tHQG4jM4ybwEGAiBeMpCkbHBmzYvljxb1JBQyVAMuoco0xIfi+5OQdHuXaAEhAnH96NhTW09X0npE983YBsHUoMPI4U4xBtHenpZVTEqpVwAAAAEBHwDh9QUAAAAAFgAU3AEdDXjXQx2/epDJx+RFVHK9WMMBAwQBAAAAAAEA6wIAAAAAAQFvGpeAR/0OXNyqo0zrXSzmkvVfbnytrr4onbZ61vscBwEAAAAAAAAAAAIAJPQAAAAAACIAILtTVbSIkaFDqjsJ7EOHmTfpXq/fbnrGkD3/GYHYHJtMVl0NBAAAAAAWABQqnZOHOGzDIzZAeXK+YwHq1AHtXQJIMEUCIQDgx3xEhlioV1Q+yAPKiaXMwkv1snoejbckZOKFe0R6WAIgB41dEvK3zxI665YVWfcih0IThTkPoOiMgd6xGaKQXbwBIQMdgXMwQDF+Z+r3x5JKdm1TBvXDuYC0cbrnLyqJEU2ciQAAAAABAR9WXQ0EAAAAABYAFCqdk4c4bMMjNkB5cr5jAerUAe1dAQMEAQAAAAAAAA==
-
-{
-"tx": {
- "txid": "e62356b99c3097eaa1241ff8e39b996917e66b13e4c0ccba3698982d746c3b76",
- "hash": "e62356b99c3097eaa1241ff8e39b996917e66b13e4c0ccba3698982d746c3b76",
- "version": 2,
- "size": 154,
- "vsize": 154,
- "weight": 616,
- "locktime": 0,
- "vin": [
- {
- "txid": "3597b451ff56bc901eb806e8c644a004e934b4c208679756b4cddc455c768c48",
- "vout": 1,
- "scriptSig": {
- "asm": "",
- "hex": ""
- },
- "sequence": 0
- },
- {
- "txid": "f8efa583e93ab71debe62a5374f91563aa10a3461518042aaddb464b656350e2",
- "vout": 1,
- "scriptSig": {
- "asm": "",
- "hex": ""
- },
- "sequence": 0
- }
- ],
- "vout": [
- {
- "value": 0.50000000,
- "n": 0,
- "scriptPubKey": {
- "asm": "0 90db38553413acc517f33e83f00a0f2a88da251a",
- "hex": "001490db38553413acc517f33e83f00a0f2a88da251a",
- "reqSigs": 1,
- "type": "witness_v0_keyhash",
- "addresses": [
- "bcrt1qjrdns4f5zwkv29ln86plqzs092yd5fg6nsz8re"
- ]
- }
- },
- {
- "value": 1.17974226,
- "n": 1,
- "scriptPubKey": {
- "asm": "0 baf44fe6beea02f8f47a00f1c97f7f147fafba48",
- "hex": "0014baf44fe6beea02f8f47a00f1c97f7f147fafba48",
- "reqSigs": 1,
- "type": "witness_v0_keyhash",
- "addresses": [
- "bcrt1qht6yle47agp03ar6qrcujlmlz3l6lwjgjv36zl"
- ]
- }
- }
- ]
-},
-"unknown": {
-},
-"inputs": [
-...
-],
-"outputs": [
-...
-],
-"fee": 0.00010500
-}
-```
-
-## Signing and finalizing a PSBT
-
-Assuming we now want to sign the transaction that we created in the previous
-example, we simply pass it to the `finalize` sub command of the wallet:
-
-```shell script
-$ lncli wallet psbt finalize cHNidP8BAJoCAAAAAkiMdlxF3M20VpdnCMK0NOkEoETG6Aa4HpC8Vv9RtJc1AQAAAAAAAAAA4lBjZUtG260qBBgVRqMQqmMV+XRTKubrHbc66YOl7/gBAAAAAAAAAAACgPD6AgAAAAAWABSQ2zhVNBOsxRfzPoPwCg8qiNolGtIkCAcAAAAAFgAUuvRP5r7qAvj0egDxyX9/FH+vukgAAAAAAAEA3gIAAAAAAQEr9IZcho/gV/6fH8C8P+yhNRZP+l3YuxsyatdYcS0S6AEAAAAA/v///wLI/8+yAAAAABYAFDXoRFwgXNO5VVtVq2WpaENh6blAAOH1BQAAAAAWABTcAR0NeNdDHb96kMnH5EVUcr1YwwJHMEQCIDqugtYLp4ebJAZvOdieshLi1lLuPl2tHQG4jM4ybwEGAiBeMpCkbHBmzYvljxb1JBQyVAMuoco0xIfi+5OQdHuXaAEhAnH96NhTW09X0npE983YBsHUoMPI4U4xBtHenpZVTEqpVwAAAAEBHwDh9QUAAAAAFgAU3AEdDXjXQx2/epDJx+RFVHK9WMMBAwQBAAAAAAEA6wIAAAAAAQFvGpeAR/0OXNyqo0zrXSzmkvVfbnytrr4onbZ61vscBwEAAAAAAAAAAAIAJPQAAAAAACIAILtTVbSIkaFDqjsJ7EOHmTfpXq/fbnrGkD3/GYHYHJtMVl0NBAAAAAAWABQqnZOHOGzDIzZAeXK+YwHq1AHtXQJIMEUCIQDgx3xEhlioV1Q+yAPKiaXMwkv1snoejbckZOKFe0R6WAIgB41dEvK3zxI665YVWfcih0IThTkPoOiMgd6xGaKQXbwBIQMdgXMwQDF+Z+r3x5JKdm1TBvXDuYC0cbrnLyqJEU2ciQAAAAABAR9WXQ0EAAAAABYAFCqdk4c4bMMjNkB5cr5jAerUAe1dAQMEAQAAAAAAAA==
-
-{
- "psbt": "cHNidP8BAJoCAAAAAkiMdlxF3M20VpdnCMK0NOkEoETG6Aa4HpC8Vv9RtJc1AQAAAAAAAAAA4lBjZUtG260qBBgVRqMQqmMV+XRTKubrHbc66YOl7/gBAAAAAAAAAAACgPD6AgAAAAAWABSQ2zhVNBOsxRfzPoPwCg8qiNolGtIkCAcAAAAAFgAUuvRP5r7qAvj0egDxyX9/FH+vukgAAAAAAAEA3gIAAAAAAQEr9IZcho/gV/6fH8C8P+yhNRZP+l3YuxsyatdYcS0S6AEAAAAA/v///wLI/8+yAAAAABYAFDXoRFwgXNO5VVtVq2WpaENh6blAAOH1BQAAAAAWABTcAR0NeNdDHb96kMnH5EVUcr1YwwJHMEQCIDqugtYLp4ebJAZvOdieshLi1lLuPl2tHQG4jM4ybwEGAiBeMpCkbHBmzYvljxb1JBQyVAMuoco0xIfi+5OQdHuXaAEhAnH96NhTW09X0npE983YBsHUoMPI4U4xBtHenpZVTEqpVwAAAAEBHwDh9QUAAAAAFgAU3AEdDXjXQx2/epDJx+RFVHK9WMMBCGwCSDBFAiEAuiv52IX5wZlYJqqVGsQPfeQ/kneCNRD34v5yplNpuMYCIECHVUhjHPKSiWSsYEKD4JWGAyUwQHgDytA1whFOyLclASECg7PDfGE/uURta5/R42Vso6QKmVAgYMhjWlXENkE/x+QAAQDrAgAAAAABAW8al4BH/Q5c3KqjTOtdLOaS9V9ufK2uviidtnrW+xwHAQAAAAAAAAAAAgAk9AAAAAAAIgAgu1NVtIiRoUOqOwnsQ4eZN+ler99uesaQPf8Zgdgcm0xWXQ0EAAAAABYAFCqdk4c4bMMjNkB5cr5jAerUAe1dAkgwRQIhAODHfESGWKhXVD7IA8qJpczCS/Wyeh6NtyRk4oV7RHpYAiAHjV0S8rfPEjrrlhVZ9yKHQhOFOQ+g6IyB3rEZopBdvAEhAx2BczBAMX5n6vfHkkp2bVMG9cO5gLRxuucvKokRTZyJAAAAAAEBH1ZdDQQAAAAAFgAUKp2ThzhswyM2QHlyvmMB6tQB7V0BCGwCSDBFAiEAqK7FSrqWe2non0kl96yu2+gSXGPYPC7ZjzVZEMMWtpYCIGTzCDHZhJYGPrsnBWU8o0Eyd4nBa+6d037xGFcGUYJLASECORgkj75Xu8+DTh8bqYBIvNx1hSxV7VSJOwY6jam6LY8AAAA=",
- "final_tx": "02000000000102488c765c45dccdb456976708c2b434e904a044c6e806b81e90bc56ff51b49735010000000000000000e25063654b46dbad2a04181546a310aa6315f974532ae6eb1db73ae983a5eff80100000000000000000280f0fa020000000016001490db38553413acc517f33e83f00a0f2a88da251ad224080700000000160014baf44fe6beea02f8f47a00f1c97f7f147fafba4802483045022100ba2bf9d885f9c1995826aa951ac40f7de43f9277823510f7e2fe72a65369b8c6022040875548631cf2928964ac604283e09586032530407803cad035c2114ec8b72501210283b3c37c613fb9446d6b9fd1e3656ca3a40a99502060c8635a55c436413fc7e402483045022100a8aec54aba967b69e89f4925f7acaedbe8125c63d83c2ed98f355910c316b696022064f30831d98496063ebb2705653ca341327789c16bee9dd37ef118570651824b0121023918248fbe57bbcf834e1f1ba98048bcdc75852c55ed54893b063a8da9ba2d8f00000000"
-}
-```
-
-That final transaction can now, in theory, be broadcast. But **it is very
-important** that you **do not** publish it manually if any of the involved
-outputs are used to fund a channel. See
-[the safety warning below](#safety-warning) to learn the reason for this.
-
-## Opening a channel by using a PSBT
-
-This is a step-by-step guide on how to open a channel with `lnd` by using a PSBT
-as the funding transaction.
-We will use `bitcoind` to create and sign the transaction just to keep the
-example simple. Of course any other PSBT compatible wallet could be used and the
-process would likely be spread out over multiple signing steps. The goal of this
-example is not to cover each and every possible edge case but to help users of
-`lnd` understand what inputs the `lncli` utility expects.
-
-The goal is to open a channel of 1'234'567 satoshis to the node
-`03db1e56e5f76bc4018cf6f03d1bb98a7ae96e3f18535e929034f85e7f1ca2b8ac` by using
-a PSBT. That means, `lnd` can have a wallet balance of `0` and is still able to
-open a channel. We'll jump into an example right away.
-
-The new funding flow has a small caveat: _Time matters_.
-
-When opening a channel using the PSBT flow, we start the negotiation
-with the remote peer immediately so we can obtain their multisig key they are
-going to use for the channel. Then we pause the whole process until we get a
-fully signed transaction back from the user. Unfortunately there is no reliable
-way to know after how much time the remote node starts to clean up and "forgets"
-about the pending channel. If the remote node is an `lnd` node, we know it's
-after 10 minutes. **So as long as the whole process takes less than 10 minutes,
-everything should work fine.**
-
-### Safety warning
-
-**DO NOT PUBLISH** the finished transaction by yourself or with another tool.
-lnd MUST publish it in the proper funding flow order **OR THE FUNDS CAN BE
-LOST**!
-
-This is very important to remember when using wallets like `Wasabi` for
-instance, where the "publish" button is very easy to hit by accident.
-
-### 1. Use the new `--psbt` flag in `lncli openchannel`
-
-The new `--psbt` flag in the `openchannel` command starts an interactive dialog
-between `lncli` and the user. Below the command you see an example output from
-a regtest setup. Of course all values will be different.
-
-```shell script
-$ lncli openchannel --node_key 03db1e56e5f76bc4018cf6f03d1bb98a7ae96e3f18535e929034f85e7f1ca2b8ac --local_amt 1234567 --psbt
-
-Starting PSBT funding flow with pending channel ID fc7853889a04d33b8115bd79ebc99c5eea80d894a0bead40fae5a06bcbdccd3d.
-PSBT funding initiated with peer 03db1e56e5f76bc4018cf6f03d1bb98a7ae96e3f18535e929034f85e7f1ca2b8ac.
-Please create a PSBT that sends 0.01234567 BTC (1234567 satoshi) to the funding address bcrt1qh33ghvgjj3ef625nl9jxz6nnrz2z9e65vsdey7w5msrklgr6rc0sv0s08q.
-
-Example with bitcoind:
- bitcoin-cli walletcreatefundedpsbt [] '[{"bcrt1qh33ghvgjj3ef625nl9jxz6nnrz2z9e65vsdey7w5msrklgr6rc0sv0s08q":0.01234567}]'
-
-Or if you are using a wallet that can fund a PSBT directly (currently not
-possible with bitcoind), you can use this PSBT that contains the same address
-and amount: cHNidP8BADUCAAAAAAGH1hIAAAAAACIAILxii7ESlHKdKpP5ZGFqcxiUIudUZBuSedTcB2+geh4fAAAAAAAA
-
-Paste the funded PSBT here to continue the funding flow.
-Base64 encoded PSBT:
-```
-
-The command line now waits until a PSBT is entered. We'll create one in the next
-step. Make sure to use a new shell window/tab for the next commands and leave
-the prompt from the `openchannel` running as is.
-
-### 2a. Use `bitcoind` to create a funding transaction
-
-The output of the last command already gave us an example command to use with
-`bitcoind`. We'll go ahead and execute it now. The meaning of this command is
-something like "bitcoind, give me a PSBT that sends the given amount to the
-given address, choose any input you see fit":
-
-```shell script
-$ bitcoin-cli walletcreatefundedpsbt [] '[{"bcrt1qh33ghvgjj3ef625nl9jxz6nnrz2z9e65vsdey7w5msrklgr6rc0sv0s08q":0.01234567}]'
-
-{
- "psbt": "cHNidP8BAH0CAAAAAbxLLf9+AYfqfF69QAQuETnL6cas7GDiWBZF+3xxc/Y/AAAAAAD+////AofWEgAAAAAAIgAgvGKLsRKUcp0qk/lkYWpzGJQi51RkG5J51NwHb6B6Hh+1If0jAQAAABYAFL+6THEGhybJnOkFGSRFbtCcPOG8AAAAAAABAR8wBBAkAQAAABYAFHemJ11XF7CU7WXBIJLD/qZF+6jrAAAA",
- "fee": 0.00003060,
- "changepos": 1
-}
-```
-
-We see that `bitcoind` has given us a transaction that would pay `3060` satoshi
-in fees. Fee estimation/calculation can be changed with parameters of the
-`walletcreatefundedpsbt` command. To see all options, use
-`bitcoin-cli help walletcreatefundedpsbt`.
-
-If we want to know what exactly is in this PSBT, we can look at it with the
-`decodepsbt` command:
-
-```shell script
-$ bitcoin-cli decodepsbt cHNidP8BAH0CAAAAAbxLLf9+AYfqfF69QAQuETnL6cas7GDiWBZF+3xxc/Y/AAAAAAD+////AofWEgAAAAAAIgAgvGKLsRKUcp0qk/lkYWpzGJQi51RkG5J51NwHb6B6Hh+1If0jAQAAABYAFL+6THEGhybJnOkFGSRFbtCcPOG8AAAAAAABAR8wBBAkAQAAABYAFHemJ11XF7CU7WXBIJLD/qZF+6jrAAAA
-
-{
- "tx": {
- "txid": "374504e4246a93a45b4a2c2bc31d8adc8525aa101c7b9065db6dc01c4bdfce0a",
- "hash": "374504e4246a93a45b4a2c2bc31d8adc8525aa101c7b9065db6dc01c4bdfce0a",
- "version": 2,
- "size": 125,
- "vsize": 125,
- "weight": 500,
- "locktime": 0,
- "vin": [
- {
- "txid": "3ff673717cfb451658e260ecacc6e9cb39112e0440bd5e7cea87017eff2d4bbc",
- "vout": 0,
- "scriptSig": {
- "asm": "",
- "hex": ""
- },
- "sequence": 4294967294
- }
- ],
- "vout": [
- {
- "value": 0.01234567,
- "n": 0,
- "scriptPubKey": {
- "asm": "0 bc628bb11294729d2a93f964616a73189422e754641b9279d4dc076fa07a1e1f",
- "hex": "0020bc628bb11294729d2a93f964616a73189422e754641b9279d4dc076fa07a1e1f",
- "reqSigs": 1,
- "type": "witness_v0_scripthash",
- "addresses": [
- "bcrt1qh33ghvgjj3ef625nl9jxz6nnrz2z9e65vsdey7w5msrklgr6rc0sv0s08q"
- ]
- }
- },
- {
- "value": 48.98759093,
- "n": 1,
- "scriptPubKey": {
- "asm": "0 bfba4c71068726c99ce9051924456ed09c3ce1bc",
- "hex": "0014bfba4c71068726c99ce9051924456ed09c3ce1bc",
- "reqSigs": 1,
- "type": "witness_v0_keyhash",
- "addresses": [
- "bcrt1qh7aycugxsunvn88fq5vjg3tw6zwrecduvvgre5"
- ]
- }
- }
- ]
- },
- "unknown": {
- },
- "inputs": [
- {
- "witness_utxo": {
- "amount": 48.99996720,
- "scriptPubKey": {
- "asm": "0 77a6275d5717b094ed65c12092c3fea645fba8eb",
- "hex": "001477a6275d5717b094ed65c12092c3fea645fba8eb",
- "type": "witness_v0_keyhash",
- "address": "bcrt1qw7nzwh2hz7cffmt9cysf9sl75ezlh28tzl4n4e"
- }
- }
- }
- ],
- "outputs": [
- {
- },
- {
- }
- ],
- "fee": 0.00003060
-}
-```
-
-This tells us that we got a PSBT with a big input, the channel output and a
-change output for the rest. Everything is there but the signatures/witness data,
-which is exactly what we need.
-
-### 2b. Use `lnd` to create a funding transaction
-
-Starting with version `v0.12.0`, `lnd` can also create PSBTs. This assumes a
-scenario where one instance of `lnd` only has public keys (watch only mode) and
-a secondary, hardened and firewalled `lnd` instance has the corresponding
-private keys. On the watching only mode, the following command can be used to
-create the funding PSBT:
-
-```shell script
-$ lncli wallet psbt fund --outputs='{"bcrt1qh33ghvgjj3ef625nl9jxz6nnrz2z9e65vsdey7w5msrklgr6rc0sv0s08q":1234567}'
-
-{
- "psbt": "cHNidP8BAH0CAAAAAUiMdlxF3M20VpdnCMK0NOkEoETG6Aa4HpC8Vv9RtJc1AQAAAAD/////AofWEgAAAAAAIgAgvGKLsRKUcp0qk/lkYWpzGJQi51RkG5J51NwHb6B6Hh+X7OIFAAAAABYAFNigOB6EbCLRi+Evlv4r2yJx63NxAAAAAAABAN4CAAAAAAEBK/SGXIaP4Ff+nx/AvD/soTUWT/pd2LsbMmrXWHEtEugBAAAAAP7///8CyP/PsgAAAAAWABQ16ERcIFzTuVVbVatlqWhDYem5QADh9QUAAAAAFgAU3AEdDXjXQx2/epDJx+RFVHK9WMMCRzBEAiA6roLWC6eHmyQGbznYnrIS4tZS7j5drR0BuIzOMm8BBgIgXjKQpGxwZs2L5Y8W9SQUMlQDLqHKNMSH4vuTkHR7l2gBIQJx/ejYU1tPV9J6RPfN2AbB1KDDyOFOMQbR3p6WVUxKqVcAAAABAR8A4fUFAAAAABYAFNwBHQ1410Mdv3qQycfkRVRyvVjDAQMEAQAAAAAAAA==",
- "change_output_index": 1,
- "locks": [
- {
- "id": "ede19a92ed321a4705f8a1cccc1d4f6182545d4bb4fae08bd5937831b7e38f98",
- "outpoint": "3597b451ff56bc901eb806e8c644a004e934b4c208679756b4cddc455c768c48:1",
- "expiration": 1601562037
- }
- ]
-}
-```
-
-### 3. Verify and sign the PSBT
-
-Now that we have a valid PSBT that has everything but the final
-signatures/witness data, we can paste it into the prompt in `lncli` that is
-still waiting for our input.
-
-```shell script
-...
-Base64 encoded PSBT: cHNidP8BAH0CAAAAAbxLLf9+AYfqfF69QAQuETnL6cas7GDiWBZF+3xxc/Y/AAAAAAD+////AofWEgAAAAAAIgAgvGKLsRKUcp0qk/lkYWpzGJQi51RkG5J51NwHb6B6Hh+1If0jAQAAABYAFL+6THEGhybJnOkFGSRFbtCcPOG8AAAAAAABAR8wBBAkAQAAABYAFHemJ11XF7CU7WXBIJLD/qZF+6jrAAAA
-
-PSBT verified by lnd, please continue the funding flow by signing the PSBT by
-all required parties/devices. Once the transaction is fully signed, paste it
-again here.
-
-Base64 encoded PSBT:
-```
-
-We can now go ahead and sign the transaction. We are going to use `bitcoind` for
-this again, but in practice this would now happen on a hardware wallet and
-perhaps `bitcoind` would only know the public keys and couldn't sign for the
-transaction itself. Again, this is only an example and can't reflect all
-real-world use cases.
-
-```shell script
-$ bitcoin-cli walletprocesspsbt cHNidP8BAH0CAAAAAbxLLf9+AYfqfF69QAQuETnL6cas7GDiWBZF+3xxc/Y/AAAAAAD+////AofWEgAAAAAAIgAgvGKLsRKUcp0qk/lkYWpzGJQi51RkG5J51NwHb6B6Hh+1If0jAQAAABYAFL+6THEGhybJnOkFGSRFbtCcPOG8AAAAAAABAR8wBBAkAQAAABYAFHemJ11XF7CU7WXBIJLD/qZF+6jrAAAA
-
-{
-"psbt": "cHNidP8BAH0CAAAAAbxLLf9+AYfqfF69QAQuETnL6cas7GDiWBZF+3xxc/Y/AAAAAAD+////AofWEgAAAAAAIgAgvGKLsRKUcp0qk/lkYWpzGJQi51RkG5J51NwHb6B6Hh+1If0jAQAAABYAFL+6THEGhybJnOkFGSRFbtCcPOG8AAAAAAABAR8wBBAkAQAAABYAFHemJ11XF7CU7WXBIJLD/qZF+6jrAQhrAkcwRAIgHKQbenZYvgADRd9TKGVO36NnaIgW3S12OUg8XGtSrE8CICmeaYoJ/U7Ecm+/GneY8i2hu2QCaQnuomJgzn+JAnrDASEDUBmCLcsybA5qXSRBBdZ0Uk/FQiay9NgOpv4D26yeJpAAAAA=",
-"complete": true
-}
-```
-
-If you are using the two `lnd` node model as described in
-[2b](#2b-use-lnd-to-create-a-funding-transaction), you can achieve the same
-result with the following command:
-
-```shell script
-$ lncli wallet psbt finalize cHNidP8BAH0CAAAAAUiMdlxF3M20VpdnCMK0NOkEoETG6Aa4HpC8Vv9RtJc1AQAAAAD/////AofWEgAAAAAAIgAgvGKLsRKUcp0qk/lkYWpzGJQi51RkG5J51NwHb6B6Hh+X7OIFAAAAABYAFNigOB6EbCLRi+Evlv4r2yJx63NxAAAAAAABAN4CAAAAAAEBK/SGXIaP4Ff+nx/AvD/soTUWT/pd2LsbMmrXWHEtEugBAAAAAP7///8CyP/PsgAAAAAWABQ16ERcIFzTuVVbVatlqWhDYem5QADh9QUAAAAAFgAU3AEdDXjXQx2/epDJx+RFVHK9WMMCRzBEAiA6roLWC6eHmyQGbznYnrIS4tZS7j5drR0BuIzOMm8BBgIgXjKQpGxwZs2L5Y8W9SQUMlQDLqHKNMSH4vuTkHR7l2gBIQJx/ejYU1tPV9J6RPfN2AbB1KDDyOFOMQbR3p6WVUxKqVcAAAABAR8A4fUFAAAAABYAFNwBHQ1410Mdv3qQycfkRVRyvVjDAQMEAQAAAAAAAA==
-
-{
- "psbt": "cHNidP8BAH0CAAAAAUiMdlxF3M20VpdnCMK0NOkEoETG6Aa4HpC8Vv9RtJc1AQAAAAD/////AofWEgAAAAAAIgAgvGKLsRKUcp0qk/lkYWpzGJQi51RkG5J51NwHb6B6Hh+X7OIFAAAAABYAFNigOB6EbCLRi+Evlv4r2yJx63NxAAAAAAABAN4CAAAAAAEBK/SGXIaP4Ff+nx/AvD/soTUWT/pd2LsbMmrXWHEtEugBAAAAAP7///8CyP/PsgAAAAAWABQ16ERcIFzTuVVbVatlqWhDYem5QADh9QUAAAAAFgAU3AEdDXjXQx2/epDJx+RFVHK9WMMCRzBEAiA6roLWC6eHmyQGbznYnrIS4tZS7j5drR0BuIzOMm8BBgIgXjKQpGxwZs2L5Y8W9SQUMlQDLqHKNMSH4vuTkHR7l2gBIQJx/ejYU1tPV9J6RPfN2AbB1KDDyOFOMQbR3p6WVUxKqVcAAAABAR8A4fUFAAAAABYAFNwBHQ1410Mdv3qQycfkRVRyvVjDAQhrAkcwRAIgU3Ow7cLkKrg8BJe0U0n9qFLPizqEzY0JtjVlpWOEk14CID/4AFNfgwNENN2LoOs0C6uHgt4sk8rNoZG+VMGzOC/HASECg7PDfGE/uURta5/R42Vso6QKmVAgYMhjWlXENkE/x+QAAAA=",
- "final_tx": "02000000000101488c765c45dccdb456976708c2b434e904a044c6e806b81e90bc56ff51b497350100000000ffffffff0287d6120000000000220020bc628bb11294729d2a93f964616a73189422e754641b9279d4dc076fa07a1e1f97ece20500000000160014d8a0381e846c22d18be12f96fe2bdb2271eb73710247304402205373b0edc2e42ab83c0497b45349fda852cf8b3a84cd8d09b63565a56384935e02203ff800535f83034434dd8ba0eb340bab8782de2c93cacda191be54c1b3382fc701210283b3c37c613fb9446d6b9fd1e3656ca3a40a99502060c8635a55c436413fc7e400000000"
-}
-```
-
-Interpreting the output, we now have a complete, final, and signed transaction
-inside the PSBT.
-
-**!!! WARNING !!!**
-
-**DO NOT PUBLISH** the finished transaction by yourself or with another tool.
-lnd MUST publish it in the proper funding flow order **OR THE FUNDS CAN BE
-LOST**!
-
-Let's give it to `lncli` to continue:
-
-```shell script
-...
-Base64 encoded PSBT: cHNidP8BAH0CAAAAAbxLLf9+AYfqfF69QAQuETnL6cas7GDiWBZF+3xxc/Y/AAAAAAD+////AofWEgAAAAAAIgAgvGKLsRKUcp0qk/lkYWpzGJQi51RkG5J51NwHb6B6Hh+1If0jAQAAABYAFL+6THEGhybJnOkFGSRFbtCcPOG8AAAAAAABAR8wBBAkAQAAABYAFHemJ11XF7CU7WXBIJLD/qZF+6jrAQhrAkcwRAIgHKQbenZYvgADRd9TKGVO36NnaIgW3S12OUg8XGtSrE8CICmeaYoJ/U7Ecm+/GneY8i2hu2QCaQnuomJgzn+JAnrDASEDUBmCLcsybA5qXSRBBdZ0Uk/FQiay9NgOpv4D26yeJpAAAAA=
-{
- "funding_txid": "374504e4246a93a45b4a2c2bc31d8adc8525aa101c7b9065db6dc01c4bdfce0a"
-}
-```
-
-Success! We now have the final transaction ID of the published funding
-transaction. Now we only have to wait for some confirmations, then we can start
-using the freshly created channel.
-
-## Batch opening channels
-
-The PSBT channel funding flow makes it possible to open multiple channels in one
-transaction. This can be achieved by taking the initial PSBT returned by the
-`openchannel` and feed it into the `--base_psbt` parameter of the next
-`openchannel` command. This won't work with `bitcoind` though, as it cannot take
-a PSBT as partial input for the `walletcreatefundedpsbt` command.
-
-However, the `bitcoin-cli` examples from the command line can be combined into
-a single command. For example:
-
-Channel 1:
-```shell script
-$ bitcoin-cli walletcreatefundedpsbt [] '[{"tb1qywvazres587w9wyy8uw03q8j9ek6gc9crwx4jvhqcmew4xzsvqcq3jjdja":0.01000000}]'
-```
-
-Channel 2:
-```shell script
-$ bitcoin-cli walletcreatefundedpsbt [] '[{"tb1q53626fcwwtcdc942zaf4laqnr3vg5gv4g0hakd2h7fw2pmz6428sk3ezcx":0.01000000}]'
-```
-
-Combined command to get batch PSBT:
-```shell script
-$ bitcoin-cli walletcreatefundedpsbt [] '[{"tb1q53626fcwwtcdc942zaf4laqnr3vg5gv4g0hakd2h7fw2pmz6428sk3ezcx":0.01000000},{"tb1qywvazres587w9wyy8uw03q8j9ek6gc9crwx4jvhqcmew4xzsvqcq3jjdja":0.01000000}]'
-```
-
-### Safety warning about batch transactions
-
-As mentioned before, the PSBT channel funding flow works by pausing the funding
-negotiation with the remote peer directly after the multisig keys have been
-exchanged. That means, the channel isn't fully opened yet at the time the PSBT
-is signed. This is fine for a single channel because the signed transaction is
-only published after the counter-signed commitment transactions were exchanged
-and the funds can be spent again by both parties.
-
-When doing batch transactions, **publishing** the whole transaction with
-multiple channel funding outputs **too early could lead to loss of funds**!
-
-For example, let's say we want to open two channels. We call `openchannel --psbt`
-two times, combine the funding addresses as shown above, verify the PSBT, sign
-it and finally paste it into the terminal of the first command. `lnd` then goes
-ahead and finishes the negotiations with peer 1. If successful, `lnd` publishes
-the transaction. In the meantime we paste the same PSBT into the second terminal
-window. But by now, the peer 2 for channel 2 has timed out our funding flow and
-aborts the negotiation. Normally this would be fine, we would just not publish
-the funding transaction. But in the batch case, channel 1 has already published
-the transaction that contains both channel outputs. But because we never got a
-signature from peer 2 to spend the funds now locked in a 2-of-2 multisig, the
-fund are lost (unless peer 2 cooperates in a complicated, manual recovery
-process).
-
-### Use --no_publish for batch transactions
-
-To mitigate the problem described in the section above, when open multiple
-channels in one batch transaction, it is **imperative to use the
-`--no_publish`** flag for each channel but the very last. This prevents the
-full batch transaction to be published before each and every single channel has
-fully completed its funding negotiation.
diff --git a/lnd/docs/recovery.md b/lnd/docs/recovery.md
deleted file mode 100644
index 49ac01a1..00000000
--- a/lnd/docs/recovery.md
+++ /dev/null
@@ -1,365 +0,0 @@
-# Table of Contents
-
-* [Recovering Funds From `lnd` (funds are safu!)](#recovering-funds-from-lnd-funds-are-safu)
- * [On-Chain Recovery](#on-chain-recovery)
- * [24-word Cipher Seeds](#24-word-cipher-seeds)
- * [Wallet and Seed Passphrases](#wallet-and-seed-passphrases)
- * [Starting On-Chain Recovery](#starting-on-chain-recovery)
- * [Forced In-Place Rescan](#forced-in-place-rescan)
- * [Off-Chain Recovery](#off-chain-recovery)
- * [Obtaining SCBs](#obtaining-scbs)
- * [On-Disk `channel.backup`](#on-disk-channelbackup)
- * [Using the `ExportChanBackup` RPC](#using-the-exportchanbackup-rpc)
- * [Streaming Updates via `SubscribeChannelBackups`.](#streaming-updates-via-subscribechannelbackups)
- * [Recovering Using SCBs](#recovering-using-scbs)
-
-# Recovering Funds From `lnd` (funds are safu!)
-
-In this document, we'll go over the various built-in mechanisms for recovering
-funds from `lnd` due to any sort of data loss, or malfunction. Coins in `lnd`
-can exist in one of two pools: on-chain or off-chain. On-chain funds are
-outputs under the control of `lnd` that can be spent immediately, and without
-any auxiliary data. Off-chain funds on the other hand exist within a 2-of-2
-multi-sig output typically referred to as a payment channel. Depending on the
-exact nature of operation of a given `lnd` node, one of these pools of funds
-may be empty.
-
-Fund recovery for `lnd` will require two pieces of data:
- 1. Your 24-word cipher seed
- 2. Your encrypted Static Channel Backup file (or the raw data)
-
-If one is only attempting to recover _on chain_ funds, then only the first item
-is required.
-
-The SCB file is encrypted using a key _derived_ from the user's seed. As a
-result, it cannot be used in isolation.
-
-## On-Chain Recovery
-
-### 24-word Cipher Seeds
-
-When a new `lnd` node is created, it's given a 24-word seed phrase, called an
-[`cipher seed`](https://github.com/lightningnetwork/lnd/tree/master/aezeed).
-The two seed formats look similar, but the only commonality they share are
-using the same default English dictionary. A valid seed phrase obtained over
-the CLI `lncli create` command looks something like:
-```
-!!!YOU MUST WRITE DOWN THIS SEED TO BE ABLE TO RESTORE THE WALLET!!!
-
----------------BEGIN LND CIPHER SEED---------------
- 1. ability 2. noise 3. lift 4. document
- 5. certain 6. month 7. shoot 8. perfect
- 9. matrix 10. mango 11. excess 12. turkey
-13. river 14. pitch 15. fluid 16. rack
-17. drill 18. text 19. buddy 20. pool
-21. soul 22. fatal 23. ship 24. jelly
----------------END LND CIPHER SEED-----------------
-
-!!!YOU MUST WRITE DOWN THIS SEED TO BE ABLE TO RESTORE THE WALLET!!!
-```
-
-### Wallet and Seed Passphrases
-
-During the creation process, users are first prompted to enter a **wallet
-password**:
-```
-Input wallet password:
-Confirm wallet password:
-```
-
-This password is used to _encrypt_ the wallet on disk, which includes any
-derived master private keys or public key data.
-
-Users can also _optionally_ enter a second passphrase which we call the _cipher
-seed passphrase_:
-```
-Your cipher seed can optionally be encrypted.
-Input your passphrase if you wish to encrypt it (or press enter to proceed without a cipher seed passphrase):
-```
-
-If specified, then this will be used to encrypt the cipher seed itself. The
-cipher seed format is unique in that the 24-word phrase is actually a
-_ciphertext_. As a result, there's no standard word list as any arbitrary
-encoding can be used. If a passphrase is specified, then the cipher seed you
-write down is actually an _encryption_ of the entropy used to generate the BIP
-32 root key for the wallet. Unlike a BIP 39 24-word phrase, the cipher seed is
-able to _detect_ incorrect passphrase. BIP 39 on the other hand, will instead
-silently decrypt to a new (likely empty) wallet.
-
-### Starting On-Chain Recovery
-
-The initial entry point to trigger recovery of on-chain funds in the command
-line is the `lncli create` command.
-```
-⛰ lncli create
-```
-
-Next, one can enter a _new_ wallet password to encrypt any newly derived keys
-as a result of the recovery process.
-```
-Input wallet password:
-Confirm wallet password:
-```
-
-Once a new wallet password has been obtained, the user will be prompted for
-their _existing_ cipher seed:
-```
-Input your 24-word mnemonic separated by spaces: ability noise lift document certain month shoot perfect matrix mango excess turkey river pitch fluid rack drill text buddy pool soul fatal ship jelly
-```
-
-If a _cipher seed passphrase_ was used when the seed was created, it MUST be entered now:
-```
-Input your cipher seed passphrase (press enter if your seed doesn't have a passphrase):
-```
-
-Finally, the user has an option to choose a _recovery window_:
-```
-Input an optional address look-ahead used to scan for used keys (default 2500):
-```
-
-The recovery window is a metric that the on-chain rescanner will use to
-determine when all the "used" addresses have been found. If the recovery window
-is two, lnd will fail to find funds in any addresses generated after the point
-in which two consecutive addresses were generated but never used. If an `lnd`
-on-chain wallet was extensively used, then users may want to _increase_ the
-default value.
-
-If all the information provided was valid, then you'll be presented with the
-seed again:
-```
-
-!!!YOU MUST WRITE DOWN THIS SEED TO BE ABLE TO RESTORE THE WALLET!!!
-
----------------BEGIN LND CIPHER SEED---------------
- 1. ability 2. noise 3. lift 4. document
- 5. certain 6. month 7. shoot 8. perfect
- 9. matrix 10. mango 11. excess 12. turkey
-13. river 14. pitch 15. fluid 16. rack
-17. drill 18. text 19. buddy 20. pool
-21. soul 22. fatal 23. ship 24. jelly
----------------END LND CIPHER SEED-----------------
-
-!!!YOU MUST WRITE DOWN THIS SEED TO BE ABLE TO RESTORE THE WALLET!!!
-
-lnd successfully initialized!
-```
-
-In `lnd`'s logs, you should see something along the lines of (irrelevant lines skipped):
-```
-[INF] LNWL: Opened wallet
-[INF] LTND: Wallet recovery mode enabled with address lookahead of 2500 addresses
-[INF] LNWL: RECOVERY MODE ENABLED -- rescanning for used addresses with recovery_window=2500
-[INF] CHBU: Updating backup file at test_lnd3/data/chain/bitcoin/simnet/channel.backup
-[INF] CHBU: Swapping old multi backup file from test_lnd3/data/chain/bitcoin/simnet/temp-dont-use.backup to test_lnd3/data/chain/bitcoin/simnet/channel.backup
-[INF] LNWL: Seed birthday surpassed, starting recovery of wallet from height=748 hash=3032830c812a4a6ea305d8ead13b52e9e69d6400ff3c997970b6f76fbc770920 with recovery-window=2500
-[INF] LNWL: Scanning 1 blocks for recoverable addresses
-[INF] LNWL: Recovered addresses from blocks 748-748
-[INF] LNWL: Started rescan from block 3032830c812a4a6ea305d8ead13b52e9e69d6400ff3c997970b6f76fbc770920 (height 748) for 800 addresses
-[INF] LNWL: Catching up block hashes to height 748, this might take a while
-[INF] LNWL: Done catching up block hashes
-[INF] LNWL: Finished rescan for 800 addresses (synced to block 3032830c812a4a6ea305d8ead13b52e9e69d6400ff3c997970b6f76fbc770920, height 748)
-```
-
-That final line indicates the rescan is complete! If not all funds have
-appeared, then the user may need to _repeat_ the process with a higher recovery
-window. Depending on how old the wallet is (the cipher seed stores the wallet's
-birthday!) and how many addresses were used, the rescan may take anywhere from
-a few minutes to a few hours. To track the recovery progress, one can use the
-command `lncli getrecoveryinfo`. When finished, the following is returned,
-```
-{
- "recovery_mode": true,
- "recovery_finished": true,
- "progress": 1
-}
-```
-
-If the rescan wasn't able to complete fully (`lnd` was shutdown for example),
-then from `lncli unlock`, it's possible to _restart_ the rescan from where it
-left off with the `--recovery-window` argument:
-```
-⛰ lncli unlock --recovery_window=2500
-```
-
-Note that if this argument is not specified, then the wallet will not
-_re-enter_ the recovery mode and may miss funds during the portion of the
-rescan.
-
-### Forced In-Place Rescan
-
-The recovery methods described above assume a clean slate for a node, so
-there's no existing UTXO or key data in the node's database. However, there're
-times when an _existing_ node may want to _manually_ rescan the chain. We have
-a command line flag for that! Just start `lnd` and add the following flag:
-```
-⛰ lnd --reset-wallet-transactions
-```
-
-The `--reset-wallet-transactions` flag will _reset_ the best synced height of
-the wallet back to its birthday, or genesis if the birthday isn't known (for
-some older wallets).
-
-Just run `lnd` with the flag, unlock it, then the wallet should begin
-rescanning. An entry resembling the following will show up in the logs once it's
-complete:
-```
-[INF] LNWL: Finished rescan for 800 addresses (synced to block 3032830c812a4a6ea305d8ead13b52e9e69d6400ff3c997970b6f76fbc770920, height 748)
-```
-
-**Remember to remove the flag once the rescan was completed successfully to
-avoid rescanning again for every restart of lnd**.
-
-## Off-Chain Recovery
-
-After version `v0.6-beta` of `lnd`, the daemon now ships with a new feature
-called Static Channel Backups (SCBs). We call these _static_ as they only need
-to be obtained _once_: when the channel is created. From there on, a backup is
-good until the channel is closed. The backup contains all the information we
-need to initiate the Data Loss Protection (DLP) feature in the protocol, which
-ultimately leads to us recovering the funds from the channel _on-chain_. This
-is a foolproof _safe_ backup mechanism.
-
-We say _safe_, as care has been taken to ensure that there are no foot guns in
-this method of backing up channels, vs doing things like `rsync`ing or copying
-the `channel.db` file periodically. Those methods can be dangerous as one never
-knows if they have the latest state of a channel or not. Instead, we aim to
-provide a simple, safe method to allow users to recover the settled funds in
-their channels in the case of partial or complete data loss. The backups
-themselves are encrypted using a key derived from the user's seed, this way we
-protect privacy of the users channels in the back up state, and ensure that a
-random node can't attempt to import another user's channels.
-
-Given a valid SCB, the user will be able to recover funds that are fully
-settled within their channels. By "fully settled" we mean funds that are in the
-base commitment outputs, and not HTLCs. We can only restore these funds as
-right after the channel is created, as we have all the data required to make a
-backup, but lack information about the future HTLCs that the channel will
-process.
-
-### Obtaining SCBs
-
-#### On-Disk `channel.backup`
-
-There are multiple ways of obtaining SCBs from `lnd`. The most commonly used
-method will likely be via the `channels.backup` file that's stored on-disk
-alongside the rest of the chain data. This is a special file that contains SCB
-entries for _all_ currently open channels. Each time a channel is opened or
-closed, this file is updated on disk in a safe manner (atomic file rename). As
-a result, unlike the `channel.db` file, it's _always_ safe to copy this file
-for backup at ones desired location. The default location on Linux is:
-```
-~/.lnd/data/chain/bitcoin/mainnet/channel.backup
-```
-
-An example of using file system level notification to [copy the backup to a
-distinct volume/partition/drive can be found
-here](https://gist.github.com/alexbosworth/2c5e185aedbdac45a03655b709e255a3).
-
-#### Using the `ExportChanBackup` RPC
-
-Another way to obtain SCBS for all or a target channel is via the new
-`exportchanbackup` `lncli` command:
-```
-⛰ lncli --network=simnet exportchanbackup --chan_point=29be6d259dc71ebdf0a3a0e83b240eda78f9023d8aeaae13c89250c7e59467d5:0
-{
- "chan_point": "29be6d259dc71ebdf0a3a0e83b240eda78f9023d8aeaae13c89250c7e59467d5:0",
- "chan_backup": "02e7b423c8cf11038354732e9696caff9d5ac9720440f70a50ca2b9fcef5d873c8e64d53bdadfe208a86c96c7f31dc4eb370a02631bb02dce6611c435753a0c1f86c9f5b99006457f0dc7ee4a1c19e0d31a1036941d65717a50136c877d66ec80bb8f3e67cee8d9a5cb3f4081c3817cd830a8d0cf851c1f1e03fee35d790e42d98df5b24e07e6d9d9a46a16352e9b44ad412571c903a532017a5bc1ffe1369c123e1e17e1e4d52cc32329aa205d73d57f846389a6e446f612eeb2dcc346e4590f59a4c533f216ee44f09c1d2298b7d6c"
-}
-
-⛰ lncli --network=simnet exportchanbackup --all
-{
- "chan_points": [
- "29be6d259dc71ebdf0a3a0e83b240eda78f9023d8aeaae13c89250c7e59467d5:0"
- ],
- "multi_chan_backup": "fd73e992e5133aa085c8e45548e0189c411c8cfe42e902b0ee2dec528a18fb472c3375447868ffced0d4812125e4361d667b7e6a18b2357643e09bbe7e9110c6b28d74f4f55e7c29e92419b52509e5c367cf2d977b670a2ff7560f5fe24021d246abe30542e6c6e3aa52f903453c3a2389af918249dbdb5f1199aaecf4931c0366592165b10bdd58eaf706d6df02a39d9323a0c65260ffcc84776f2705e4942d89e4dbefa11c693027002c35582d56e295dcf74d27e90873699657337696b32c05c8014911a7ec8eb03bdbe526fe658be8abdf50ab12c4fec9ddeefc489cf817721c8e541d28fbe71e32137b5ea066a9f4e19814deedeb360def90eff2965570aab5fedd0ebfcd783ce3289360953680ac084b2e988c9cbd0912da400861467d7bb5ad4b42a95c2d541653e805cbfc84da401baf096fba43300358421ae1b43fd25f3289c8c73489977592f75bc9f73781f41718a752ab325b70c8eb2011c5d979f6efc7a76e16492566e43d94dbd42698eb06ff8ad4fd3f2baabafded"
-}
-
-⛰ lncli --network=simnet exportchanbackup --all --output_file=channels.backup
-```
-
-As shown above, a user can either: specify a specific channel to backup, backup
-all existing channels, or backup directly to an on-disk file. All backups use
-the same format.
-
-#### Streaming Updates via `SubscribeChannelBackups`
-
-Using the gRPC interace directly, [a new call:
-`SubscribeChannelBackups`](https://api.lightning.community/#subscribechannelbackups).
-This call allows users to receive a new notification each time the underlying
-SCB state changes. This can be used to implement more complex backup
-schemes, compared to the file system notification based approach.
-
-### Recovering Using SCBs
-
-If a node is being created from scratch, then it's possible to pass in an
-existing SCB using the `lncli create` or `lncli unlock` commands:
-```
-⛰ lncli create -multi_file=channels.backup
-```
-
-Alternatively, the `restorechanbackup` command can be used if `lnd` has already
-been created at the time of SCB restoration:
-```
-⛰ lncli restorechanbackup -h
-NAME:
- lncli restorechanbackup - Restore an existing single or multi-channel static channel backup
-
-USAGE:
- lncli restorechanbackup [command options] [--single_backup] [--multi_backup] [--multi_file=]
-
-CATEGORY:
- Channels
-
-DESCRIPTION:
-
- Allows a user to restore a Static Channel Backup (SCB) that was
- obtained either via the exportchanbackup command, or from lnd's
- automatically manged channels.backup file. This command should be used
- if a user is attempting to restore a channel due to data loss on a
- running node restored with the same seed as the node that created the
- channel. If successful, this command will allows the user to recover
- the settled funds stored in the recovered channels.
-
- The command will accept backups in one of three forms:
-
- * A single channel packed SCB, which can be obtained from
- exportchanbackup. This should be passed in hex encoded format.
-
- * A packed multi-channel SCB, which couples several individual
- static channel backups in single blob.
-
- * A file path which points to a packed multi-channel backup within a
- file, using the same format that lnd does in its channels.backup
- file.
-
-
-OPTIONS:
- --single_backup value a hex encoded single channel backup obtained from exportchanbackup
- --multi_backup value a hex encoded multi-channel backup obtained from exportchanbackup
- --multi_file value the path to a multi-channel back up file
-```
-
-Once the process has been initiated, `lnd` will proceed to:
-
- 1. Given the set of channels to recover, the server will then will insert a
- series of "channel shells" into the database. These contain only the
- information required to initiate the DLP (data loss protection) protocol
- and nothing more. As a result, they're marked as "recovered" channels in
- the database, and we'll disallow trying to use them for any other process.
- 2. Once the channel shell is recovered, the
- [chanbackup](https://github.com/lightningnetwork/lnd/tree/master/chanbackup)
- package will attempt to insert a LinkNode that contains all prior
- addresses that we were able to reach the peer at. During the process,
- we'll also insert the edge for that channel (only in the outgoing
- direction) into the database as well.
- 3. lnd will then start up, and as usual attempt to establish connections to
- all peers that we have channels open with. If `lnd` is already running,
- then a new persistent connection attempt will be initiated.
- 4. Once we connect with a peer, we'll then initiate the DLP protocol. The
- remote peer will discover that we've lost data, and then immediately force
- close their channel. Before they do though, they'll send over the channel
- reestablishment handshake message which contains the unrevoked commitment
- point which we need to derive keys (will be fixed in
- BOLT 1.1 by making the key static) to sweep our funds.
- 5. Once the commitment transaction confirms, given information within the SCB
- we'll re-derive all keys we need, and then sweep the funds.
diff --git a/lnd/docs/release.md b/lnd/docs/release.md
deleted file mode 100644
index e6591ea4..00000000
--- a/lnd/docs/release.md
+++ /dev/null
@@ -1,70 +0,0 @@
-# `lnd`'s Reproducible Build System
-
-This package contains the build script that the `lnd` project uses in order to
-build binaries for each new release. As of `go1.13`, with some new build flags,
-binaries are now reproducible, allowing developers to build the binary on
-distinct machines, and end up with a byte-for-byte identical binary. However,
-this wasn't _fully_ solved in `go1.13`, as the build system still includes the
-directory the binary is built into the binary itself. As a result, our scripts
-utilize a work around needed until `go1.13.2`.
-
-## Building a New Release
-
-### macOS/Linux/Windows (WSL)
-
-No prior set up is needed on Linux or macOS is required in order to build the
-release binaries. However, on Windows, the only way to build the release
-binaries at the moment is by using the Windows Subsystem Linux. One can build
-the release binaries following these steps:
-
-1. `git clone https://github.com/lightningnetwork/lnd.git`
-2. `cd lnd`
-3. `make release tag= # is the name of the next release/tag`
-
-This will then create a directory of the form `lnd-` containing archives
-of the release binaries for each supported operating system and architecture,
-and a manifest file containing the hash of each archive.
-
-## Verifying a Release
-
-With `go1.13`, it's now possible for third parties to verify release binaries.
-Before this version of `go`, one had to trust the release manager(s) to build the
-proper binary. With this new system, third parties can now _independently_ run
-the release process, and verify that all the hashes of the release binaries
-match exactly that of the release binaries produced by said third parties.
-
-To verify a release, one must obtain the following tools (many of these come
-installed by default in most Unix systems): `gpg`/`gpg2`, `shashum`, and
-`tar`/`unzip`.
-
-Once done, verifiers can proceed with the following steps:
-
-1. Acquire the archive containing the release binaries for one's specific
- operating system and architecture, and the manifest file along with its
- signature.
-2. Verify the signature of the manifest file with `gpg --verify
- manifest-.txt.sig`. This will require obtaining the PGP keys which
- signed the manifest file, which are included in the release notes.
-3. Recompute the `SHA256` hash of the archive with `shasum -a 256 `,
- locate the corresponding one in the manifest file, and ensure they match
- __exactly__.
-
-At this point, verifiers can use the release binaries acquired if they trust
-the integrity of the release manager(s). Otherwise, one can proceed with the
-guide to verify the release binaries were built properly by obtaining `shasum`
-and `go` (matching the same version used in the release):
-
-4. Extract the release binaries contained within the archive, compute their
- hashes as done above, and note them down.
-5. Ensure `go` is installed, matching the same version as noted in the release
- notes.
-6. Obtain a copy of `lnd`'s source code with `git clone
- https://github.com/lightningnetwork/lnd` and checkout the source code of the
- release with `git checkout `.
-7. Proceed to verify the tag with `git verify-tag ` and compile the
- binaries from source for the intended operating system and architecture with
- `make release sys=OS-ARCH tag=`.
-8. Extract the archive found in the `lnd-` directory created by the
- release script and recompute the `SHA256` hash of the release binaries (lnd
- and lncli) with `shasum -a 256 `. These should match __exactly__
- as the ones noted above.
diff --git a/lnd/docs/rest/websockets.md b/lnd/docs/rest/websockets.md
deleted file mode 100644
index 705a4c73..00000000
--- a/lnd/docs/rest/websockets.md
+++ /dev/null
@@ -1,99 +0,0 @@
-# WebSockets with `lnd`'s REST API
-
-This document describes how streaming response REST calls can be used correctly
-by making use of the WebSocket API.
-
-As an example, we are going to write a simple JavaScript program that subscribes
-to `lnd`'s
-[block notification RPC](https://api.lightning.community/#v2-chainnotifier-register-blocks).
-
-The WebSocket will be kept open as long as `lnd` runs and JavaScript program
-isn't stopped.
-
-## Browser environment
-
-When using WebSockets in a browser, there are certain security limitations of
-what header fields are allowed to be sent. Therefore, the macaroon cannot just
-be added as a `Grpc-Metadata-Macaroon` header field as it would work with normal
-REST calls. The browser will just ignore that header field and not send it.
-
-Instead we have added a workaround in `lnd`'s WebSocket proxy that allows
-sending the macaroon as a WebSocket "protocol":
-
-```javascript
-const host = 'localhost:8080'; // The default REST port of lnd, can be overwritten with --restlisten=ip:port
-const macaroon = '0201036c6e6402eb01030a10625e7e60fd00f5a6f9cd53f33fc82a...'; // The hex encoded macaroon to send
-const initialRequest = { // The initial request to send (see API docs for each RPC).
- hash: "xlkMdV382uNPskw6eEjDGFMQHxHNnZZgL47aVDSwiRQ=", // Just some example to show that all `byte` fields always have to be base64 encoded in the REST API.
- height: 144,
-}
-
-// The protocol is our workaround for sending the macaroon because custom header
-// fields aren't allowed to be sent by the browser when opening a WebSocket.
-const protocolString = 'Grpc-Metadata-Macaroon+' + macaroon;
-
-// Let's now connect the web socket. Notice that all WebSocket open calls are
-// always GET requests. If the RPC expects a call to be POST or DELETE (see API
-// docs to find out), the query parameter "method" can be set to overwrite.
-const wsUrl = 'wss://' + host + '/v2/chainnotifier/register/blocks?method=POST';
-let ws = new WebSocket(wsUrl, protocolString);
-ws.onopen = function (event) {
- // After the WS connection is establishes, lnd expects the client to send the
- // initial message. If an RPC doesn't have any request parameters, an empty
- // JSON object has to be sent as a string, for example: ws.send('{}')
- ws.send(JSON.stringify(initialRequest));
-}
-ws.onmessage = function (event) {
- // We received a new message.
- console.log(event);
-
- // The data we're really interested in is in data and is always a string
- // that needs to be parsed as JSON and always contains a "result" field:
- console.log("Payload: ");
- console.log(JSON.parse(event.data).result);
-}
-ws.onerror = function (event) {
- // An error occured, let's log it to the console.
- console.log(event);
-}
-```
-
-## Node.js environment
-
-With Node.js it is a bit easier to use the streaming response APIs because we
-can set the macaroon header field directly. This is the example from the API
-docs:
-
-```javascript
-// --------------------------
-// Example with websockets:
-// --------------------------
-const WebSocket = require('ws');
-const fs = require('fs');
-const macaroon = fs.readFileSync('LND_DIR/data/chain/bitcoin/simnet/admin.macaroon').toString('hex');
-let ws = new WebSocket('wss://localhost:8080/v2/chainnotifier/register/blocks?method=POST', {
- // Work-around for self-signed certificates.
- rejectUnauthorized: false,
- headers: {
- 'Grpc-Metadata-Macaroon': macaroon,
- },
-});
-let requestBody = {
- hash: "",
- height: "",
-}
-ws.on('open', function() {
- ws.send(JSON.stringify(requestBody));
-});
-ws.on('error', function(err) {
- console.log('Error: ' + err);
-});
-ws.on('message', function(body) {
- console.log(body);
-});
-// Console output (repeated for every message in the stream):
-// {
-// "hash": ,
-// "height": ,
-// }
-```
diff --git a/lnd/docs/ruby-thing.rb b/lnd/docs/ruby-thing.rb
deleted file mode 100644
index 922201fe..00000000
--- a/lnd/docs/ruby-thing.rb
+++ /dev/null
@@ -1,12 +0,0 @@
-#!/usr/bin/env ruby
-
-File.open("INSTALL.md", 'r') do |f|
- f.each_line do |line|
- forbidden_words = ['Table of contents', 'define', 'pragma']
- next if !line.start_with?("#") || forbidden_words.any? { |w| line =~ /#{w}/ }
-
- title = line.gsub("#", "").strip
- href = title.gsub(" ", "-").downcase
- puts " " * (line.count("#")-1) + "* [#{title}](\##{href})"
- end
-end
diff --git a/lnd/docs/safety.md b/lnd/docs/safety.md
deleted file mode 100644
index 6f3d74cc..00000000
--- a/lnd/docs/safety.md
+++ /dev/null
@@ -1,438 +0,0 @@
-# lnd Operational Safety Guidelines
-
-## Table of Contents
-
-* [Overview](#overview)
- - [aezeed](#aezeed)
- - [Wallet password](#wallet-password)
- - [TLS](#tls)
- - [Macaroons](#macaroons)
- - [Static Channel Backups (SCBs)](#static-channel-backups-scbs)
- - [Static remote keys](#static-remote-keys)
-* [Best practices](#best-practices)
- - [aezeed storage](#aezeed-storage)
- - [File based backups](#file-based-backups)
- - [Keeping Static Channel Backups (SCBs) safe](#keeping-static-channel-backups-scb-safe)
- - [Keep `lnd` updated](#keep-lnd-updated)
- - [Zombie channels](#zombie-channels)
- - [Migrating a node to a new device](#migrating-a-node-to-a-new-device)
- - [Migrating a node from clearnet to Tor](#migrating-a-node-from-clearnet-to-tor)
- - [Prevent data corruption](#prevent-data-corruption)
- - [Don't interrupt `lncli` commands](#dont-interrupt-lncli-commands)
- - [Regular accounting/monitoring](#regular-accountingmonitoring)
- - [Pruned bitcoind node](#pruned-bitcoind-node)
- - [The `--noseedbackup` flag](#the---noseedbackup-flag)
-
-## Overview
-
-This chapter describes the security/safety mechanisms that are implemented in
-`lnd`. We encourage every person that is planning on putting mainnet funds into
-a Lightning Network channel using `lnd` to read this guide carefully.
-As of this writing, `lnd` is still in beta and it is considered `#reckless` to
-put any life altering amounts of BTC into the network.
-That said, we constantly put in a lot of effort to make `lnd` safer to use and
-more secure. We will update this documentation with each safety mechanism that
-we implement.
-
-The first part of this document describes the security elements that are used in
-`lnd` and how they work on a high level.
-The second part is a list of best practices that has crystallized from bug
-reports, developer recommendations and experiences from a lot of individuals
-running mainnet `lnd` nodes during the last 18 months and counting.
-
-### aezeed
-
-This is what all the on-chain private keys are derived from. `aezeed` is similar
-to BIP39 as it uses the same word list to encode the seed as a mnemonic phrase.
-But this is where the similarities end, because `aezeed` is _not_ compatible
-with BIP39. The 24 words of `aezeed` encode a 128 bit entropy (the seed itself),
-a wallet birthday (days since BTC genesis block) and a version.
-This data is _encrypted_ with a password using the AEZ cipher suite (hence the
-name). Encrypting the content instead of using the password to derive the HD
-extended root key has the advantage that the password can actually be checked
-for correctness and can also be changed without affecting any of the derived
-keys.
-A BIP for the `aezeed` scheme is being written and should be published soon.
-
-Important to know:
-* As with any bitcoin seed phrase, never reveal this to any person and store
- the 24 words (and the password) in a safe place.
-* You should never run two different `lnd` nodes with the same seed! Even if
- they aren't running at the same time. This will lead to strange/unpredictable
- behavior or even loss of funds. To migrate an `lnd` node to a new device,
- please see the [node migration section](#migrating-a-node-to-a-new-device).
-* For more technical information [see the aezeed README](../aezeed/README.md).
-
-### Wallet password
-
-The wallet password is one of the first things that has to be entered if a new
-wallet is created using `lnd`. It is completely independent from the `aezeed`
-cipher seed passphrase (which is optional). The wallet password is used to
-encrypt the sensitive parts of `lnd`'s databases, currently some parts of
-`wallet.db` and `macaroons.db`. Loss of this password does not necessarily
-mean loss of funds, as long as the `aezeed` passphrase is still available.
-But the node will need to be restored using the
-[SCB restore procedure](recovery.md).
-
-### TLS
-
-By default the two API connections `lnd` offers (gRPC on port 10009 and REST on
-port 8080) use TLS with a self-signed certificate for transport level security.
-Specifying the certificate on the client side (for example `lncli`) is only a
-protection against man-in-the-middle attacks and does not provide any
-authentication. In fact, `lnd` will never even see the certificate that is
-supplied to `lncli` with the `--tlscertpath` argument. `lncli` only uses that
-certificate to verify it is talking to the correct gRPC server.
-If the key/certificate pair (`tls.cert` and `tls.key` in the main `lnd` data
-directory) is missing on startup, a new self-signed key/certificate pair is
-generated. Clients connecting to `lnd` then have to use the new certificate
-to verify they are talking to the correct server.
-
-### Macaroons
-
-Macaroons are used as the main authentication method in `lnd`. A macaroon is a
-cryptographically verifiable token, comparable to a [JWT](https://jwt.io/)
-or other form of API access token. In `lnd` this token consists of a _list of
-permissions_ (what operations does the user of the token have access to) and a
-set of _restrictions_ (e.g. token expiration timestamp, IP address restriction).
-`lnd` does not keep track of the individual macaroons issued, only the key that
-was used to create (and later verify) them. That means, individual tokens cannot
-currently be invalidated, only all of them at once.
-See the [high-level macaroons documentation](macaroons.md) or the [technical
-README](../macaroons/README.md) for more information.
-
-Important to know:
-* Deleting the `*.macaroon` files in the `/data/chain/bitcoin/mainnet/`
- folder will trigger `lnd` to recreate the default macaroons. But this does
- **NOT** invalidate clients that use an old macaroon. To make sure all
- previously generated macaroons are invalidated, the `macaroons.db` has to be
- deleted as well as all `*.macaroon`.
-
-### Static Channel Backups (SCBs)
-
-A Static Channel Backup is a piece of data that contains all _static_
-information about a channel, like funding transaction, capacity, key derivation
-paths, remote node public key, remote node last known network addresses and
-some static settings like CSV timeout and min HTLC setting.
-Such a backup can either be obtained as a file containing entries for multiple
-channels or by calling RPC methods to get individual (or all) channel data.
-See the section on [keeping SCBs safe](#keeping-static-channel-backups-scb-safe)
-for more information.
-
-What the SCB does **not** contain is the current channel balance (or the
-associated commitment transaction). So how can a channel be restored using
-SCBs?
-That's the important part: _A channel cannot be restored using SCBs_, but the
-funds that are in the channel can be claimed. The restore procedure relies on
-the Data Loss Prevention (DLP) protocol which works by connecting to the remote
-node and asking them to **force close** the channel and hand over the needed
-information to sweep the on-chain funds that belong to the local node.
-Because of this, [restoring a node from SCB](recovery.md) should be seen as an
-emergency measure as all channels will be closed and on-chain fees incur to the
-party that opened the channel initially.
-To migrate an existing, working node to a new device, SCBs are _not_ the way to
-do it. See the section about
-[migrating a node](#migrating-a-node-to-a-new-device) on how to do it correctly.
-
-Important to know:
-* [Restoring a node from SCB](recovery.md) will force-close all channels
- contained in that file.
-* Restoring a node from SCB relies on the remote node of each channel to be
- online and respond to the DLP protocol. That's why it's important to
- [get rid of zombie channels](#zombie-channels) because they cannot be
- recovered using SCBs.
-* The SCB data is encrypted with a key from the seed the node was created with.
- A node can therefore only be restored from SCB if the seed is also known.
-
-### Static remote keys
-
-Since version `v0.8.0-beta`, `lnd` supports the `option_static_remote_key` (also
-known as "safu commitments"). All new channels will be opened with this option
-enabled by default, if the other node also supports it.
-In essence, this change makes it possible for a node to sweep their channel
-funds if the remote node force-closes, without any further communication between
-the nodes. Previous to this change, your node needed to get a random channel
-secret (called the `per_commit_point`) from the remote node even if they
-force-closed the channel, which could make recovery very difficult.
-
-## Best practices
-
-### aezeed storage
-
-When creating a new wallet, `lnd` will print out 24 words to write down, which
-is the wallet's seed (in the [aezeed](#aezeed) format). That seed is optionally
-encrypted with a passphrase, also called the _cipher seed passphrase_.
-It is absolutely important to write both the seed and, if set, the password down
-and store it in a safe place as **there is no way of exporting the seed from an
-lnd wallet**. When creating the wallet, after printing the seed to the command
-line, it is hashed and only the hash (or to be more exact, the BIP32 extended
-root key) is stored in the `wallet.db` file.
-There is
-[a tool being worked on](https://github.com/lightningnetwork/lnd/pull/2373)
-that can extract the BIP32 extended root key but currently you cannot restore
-lnd with only this root key.
-
-Important to know:
-* Setting a password/passphrase for the aezeed is meant to protect it from
- an attacker that finds the paper/storage device. Writing down the password
- alongside the 24 seed words does not enhance the security in any way.
- Therefore the password should be stored in a separate place.
-
-### File based backups
-
-There is a lot of confusion and also some myths about how to best backup the
-off-chain funds of an `lnd` node. Making a mistake here is also still the single
-biggest risk of losing off-chain funds, even though we do everything to mitigate
-those risks.
-
-**What files can/should I regularly backup?**
-The single most important file that needs to be backed up whenever it changes
-is the `/data/chain/bitcoin/mainnet/channel.backup` file which holds
-the Static Channel Backups (SCBs). This file is only updated every time `lnd`
-starts, a channel is opened or a channel is closed.
-
-Most consumer Lightning wallet apps upload the file to the cloud automatically.
-
-See the [SCB chapter](#static-channel-backups-scbs) for more
-information on how to use the file to restore channels.
-
-**What files should never be backed up to avoid problems?**
-This is a bit of a trick question, as making the backup is not the problem.
-Restoring/using an old version of a specific file called
-`/data/graph/mainnet/channel.db` is what is very risky and should
-_never_ be done!
-This requires some explanation:
-The way LN channels are currently set up (until `eltoo` is implemented) is that
-both parties agree on a current balance. To make sure none of the two peers in
-a channel ever try to publish an old state of that balance, they both hand over
-their keys to the other peer that gives them the means to take _all_ funds (not
-just their agreed upon part) from a channel, if an _old_ state is ever
-published. Therefore, having an old state of a channel basically means
-forfeiting the balance to the other party.
-
-As payments in `lnd` can be made multiple times a second, it's very hard to
-make a backup of the channel database every time it is updated. And even if it
-can be technically done, the confidence that a particular state is certainly the
-most up-to-date can never be very high. That's why the focus should be on
-[making sure the channel database is not corrupted](#prevent-data-corruption),
-[closing out the zombie channels](#zombie-channels) and keeping your SCBs safe.
-
-### Keeping Static Channel Backups (SCB) safe
-
-As mentioned in the previous chapter, there is a file where `lnd` stores and
-updates a backup of all channels whenever the node is restarted, a new channel
-is opened or a channel is closed:
-`/data/chain/bitcoin/mainnet/channel.backup`
-
-One straight-forward way of backing that file up is to create a file watcher and
-react whenever the file is changed. Here is an example script that
-[automatically makes a copy of the file whenever it changes](https://gist.github.com/alexbosworth/2c5e185aedbdac45a03655b709e255a3).
-
-Other ways of obtaining SCBs for a node's channels are
-[described in the recovery documentation](recovery.md#obtaining-scbs).
-
-Because the backup file is encrypted with a key from the seed the node was
-created with, it can safely be stored on a cloud storage or any other storage
-medium. Many consumer focused wallet smartphone apps automatically store a
-backup file to the cloud, if the phone is set up to allow it.
-
-### Keep `lnd` updated
-
-With every larger update of `lnd`, new security features are added. Users are
-always encouraged to update their nodes as soon as possible. This also helps the
-network in general as new safety features that require compatibility among nodes
-can be used sooner.
-
-### Zombie channels
-
-Zombie channels are channels that are most likely dead but are still around.
-This can happen if one of the channel peers has gone offline for good (possibly
-due to a failure of some sort) and didn't close its channels. The other, still
-online node doesn't necessarily know that its partner will never come back
-online.
-
-Funds that are in such channels are at great risk, as is described quite
-dramatically in
-[this article](https://medium.com/@gcomxx/get-rid-of-those-zombie-channels-1267d5a2a708?)
-.
-
-The TL;DR of the article is that if you have funds in a zombie channel and you
-need to recover your node after a failure, SCBs won't be able to recover those
-funds. Because SCB restore
-[relies on the remote node cooperating](#static-channel-backups-scbs).
-
-That's why it's important to **close channels with peers that have been
-offline** for a length of time as a precautionary measure.
-
-Of course this might not be good advice for a routing node operator that wants
-to support mobile users and route for them. Nodes running on a mobile device
-tend to be offline for long periods of time. It would be bad for those users if
-they needed to open a new channel every time they want to use the wallet.
-Most mobile wallets only open private channels as they do not intend to route
-payments through them. A routing node operator should therefore take into
-account if a channel is public or private when thinking about closing it.
-
-### Migrating a node to a new device
-
-As mentioned in the chapters [aezeed](#aezeed) and
-[SCB](#static-channel-backups-scbs) you should never use the same seed on two
-different nodes and restoring from SCB is not a migration but an emergency
-procedure.
-What is the correct way to migrate an existing node to a new device? There is
-an easy way that should work for most people and there's the harder/costlier
-fallback way to do it.
-
-**Option 1: Move the whole data directory to the new device**
-This option works very well if the new device runs the same operating system on
-the same architecture. If that is the case, the whole `/home//.lnd`
-directory in Linux (or `$HOME/Library/Application Support/lnd` in MacOS,
-`%LOCALAPPDATA%\lnd` in Windows) can be moved to the new device and `lnd`
-started there. It is important to shut down `lnd` on the old device before
-moving the directory!
-**Not supported/untested** is moving the data directory between different
-operating systems (for example `MacOS` -> `Linux`) or different system
-architectures (for example `32bit` -> `64bit` or `ARM` -> `amd64`). Data
-corruption or unexpected behavior can be the result. Users switching between
-operating systems or architectures should always use Option 2!
-
-**Option 2: Start from scratch**
-If option 1 does not work or is too risky, the safest course of action is to
-initialize the existing node again from scratch. Unfortunately this incurs some
-on-chain fee costs as all channels will need to be closed. Using the same seed
-means restoring the same network node identity as before. If a new identity
-should be created, a new seed needs to be created.
-Follow these steps to create the **same node (with the same seed)** from
-scratch:
-1. On the old device, close all channels (`lncli closeallchannels`). The
- command can take up to several minutes depending on the number of channels.
- **Do not interrupt the command!**
-1. Wait for all channels to be fully closed. If some nodes don't respond to the
- close request it can be that `lnd` will go ahead and force close those
- channels. This means that the local balance will be time locked for up to
- two weeks (depending on the channel size). Check `lncli pendingchannels` to
- see if any channels are still in the process of being force closed.
-1. After all channels are fully closed (and `lncli pendingchannels` lists zero
- channels), `lnd` can be shut down on the old device.
-1. Start `lnd` on the new device and create a new wallet with the existing seed
- that was used on the old device (answer "yes" when asked if an existing seed
- should be used).
-1. Wait for the wallet to rescan the blockchain. This can take up to several
- hours depending on the age of the seed and the speed of the chain backend.
-1. After the chain is fully synced (`lncli getinfo` shows
- `"synced_to_chain": true`) the on-chain funds from the previous device should
- now be visible on the new device as well and new channels can be opened.
-
-**What to do after the move**
-If things don't work as expected on the moved or re-created node, consider this
-list things that possibly need to be changed to work on a new device:
-* In case the new device has a different hostname and TLS connection problems
- occur, delete the `tls.key` and `tls.cert` files in the data directory and
- restart `lnd` to recreate them.
-* If an external IP is set (either with `--externalip` or `--tlsextraip`) these
- might need to be changed if the new machine has a different address. Changing
- the `--tlsextraip` setting also means regenerating the certificate pair. See
- point 1.
-* If port `9735` (or `10009` for gRPC) was forwarded on the router, these
- forwarded ports need to point to the new device. The same applies to firewall
- rules.
-* It might take more than 24 hours for a new IP address to be visible on
- network explorers.
-* If channels show as offline after several hours, try to manually connect to
- the remote peer. They might still try to reach `lnd` on the old address.
-
-### Migrating a node from clearnet to Tor
-
-If an `lnd` node has already been connected to the internet with an IPv4 or IPv6
-(clearnet) address and has any non-private channels, this connection between
-channels and IP address is known to the network and cannot be deleted.
-Starting the same node with the same identity and channels using Tor is trivial
-to link back to any previously used clearnet IP address and does therefore not
-provide any privacy benefits.
-The following steps are recommended to cut all links between the old clearnet
-node and the new Tor node:
-1. Close all channels on the old node and wait for them to fully close.
-1. Send all on-chain funds of the old node through a Coin Join service (like
- Wasabi or Samurai/Whirlpool) until a sufficiently high anonymity set is
- reached.
-1. Create a new `lnd` node with a **new seed** that is only connected to Tor
- and generate an on-chain address on the new node.
-1. Send the mixed/coinjoined coins to the address of the new node.
-1. Start opening channels.
-1. Check an online network explorer that no IPv4 or IPv6 address is associated
- with the new node's identity.
-
-### Prevent data corruption
-
-Many problems while running an `lnd` node can be prevented by avoiding data
-corruption in the channel database (`/data/graph/mainnet/channel.db`).
-
-The following (non-exhaustive) list of things can lead to data corruption:
-* A spinning hard drive gets a physical shock.
-* `lnd`'s main data directory being written on an SD card or USB thumb drive
- (SD cards and USB thumb drives _must_ be considered unsafe for critical files
- that are written to very often, as the channel DB is).
-* `lnd`'s main data directory being written to a network drive without
- `fsync` support.
-* Unclean shutdown of `lnd`.
-* Aborting channel operation commands (see next chapter).
-* Not enough disk space for a growing channel DB file.
-* Moving `lnd`'s main data directory between different operating systems/
- architectures.
-
-To avoid most of these factors, it is recommended to store `lnd`'s main data
-directory on an Solid State Drive (SSD) of a reliable manufacturer.
-An alternative or extension to that is to use a replicated disk setup. Making
-sure a power failure does not interrupt the node by running a UPS (
-uninterruptible power supply) might also make sense depending on the reliability
-of the local power grid and the amount of funds at stake.
-
-### Don't interrupt `lncli` commands
-
-Things can start to take a while to execute if a node has more than 50 to 100
-channels. It is extremely important to **never interrupt an `lncli` command**
-if it is manipulating the channel database, which is true for the following
-commands:
- - `openchannel`
- - `closechannel` and `closeallchannels`
- - `abandonchannel`
- - `updatechanpolicy`
- - `restorechanbackup`
-
-Interrupting any of those commands can lead to an inconsistent state of the
-channel database and unpredictable behavior. If it is uncertain if a command
-is really stuck or if the node is still working on it, a look at the log file
-can help to get an idea.
-
-### Regular accounting/monitoring
-
-Regular monitoring of a node and keeping track of the movement of funds can help
-prevent problems. Tools like [`lndmon`](https://github.com/lightninglabs/lndmon)
-can assist with these tasks.
-
-### Pruned bitcoind node
-
-Running `lnd` connected to a `bitcoind` node that is running in prune mode is
-not supported! `lnd` needs to verify the funding transaction of every channel
-in the network and be able to retrieve that information from `bitcoind` which
-it cannot deliver when that information is pruned away.
-
-In theory pruning away all blocks _before_ the SegWit activation would work
-as LN channels rely on SegWit. But this has neither been tested nor would it
-be recommended/supported.
-
-In addition to not running a pruned node, it is recommended to run `bitcoind`
-with the `-txindex` flag for performance reasons, though this is not strictly
-required.
-
-Multiple `lnd` nodes can run off of a single `bitcoind` instance. There will be
-connection/thread/performance limits at some number of `lnd` nodes but in
-practice running 2 or 3 `lnd` instances per `bitcoind` node didn't show any
-problems.
-
-### The `--noseedbackup` flag
-
-This is a flag that is only used for integration tests and should **never** be
-used on mainnet! Turning this flag on means that the 24 word seed will not be
-shown when creating a wallet. The seed is required to restore a node in case
-of data corruption and without it all funds (on-chain and off-chain) are
-being put at risk.
diff --git a/lnd/docs/watchtower.md b/lnd/docs/watchtower.md
deleted file mode 100644
index 870ec1e8..00000000
--- a/lnd/docs/watchtower.md
+++ /dev/null
@@ -1,236 +0,0 @@
-# Private Altruist Watchtowers
-
-As of v0.7.0, `lnd` supports the ability to run a private, altruist watchtower
-as a fully-integrated subsystem of `lnd`. Watchtowers act as a second line of
-defense in responding to malicious or accidental breach scenarios in the event
-that the client’s node is offline or unable to respond at the time of a breach,
-offering greater degree of safety to channel funds.
-
-In contrast to a _reward watchtower_ which demand a portion of the channel funds
-as a reward for fulfilling its duty, an _altruist watchtower_ returns all of the
-victim’s funds (minus on-chain fees) without taking a cut. Reward watchtowers
-will be enabled in a subsequent release, though are still undergoing further
-testing and refinement.
-
-In addition, `lnd` can now be configured to operate as a _watchtower client_,
-backing up encrypted breach-remedy transactions (aka. justice transactions) to
-other altruist watchtowers. The watchtower stores fixed-size, encrypted blobs
-and is only able to decrypt and publish the justice transaction after the
-offending party has broadcast a revoked commitment state. Client communications
-with a watchtower are encrypted and authenticated using ephemeral keypairs,
-mitigating the amount of tracking the watchtower can perform on its clients
-using long-term identifiers.
-
-Note that we have chosen to deploy a restricted set of features in this release
-that can begin to provide meaningful security to `lnd` users. Many more
-watchtower-related features are nearly complete or have meaningful progress, and
-we will continue to ship them as they receive further testing and become safe to
-release.
-
-Note: *For now, watchtowers will only backup the `to_local` and `to_remote` outputs
-from revoked commitments; backing up HTLC outputs is slated to be deployed in a
-future release, as the protocol can be extended to include the extra signature
-data in the encrypted blobs.*
-
-## Configuring a Watchtower
-
-To set up a watchtower, command line users should compile in the optional
-`watchtowerrpc` subserver, which will offer the ability to interface with the
-tower via gRPC or `lncli`. The release binaries will include the `watchtowerrpc`
-subserver by default.
-
-The minimal configuration needed to activate the tower is `watchtower.active=1`.
-
-Retrieving information about your tower’s configurations can be done using
-`lncli tower info`:
-
-```
-🏔 lncli tower info
-{
- "pubkey": "03281d603b2c5e19b8893a484eb938d7377179a9ef1a6bca4c0bcbbfc291657b63",
- "listeners": [
- "[::]:9911"
- ],
- "uris": null,
-}
-```
-
-The entire set of watchtower configuration options can be found using
-`lnd -h`:
-
-```
-watchtower:
- --watchtower.active If the watchtower should be active or not
- --watchtower.towerdir= Directory of the watchtower.db (default: $HOME/.lnd/data/watchtower)
- --watchtower.listen= Add interfaces/ports to listen for peer connections
- --watchtower.externalip= Add interfaces/ports where the watchtower can accept peer connections
- --watchtower.readtimeout= Duration the watchtower server will wait for messages to be received before hanging up on client connections
- --watchtower.writetimeout= Duration the watchtower server will wait for messages to be written before hanging up on client connections
-```
-
-### Listening Interfaces
-
-By default, the watchtower will listen on `:9911` which specifies port `9911`
-listening on all available interfaces. Users may configure their own listeners
-via the `--watchtower.listen=` option. You can verify your configuration by
-checking the `"listeners"` field in `lncli tower info`. If you're having trouble
-connecting to your watchtower, ensure that `` is open or your proxy is
-properly configured to point to an active listener.
-
-### External IP Addresses
-
-Additionally, users can specify their tower’s external IP address(es) using
-`watchtower.externalip=`, which will expose the full tower URIs
-(pubkey@host:port) over RPC or `lncli tower info`:
-
-```
- ...
- "uris": [
- "03281d603b2c5e19b8893a484eb938d7377179a9ef1a6bca4c0bcbbfc291657b63@1.2.3.4:9911"
- ]
-```
-
-The watchtower's URIs can be given to clients in order to connect and use the
-tower with the following command:
-
-```
-🏔 lncli wtclient add 03281d603b2c5e19b8893a484eb938d7377179a9ef1a6bca4c0bcbbfc291657b63@1.2.3.4:9911
-```
-
-If the watchtower's clients will need remote access, be sure to either:
- - Open port 9911 or a port chosen via `watchtower.listen`.
- - Use a proxy to direct traffic from an open port to the watchtower's listening
- address.
-
-### Tor Hidden Services
-
-Watchtowers have tor hidden service support and can automatically generate a
-hidden service on startup with the following flags:
-
-```
-🏔 lnd --tor.active --tor.v3 --watchtower.active
-```
-
-The onion address is then shown in the "uris" field when queried with `lncli tower info`:
-
-```
-...
-"uris": [
- "03281d603b2c5e19b8893a484eb938d7377179a9ef1a6bca4c0bcbbfc291657b63@bn2kxggzjysvsd5o3uqe4h7655u7v2ydhxzy7ea2fx26duaixlwuguad.onion:9911"
-]
-```
-
-Note: *The watchtower’s public key is distinct from `lnd`’s node public key. For
-now this acts as a soft whitelist as it requires clients to know the tower’s
-public key in order to use it for backups before more advanced whitelisting
-features are implemented. We recommend NOT disclosing this public key openly,
-unless you are prepared to open your tower up to the entire Internet.*
-
-### Watchtower Database Directory
-
-The watchtower's database can be moved using the `watchtower.towerdir=`
-configuration option. Note that a trailing `/bitcoin/mainnet/watchtower.db`
-will be appended to the chosen directory to isolate databases for different
-chains, so setting `watchtower.towerdir=/path/to/towerdir` will yield a
-watchtower database at `/path/to/towerdir/bitcoin/mainnet/watchtower.db`.
-
-On Linux, for example, the default watchtower database will be located at:
-
-```
-/$USER/.lnd/data/watchtower/bitcoin/mainnet/watchtower.db
-```
-
-## Configuring a Watchtower Client
-
-In order to set up a watchtower client, you’ll need two things:
-
-1. The watchtower client must be enabled with the `--wtclient.active` flag.
-
-```
-🏔 lnd --wtclient.active
-```
-
-2. The watchtower URI of an active watchtower.
-
-```
-🏔 lncli wtclient add 03281d603b2c5e19b8893a484eb938d7377179a9ef1a6bca4c0bcbbfc291657b63@1.2.3.4:9911
-```
-
-Multiple watchtowers can be configured through this method.
-
-### Justice Fee Rates
-
-Users may optionally configure the fee rate of justice transactions by setting
-the `wtclient.sweep-fee-rate` option, which accepts values in sat/byte. The
-default value is 10 sat/byte, though users may choose to target higher rates to
-offer greater priority during fee-spikes. Modifying the `sweep-fee-rate` will
-be applied to all new updates after the daemon has been restarted.
-
-### Monitoring
-
-With the addition of the `lncli wtclient` command, users are now able to
-interact with the watchtower client directly to obtain/modify information about
-the set of registered watchtowers.
-
-As as example, with the `lncli wtclient tower` command, you can obtain the
-number of sessions currently negotiated with the watchtower added above and
-determine whether it is currently being used for backups through the
-`active_session_candidate` value.
-
-```
-🏔 lncli wtclient tower 03281d603b2c5e19b8893a484eb938d7377179a9ef1a6bca4c0bcbbfc291657b63
-{
- "pubkey": "03281d603b2c5e19b8893a484eb938d7377179a9ef1a6bca4c0bcbbfc291657b63",
- "addresses": [
- "1.2.3.4:9911"
- ],
- "active_session_candidate": true,
- "num_sessions": 1,
- "sessions": []
-}
-```
-
-To obtain information about the watchtower's sessions, users can use the
-`--include_sessions` flag.
-
-```
-🏔 lncli wtclient tower --include_sessions 03281d603b2c5e19b8893a484eb938d7377179a9ef1a6bca4c0bcbbfc291657b63
-{
- "pubkey": "03281d603b2c5e19b8893a484eb938d7377179a9ef1a6bca4c0bcbbfc291657b63",
- "addresses": [
- "1.2.3.4:9911"
- ],
- "active_session_candidate": true,
- "num_sessions": 1,
- "sessions": [
- {
- "num_backups": 0,
- "num_pending_backups": 0,
- "max_backups": 1024,
- "sweep_sat_per_byte": 10
- }
- ]
-}
-```
-
-The entire set of watchtower client configuration options can be found with
-`lncli wtclient -h`:
-
-```
-NAME:
- lncli wtclient - Interact with the watchtower client.
-
-USAGE:
- lncli wtclient command [command options] [arguments...]
-
-COMMANDS:
- add Register a watchtower to use for future sessions/backups.
- remove Remove a watchtower to prevent its use for future sessions/backups.
- towers Display information about all registered watchtowers.
- tower Display information about a specific registered watchtower.
- stats Display the session stats of the watchtower client.
- policy Display the active watchtower client policy configuration.
-
-OPTIONS:
- --help, -h show help
-```
diff --git a/lnd/feature/default_sets.go b/lnd/feature/default_sets.go
deleted file mode 100644
index ce080e8b..00000000
--- a/lnd/feature/default_sets.go
+++ /dev/null
@@ -1,54 +0,0 @@
-package feature
-
-import "github.com/pkt-cash/pktd/lnd/lnwire"
-
-// setDesc describes which feature bits should be advertised in which feature
-// sets.
-type setDesc map[lnwire.FeatureBit]map[Set]struct{}
-
-// defaultSetDesc are the default set descriptors for generating feature
-// vectors. Each set is annotated with the corresponding identifier from BOLT 9
-// indicating where it should be advertised.
-var defaultSetDesc = setDesc{
- lnwire.DataLossProtectRequired: {
- SetInit: {}, // I
- SetNodeAnn: {}, // N
- },
- lnwire.GossipQueriesOptional: {
- SetInit: {}, // I
- SetNodeAnn: {}, // N
- },
- lnwire.TLVOnionPayloadOptional: {
- SetInit: {}, // I
- SetNodeAnn: {}, // N
- SetInvoice: {}, // 9
- SetLegacyGlobal: {},
- },
- lnwire.StaticRemoteKeyRequired: {
- SetInit: {}, // I
- SetNodeAnn: {}, // N
- SetLegacyGlobal: {},
- },
- lnwire.UpfrontShutdownScriptOptional: {
- SetInit: {}, // I
- SetNodeAnn: {}, // N
- },
- lnwire.PaymentAddrOptional: {
- SetInit: {}, // I
- SetNodeAnn: {}, // N
- SetInvoice: {}, // 9
- },
- lnwire.MPPOptional: {
- SetInit: {}, // I
- SetNodeAnn: {}, // N
- SetInvoice: {}, // 9
- },
- lnwire.AnchorsOptional: {
- SetInit: {}, // I
- SetNodeAnn: {}, // N
- },
- lnwire.WumboChannelsOptional: {
- SetInit: {}, // I
- SetNodeAnn: {}, // N
- },
-}
diff --git a/lnd/feature/deps.go b/lnd/feature/deps.go
deleted file mode 100644
index b2332241..00000000
--- a/lnd/feature/deps.go
+++ /dev/null
@@ -1,133 +0,0 @@
-package feature
-
-import (
- "fmt"
-
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/lnd/lnwire"
-)
-
-type (
- // featureSet contains a set of feature bits.
- featureSet map[lnwire.FeatureBit]struct{}
-
- // supportedFeatures maps the feature bit from a feature vector to a
- // boolean indicating if this features dependencies have already been
- // verified. This allows us to short circuit verification if multiple
- // features have common dependencies, or map traversal starts verifying
- // from the bottom up.
- supportedFeatures map[lnwire.FeatureBit]bool
-
- // depDesc maps a features to its set of dependent features, which must
- // also be present for the vector to be valid. This can be used to
- // recursively check the dependency chain for features in a feature
- // vector.
- depDesc map[lnwire.FeatureBit]featureSet
-)
-
-// ErrMissingFeatureDep is an error signaling that a transitive dependency in a
-// feature vector is not set properly.
-type ErrMissingFeatureDep struct {
- dep lnwire.FeatureBit
-}
-
-// NewErrMissingFeatureDep creates a new ErrMissingFeatureDep error.
-func NewErrMissingFeatureDep(dep lnwire.FeatureBit) ErrMissingFeatureDep {
- return ErrMissingFeatureDep{dep: dep}
-}
-
-// Error returns a human-readable description of the missing dep error.
-func (e ErrMissingFeatureDep) Error() string {
- return fmt.Sprintf("missing feature dependency: %v", e.dep)
-}
-
-// deps is the default set of dependencies for assigned feature bits. If a
-// feature is not present in the depDesc it is assumed to have no dependencies.
-//
-// NOTE: For proper functioning, only the optional variant of feature bits
-// should be used in the following descriptor. In the future it may be necessary
-// to distinguish the dependencies for optional and required bits, but for now
-// the validation code maps required bits to optional ones since it simplifies
-// the number of constraints.
-var deps = depDesc{
- lnwire.PaymentAddrOptional: {
- lnwire.TLVOnionPayloadOptional: {},
- },
- lnwire.MPPOptional: {
- lnwire.PaymentAddrOptional: {},
- },
- lnwire.AnchorsOptional: {
- lnwire.StaticRemoteKeyOptional: {},
- },
-}
-
-// ValidateDeps asserts that a feature vector sets all features and their
-// transitive dependencies properly. It assumes that the dependencies between
-// optional and required features are identical, e.g. if a feature is required
-// but its dependency is optional, that is sufficient.
-func ValidateDeps(fv *lnwire.FeatureVector) er.R {
- features := fv.Features()
- supported := initSupported(features)
-
- return validateDeps(features, supported)
-}
-
-// validateDeps is a subroutine that recursively checks that the passed features
-// have all of their associated dependencies in the supported map.
-func validateDeps(features featureSet, supported supportedFeatures) er.R {
- for bit := range features {
- // Convert any required bits to optional.
- bit = mapToOptional(bit)
-
- // If the supported features doesn't contain the dependency, this
- // vector is invalid.
- checked, ok := supported[bit]
- if !ok {
- return er.E(NewErrMissingFeatureDep(bit))
- }
-
- // Alternatively, if we know that this dependency is valid, we
- // can short circuit and continue verifying other bits.
- if checked {
- continue
- }
-
- // Recursively validate dependencies, since this method ranges
- // over the subDeps. This method will return true even if
- // subDeps is nil.
- subDeps := deps[bit]
- if err := validateDeps(subDeps, supported); err != nil {
- return err
- }
-
- // Once we've confirmed that this feature's dependencies, if
- // any, are sound, we record this so other paths taken through
- // `bit` return early when inspecting the supported map.
- supported[bit] = true
- }
-
- return nil
-}
-
-// initSupported sets all bits from the feature vector as supported but not
-// checked. This signals that the validity of their dependencies has not been
-// verified. All required bits are mapped to optional to simplify the DAG.
-func initSupported(features featureSet) supportedFeatures {
- supported := make(supportedFeatures)
- for bit := range features {
- bit = mapToOptional(bit)
- supported[bit] = false
- }
-
- return supported
-}
-
-// mapToOptional returns the optional variant of a given feature bit pair. Our
-// dependendency graph is described using only optional feature bits, which
-// reduces the number of constraints we need to express in the descriptor.
-func mapToOptional(bit lnwire.FeatureBit) lnwire.FeatureBit {
- if bit.IsRequired() {
- bit ^= 0x01
- }
- return bit
-}
diff --git a/lnd/feature/deps_test.go b/lnd/feature/deps_test.go
deleted file mode 100644
index 029cb5be..00000000
--- a/lnd/feature/deps_test.go
+++ /dev/null
@@ -1,168 +0,0 @@
-package feature
-
-import (
- "reflect"
- "testing"
-
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/lnd/lnwire"
-)
-
-type depTest struct {
- name string
- raw *lnwire.RawFeatureVector
- expErr error
-}
-
-var depTests = []depTest{
- {
- name: "empty",
- raw: lnwire.NewRawFeatureVector(),
- },
- {
- name: "no deps optional",
- raw: lnwire.NewRawFeatureVector(
- lnwire.GossipQueriesOptional,
- ),
- },
- {
- name: "no deps required",
- raw: lnwire.NewRawFeatureVector(
- lnwire.TLVOnionPayloadRequired,
- ),
- },
- {
- name: "one dep optional",
- raw: lnwire.NewRawFeatureVector(
- lnwire.TLVOnionPayloadOptional,
- lnwire.PaymentAddrOptional,
- ),
- },
- {
- name: "one dep required",
- raw: lnwire.NewRawFeatureVector(
- lnwire.TLVOnionPayloadRequired,
- lnwire.PaymentAddrRequired,
- ),
- },
- {
- name: "one missing optional",
- raw: lnwire.NewRawFeatureVector(
- lnwire.PaymentAddrOptional,
- ),
- expErr: ErrMissingFeatureDep{lnwire.TLVOnionPayloadOptional},
- },
- {
- name: "one missing required",
- raw: lnwire.NewRawFeatureVector(
- lnwire.PaymentAddrRequired,
- ),
- expErr: ErrMissingFeatureDep{lnwire.TLVOnionPayloadOptional},
- },
- {
- name: "two dep optional",
- raw: lnwire.NewRawFeatureVector(
- lnwire.TLVOnionPayloadOptional,
- lnwire.PaymentAddrOptional,
- lnwire.MPPOptional,
- ),
- },
- {
- name: "two dep required",
- raw: lnwire.NewRawFeatureVector(
- lnwire.TLVOnionPayloadRequired,
- lnwire.PaymentAddrRequired,
- lnwire.MPPRequired,
- ),
- },
- {
- name: "two dep last missing optional",
- raw: lnwire.NewRawFeatureVector(
- lnwire.PaymentAddrOptional,
- lnwire.MPPOptional,
- ),
- expErr: ErrMissingFeatureDep{lnwire.TLVOnionPayloadOptional},
- },
- {
- name: "two dep last missing required",
- raw: lnwire.NewRawFeatureVector(
- lnwire.PaymentAddrRequired,
- lnwire.MPPRequired,
- ),
- expErr: ErrMissingFeatureDep{lnwire.TLVOnionPayloadOptional},
- },
- {
- name: "two dep first missing optional",
- raw: lnwire.NewRawFeatureVector(
- lnwire.TLVOnionPayloadOptional,
- lnwire.MPPOptional,
- ),
- expErr: ErrMissingFeatureDep{lnwire.PaymentAddrOptional},
- },
- {
- name: "two dep first missing required",
- raw: lnwire.NewRawFeatureVector(
- lnwire.TLVOnionPayloadRequired,
- lnwire.MPPRequired,
- ),
- expErr: ErrMissingFeatureDep{lnwire.PaymentAddrOptional},
- },
- {
- name: "forest optional",
- raw: lnwire.NewRawFeatureVector(
- lnwire.GossipQueriesOptional,
- lnwire.TLVOnionPayloadOptional,
- lnwire.PaymentAddrOptional,
- lnwire.MPPOptional,
- ),
- },
- {
- name: "forest required",
- raw: lnwire.NewRawFeatureVector(
- lnwire.GossipQueriesRequired,
- lnwire.TLVOnionPayloadRequired,
- lnwire.PaymentAddrRequired,
- lnwire.MPPRequired,
- ),
- },
- {
- name: "broken forest optional",
- raw: lnwire.NewRawFeatureVector(
- lnwire.GossipQueriesOptional,
- lnwire.TLVOnionPayloadOptional,
- lnwire.MPPOptional,
- ),
- expErr: ErrMissingFeatureDep{lnwire.PaymentAddrOptional},
- },
- {
- name: "broken forest required",
- raw: lnwire.NewRawFeatureVector(
- lnwire.GossipQueriesRequired,
- lnwire.TLVOnionPayloadRequired,
- lnwire.MPPRequired,
- ),
- expErr: ErrMissingFeatureDep{lnwire.PaymentAddrOptional},
- },
-}
-
-// TestValidateDeps tests that ValidateDeps correctly asserts whether or not the
-// set features constitute a valid feature chain when accounting for transititve
-// dependencies.
-func TestValidateDeps(t *testing.T) {
- for _, test := range depTests {
- test := test
- t.Run(test.name, func(t *testing.T) {
- testValidateDeps(t, test)
- })
- }
-}
-
-func testValidateDeps(t *testing.T, test depTest) {
- fv := lnwire.NewFeatureVector(test.raw, lnwire.Features)
- err := ValidateDeps(fv)
- if !reflect.DeepEqual(er.Wrapped(err), test.expErr) {
- t.Fatalf("validation mismatch, want: %v, got: %v",
- test.expErr, err)
-
- }
-}
diff --git a/lnd/feature/manager.go b/lnd/feature/manager.go
deleted file mode 100644
index cc754710..00000000
--- a/lnd/feature/manager.go
+++ /dev/null
@@ -1,134 +0,0 @@
-package feature
-
-import (
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/lnd/lnwire"
-)
-
-// Config houses any runtime modifications to the default set descriptors. For
-// our purposes, this typically means disabling certain features to test legacy
-// protocol interoperability or functionality.
-type Config struct {
- // NoTLVOnion unsets any optional or required TLVOnionPaylod bits from
- // all feature sets.
- NoTLVOnion bool
-
- // NoStaticRemoteKey unsets any optional or required StaticRemoteKey
- // bits from all feature sets.
- NoStaticRemoteKey bool
-
- // NoAnchors unsets any bits signaling support for anchor outputs.
- NoAnchors bool
-
- // NoWumbo unsets any bits signalling support for wumbo channels.
- NoWumbo bool
-}
-
-// Manager is responsible for generating feature vectors for different requested
-// feature sets.
-type Manager struct {
- // fsets is a static map of feature set to raw feature vectors. Requests
- // are fulfilled by cloning these interal feature vectors.
- fsets map[Set]*lnwire.RawFeatureVector
-}
-
-// NewManager creates a new feature Manager, applying any custom modifications
-// to its feature sets before returning.
-func NewManager(cfg Config) (*Manager, er.R) {
- return newManager(cfg, defaultSetDesc)
-}
-
-// newManager creates a new feature Manager, applying any custom modifications
-// to its feature sets before returning. This method accepts the setDesc as its
-// own parameter so that it can be unit tested.
-func newManager(cfg Config, desc setDesc) (*Manager, er.R) {
- // First build the default feature vector for all known sets.
- fsets := make(map[Set]*lnwire.RawFeatureVector)
- for bit, sets := range desc {
- for set := range sets {
- // Fetch the feature vector for this set, allocating a
- // new one if it doesn't exist.
- fv, ok := fsets[set]
- if !ok {
- fv = lnwire.NewRawFeatureVector()
- }
-
- // Set the configured bit on the feature vector,
- // ensuring that we don't set two feature bits for the
- // same pair.
- err := fv.SafeSet(bit)
- if err != nil {
- return nil, er.Errorf("unable to set "+
- "%v in %v: %v", bit, set, err)
- }
-
- // Write the updated feature vector under its set.
- fsets[set] = fv
- }
- }
-
- // Now, remove any features as directed by the config.
- for set, raw := range fsets {
- if cfg.NoTLVOnion {
- raw.Unset(lnwire.TLVOnionPayloadOptional)
- raw.Unset(lnwire.TLVOnionPayloadRequired)
- raw.Unset(lnwire.PaymentAddrOptional)
- raw.Unset(lnwire.PaymentAddrRequired)
- raw.Unset(lnwire.MPPOptional)
- raw.Unset(lnwire.MPPRequired)
- }
- if cfg.NoStaticRemoteKey {
- raw.Unset(lnwire.StaticRemoteKeyOptional)
- raw.Unset(lnwire.StaticRemoteKeyRequired)
- }
- if cfg.NoAnchors {
- raw.Unset(lnwire.AnchorsOptional)
- raw.Unset(lnwire.AnchorsRequired)
- }
- if cfg.NoWumbo {
- raw.Unset(lnwire.WumboChannelsOptional)
- raw.Unset(lnwire.WumboChannelsRequired)
- }
-
- // Ensure that all of our feature sets properly set any
- // dependent features.
- fv := lnwire.NewFeatureVector(raw, lnwire.Features)
- err := ValidateDeps(fv)
- if err != nil {
- return nil, er.Errorf("invalid feature set %v: %v",
- set, err)
- }
- }
-
- return &Manager{
- fsets: fsets,
- }, nil
-}
-
-// GetRaw returns a raw feature vector for the passed set. If no set is known,
-// an empty raw feature vector is returned.
-func (m *Manager) GetRaw(set Set) *lnwire.RawFeatureVector {
- if fv, ok := m.fsets[set]; ok {
- return fv.Clone()
- }
-
- return lnwire.NewRawFeatureVector()
-}
-
-// Get returns a feature vector for the passed set. If no set is known, an empty
-// feature vector is returned.
-func (m *Manager) Get(set Set) *lnwire.FeatureVector {
- raw := m.GetRaw(set)
- return lnwire.NewFeatureVector(raw, lnwire.Features)
-}
-
-// ListSets returns a list of the feature sets that our node supports.
-func (m *Manager) ListSets() []Set {
- var sets []Set
-
- for set := range m.fsets {
- sets = append(sets, set)
- }
-
- return sets
-}
diff --git a/lnd/feature/manager_internal_test.go b/lnd/feature/manager_internal_test.go
deleted file mode 100644
index 0a581f2b..00000000
--- a/lnd/feature/manager_internal_test.go
+++ /dev/null
@@ -1,128 +0,0 @@
-package feature
-
-import (
- "reflect"
- "testing"
-
- "github.com/pkt-cash/pktd/lnd/lnwire"
-)
-
-type managerTest struct {
- name string
- cfg Config
-}
-
-const unknownFeature lnwire.FeatureBit = 30
-
-var testSetDesc = setDesc{
- lnwire.DataLossProtectRequired: {
- SetNodeAnn: {}, // I
- },
- lnwire.TLVOnionPayloadOptional: {
- SetInit: {}, // I
- SetNodeAnn: {}, // N
- },
- lnwire.StaticRemoteKeyOptional: {
- SetInit: {}, // I
- SetNodeAnn: {}, // N
- },
-}
-
-var managerTests = []managerTest{
- {
- name: "default",
- cfg: Config{},
- },
- {
- name: "no tlv",
- cfg: Config{
- NoTLVOnion: true,
- },
- },
- {
- name: "no static remote key",
- cfg: Config{
- NoStaticRemoteKey: true,
- },
- },
- {
- name: "no tlv or static remote key",
- cfg: Config{
- NoTLVOnion: true,
- NoStaticRemoteKey: true,
- },
- },
-}
-
-// TestManager asserts basic initialazation and operation of a feature manager,
-// including that the proper features are removed in response to config changes.
-func TestManager(t *testing.T) {
- for _, test := range managerTests {
- test := test
- t.Run(test.name, func(t *testing.T) {
- testManager(t, test)
- })
- }
-}
-
-func testManager(t *testing.T, test managerTest) {
- m, err := newManager(test.cfg, testSetDesc)
- if err != nil {
- t.Fatalf("unable to create feature manager: %v", err)
- }
-
- sets := []Set{
- SetInit,
- SetLegacyGlobal,
- SetNodeAnn,
- SetInvoice,
- }
-
- for _, set := range sets {
- raw := m.GetRaw(set)
- fv := m.Get(set)
-
- fv2 := lnwire.NewFeatureVector(raw, lnwire.Features)
-
- if !reflect.DeepEqual(fv, fv2) {
- t.Fatalf("mismatch Get vs GetRaw, raw: %v vs fv: %v",
- fv2, fv)
- }
-
- assertUnset := func(bit lnwire.FeatureBit) {
- hasBit := fv.HasFeature(bit) || fv.HasFeature(bit^1)
- if hasBit {
- t.Fatalf("bit %v or %v is set", bit, bit^1)
- }
- }
-
- // Assert that the manager properly unset the configured feature
- // bits from all sets.
- if test.cfg.NoTLVOnion {
- assertUnset(lnwire.TLVOnionPayloadOptional)
- }
- if test.cfg.NoStaticRemoteKey {
- assertUnset(lnwire.StaticRemoteKeyOptional)
- }
-
- assertUnset(unknownFeature)
- }
-
- // Do same basic sanity checks on features that are always present.
- nodeFeatures := m.Get(SetNodeAnn)
-
- assertSet := func(bit lnwire.FeatureBit) {
- has := nodeFeatures.HasFeature(bit)
- if !has {
- t.Fatalf("node features don't advertised %v", bit)
- }
- }
-
- assertSet(lnwire.DataLossProtectOptional)
- if !test.cfg.NoTLVOnion {
- assertSet(lnwire.TLVOnionPayloadRequired)
- }
- if !test.cfg.NoStaticRemoteKey {
- assertSet(lnwire.StaticRemoteKeyOptional)
- }
-}
diff --git a/lnd/feature/required.go b/lnd/feature/required.go
deleted file mode 100644
index 654b1a2e..00000000
--- a/lnd/feature/required.go
+++ /dev/null
@@ -1,23 +0,0 @@
-package feature
-
-import (
- "fmt"
-
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/lnd/lnwire"
-)
-
-// ErrUnknownRequired signals that a feature vector requires certain features
-// that our node is unaware of or does not implement.
-var ErrUnknownRequired = er.GenericErrorType.CodeWithDetail("ErrUnknownRequired",
- "feature vector contains unknown required features")
-
-// ValidateRequired returns an error if the feature vector contains a non-zero
-// number of unknown, required feature bits.
-func ValidateRequired(fv *lnwire.FeatureVector) er.R {
- unknown := fv.UnknownRequiredFeatures()
- if len(unknown) > 0 {
- return ErrUnknownRequired.New(fmt.Sprintf("%v", unknown), nil)
- }
- return nil
-}
diff --git a/lnd/feature/set.go b/lnd/feature/set.go
deleted file mode 100644
index 2ac2ce52..00000000
--- a/lnd/feature/set.go
+++ /dev/null
@@ -1,41 +0,0 @@
-package feature
-
-// Set is an enum identifying various feature sets, which separates the single
-// feature namespace into distinct categories depending what context a feature
-// vector is being used.
-type Set uint8
-
-const (
- // SetInit identifies features that should be sent in an Init message to
- // a remote peer.
- SetInit Set = iota
-
- // SetLegacyGlobal identifies features that should be set in the legacy
- // GlobalFeatures field of an Init message, which maintains backwards
- // compatibility with nodes that haven't implemented flat features.
- SetLegacyGlobal
-
- // SetNodeAnn identifies features that should be advertised on node
- // announcements.
- SetNodeAnn
-
- // SetInvoice identifies features that should be advertised on invoices
- // generated by the daemon.
- SetInvoice
-)
-
-// String returns a human-readable description of a Set.
-func (s Set) String() string {
- switch s {
- case SetInit:
- return "SetInit"
- case SetLegacyGlobal:
- return "SetLegacyGlobal"
- case SetNodeAnn:
- return "SetNodeAnn"
- case SetInvoice:
- return "SetInvoice"
- default:
- return "SetUnknown"
- }
-}
diff --git a/lnd/fmgr/interfaces.go b/lnd/fmgr/interfaces.go
deleted file mode 100644
index 47238a8d..00000000
--- a/lnd/fmgr/interfaces.go
+++ /dev/null
@@ -1,20 +0,0 @@
-package fmgr
-
-import (
- "github.com/pkt-cash/pktd/lnd/lnpeer"
- "github.com/pkt-cash/pktd/lnd/lnwire"
-)
-
-// Manager is an interface that describes the basic operation of a funding
-// manager. It should at a minimum process a subset of lnwire messages that
-// are denoted as funding messages.
-type Manager interface {
- // ProcessFundingMsg processes a funding message represented by the
- // lnwire.Message parameter along with the Peer object representing a
- // connection to the counterparty.
- ProcessFundingMsg(lnwire.Message, lnpeer.Peer)
-
- // IsPendingChannel is used to determine whether to send an Error message
- // to the funding manager or not.
- IsPendingChannel([32]byte, lnpeer.Peer) bool
-}
diff --git a/lnd/fundingmanager.go b/lnd/fundingmanager.go
deleted file mode 100644
index b467537a..00000000
--- a/lnd/fundingmanager.go
+++ /dev/null
@@ -1,3583 +0,0 @@
-package lnd
-
-import (
- "bytes"
- "encoding/binary"
- "sync"
- "time"
-
- "github.com/davecgh/go-spew/spew"
- "github.com/pkt-cash/pktd/btcec"
- "github.com/pkt-cash/pktd/btcutil"
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/chaincfg/chainhash"
- "github.com/pkt-cash/pktd/lnd/chainntnfs"
- "github.com/pkt-cash/pktd/lnd/chainreg"
- "github.com/pkt-cash/pktd/lnd/chanacceptor"
- "github.com/pkt-cash/pktd/lnd/channeldb"
- "github.com/pkt-cash/pktd/lnd/channeldb/kvdb"
- "github.com/pkt-cash/pktd/lnd/discovery"
- "github.com/pkt-cash/pktd/lnd/htlcswitch"
- "github.com/pkt-cash/pktd/lnd/input"
- "github.com/pkt-cash/pktd/lnd/keychain"
- "github.com/pkt-cash/pktd/lnd/labels"
- "github.com/pkt-cash/pktd/lnd/lnpeer"
- "github.com/pkt-cash/pktd/lnd/lnrpc"
- "github.com/pkt-cash/pktd/lnd/lnwallet"
- "github.com/pkt-cash/pktd/lnd/lnwallet/chainfee"
- "github.com/pkt-cash/pktd/lnd/lnwallet/chanfunding"
- "github.com/pkt-cash/pktd/lnd/lnwire"
- "github.com/pkt-cash/pktd/lnd/routing"
- "github.com/pkt-cash/pktd/pktlog/log"
- "github.com/pkt-cash/pktd/txscript"
- "github.com/pkt-cash/pktd/wire"
- "golang.org/x/crypto/salsa20"
-)
-
-const (
- // TODO(roasbeef): tune
- msgBufferSize = 50
-
- // minBtcRemoteDelay and maxBtcRemoteDelay is the extremes of the
- // Bitcoin CSV delay we will require the remote to use for its
- // commitment transaction. The actual delay we will require will be
- // somewhere between these values, depending on channel size.
- minBtcRemoteDelay uint16 = 144
- maxBtcRemoteDelay uint16 = 2016
-
- // minLtcRemoteDelay and maxLtcRemoteDelay is the extremes of the
- // Litecoin CSV delay we will require the remote to use for its
- // commitment transaction. The actual delay we will require will be
- // somewhere between these values, depending on channel size.
- minLtcRemoteDelay uint16 = 576
- maxLtcRemoteDelay uint16 = 8064
-
- // maxWaitNumBlocksFundingConf is the maximum number of blocks to wait
- // for the funding transaction to be confirmed before forgetting
- // channels that aren't initiated by us. 2016 blocks is ~2 weeks.
- maxWaitNumBlocksFundingConf = 2016
-
- // minChanFundingSize is the smallest channel that we'll allow to be
- // created over the RPC interface.
- minChanFundingSize = btcutil.Amount(20000)
-
- // MaxBtcFundingAmount is a soft-limit of the maximum channel size
- // currently accepted on the Bitcoin chain within the Lightning
- // Protocol. This limit is defined in BOLT-0002, and serves as an
- // initial precautionary limit while implementations are battle tested
- // in the real world.
- MaxBtcFundingAmount = btcutil.Amount(1<<24) - 1
-
- // MaxBtcFundingAmountWumbo is a soft-limit on the maximum size of wumbo
- // channels. This limit is 10 BTC and is the only thing standing between
- // you and limitless channel size (apart from 21 million cap)
- MaxBtcFundingAmountWumbo = btcutil.Amount(1000000000)
-
- // maxLtcFundingAmount is a soft-limit of the maximum channel size
- // currently accepted on the Litecoin chain within the Lightning
- // Protocol.
- maxLtcFundingAmount = MaxBtcFundingAmount * chainreg.BtcToLtcConversionRate
-
- // 10mn PKT chan limit
- maxPktFundingAmount = btcutil.Amount(1 << 30 * 10000000)
-)
-
-var (
- // MaxFundingAmount is a soft-limit of the maximum channel size
- // currently accepted within the Lightning Protocol. This limit is
- // defined in BOLT-0002, and serves as an initial precautionary limit
- // while implementations are battle tested in the real world.
- //
- // At the moment, this value depends on which chain is active. It is set
- // to the value under the Bitcoin chain as default.
- //
- // TODO(roasbeef): add command line param to modify
- MaxFundingAmount = MaxBtcFundingAmount
-
- // ErrFundingManagerShuttingDown is an error returned when attempting to
- // process a funding request/message but the funding manager has already
- // been signaled to shut down.
- ErrFundingManagerShuttingDown = Err.CodeWithDetail("ErrFundingManagerShuttingDown",
- "funding manager shutting down")
-
- // ErrConfirmationTimeout is an error returned when we as a responder
- // are waiting for a funding transaction to confirm, but too many
- // blocks pass without confirmation.
- ErrConfirmationTimeout = Err.CodeWithDetail("ErrConfirmationTimeout",
- "timeout waiting for funding confirmation")
-
- // errUpfrontShutdownScriptNotSupported is returned if an upfront shutdown
- // script is set for a peer that does not support the feature bit.
- errUpfrontShutdownScriptNotSupported = Err.CodeWithDetail(
- "errUpfrontShutdownScriptNotSupported",
- "peer does not support option upfront shutdown script")
-
- zeroID [32]byte
-)
-
-// reservationWithCtx encapsulates a pending channel reservation. This wrapper
-// struct is used internally within the funding manager to track and progress
-// the funding workflow initiated by incoming/outgoing methods from the target
-// peer. Additionally, this struct houses a response and error channel which is
-// used to respond to the caller in the case a channel workflow is initiated
-// via a local signal such as RPC.
-//
-// TODO(roasbeef): actually use the context package
-// * deadlines, etc.
-type reservationWithCtx struct {
- reservation *lnwallet.ChannelReservation
- peer lnpeer.Peer
-
- chanAmt btcutil.Amount
-
- // Constraints we require for the remote.
- remoteCsvDelay uint16
- remoteMinHtlc lnwire.MilliSatoshi
- remoteMaxValue lnwire.MilliSatoshi
- remoteMaxHtlcs uint16
-
- // maxLocalCsv is the maximum csv we will accept from the remote.
- maxLocalCsv uint16
-
- updateMtx sync.RWMutex
- lastUpdated time.Time
-
- updates chan *lnrpc.OpenStatusUpdate
- err chan er.R
-}
-
-// isLocked checks the reservation's timestamp to determine whether it is locked.
-func (r *reservationWithCtx) isLocked() bool {
- r.updateMtx.RLock()
- defer r.updateMtx.RUnlock()
-
- // The time zero value represents a locked reservation.
- return r.lastUpdated.IsZero()
-}
-
-// updateTimestamp updates the reservation's timestamp with the current time.
-func (r *reservationWithCtx) updateTimestamp() {
- r.updateMtx.Lock()
- defer r.updateMtx.Unlock()
-
- r.lastUpdated = time.Now()
-}
-
-// initFundingMsg is sent by an outside subsystem to the funding manager in
-// order to kick off a funding workflow with a specified target peer. The
-// original request which defines the parameters of the funding workflow are
-// embedded within this message giving the funding manager full context w.r.t
-// the workflow.
-type initFundingMsg struct {
- peer lnpeer.Peer
- *openChanReq
-}
-
-// fundingMsg is sent by the ProcessFundingMsg function and packages a
-// funding-specific lnwire.Message along with the lnpeer.Peer that sent it.
-type fundingMsg struct {
- msg lnwire.Message
- peer lnpeer.Peer
-}
-
-// pendingChannels is a map instantiated per-peer which tracks all active
-// pending single funded channels indexed by their pending channel identifier,
-// which is a set of 32-bytes generated via a CSPRNG.
-type pendingChannels map[[32]byte]*reservationWithCtx
-
-// serializedPubKey is used within the FundingManager's activeReservations list
-// to identify the nodes with which the FundingManager is actively working to
-// initiate new channels.
-type serializedPubKey [33]byte
-
-// newSerializedKey creates a new serialized public key from an instance of a
-// live pubkey object.
-func newSerializedKey(pubKey *btcec.PublicKey) serializedPubKey {
- var s serializedPubKey
- copy(s[:], pubKey.SerializeCompressed())
- return s
-}
-
-// fundingConfig defines the configuration for the FundingManager. All elements
-// within the configuration MUST be non-nil for the FundingManager to carry out
-// its duties.
-type fundingConfig struct {
- // NoWumboChans indicates if we're to reject all incoming wumbo channel
- // requests, and also reject all outgoing wumbo channel requests.
- NoWumboChans bool
-
- // IDKey is the PublicKey that is used to identify this node within the
- // Lightning Network.
- IDKey *btcec.PublicKey
-
- // Wallet handles the parts of the funding process that involves moving
- // funds from on-chain transaction outputs into Lightning channels.
- Wallet *lnwallet.LightningWallet
-
- // PublishTransaction facilitates the process of broadcasting a
- // transaction to the network.
- PublishTransaction func(*wire.MsgTx, string) er.R
-
- // UpdateLabel updates the label that a transaction has in our wallet,
- // overwriting any existing labels.
- UpdateLabel func(chainhash.Hash, string) er.R
-
- // FeeEstimator calculates appropriate fee rates based on historical
- // transaction information.
- FeeEstimator chainfee.Estimator
-
- // Notifier is used by the FundingManager to determine when the
- // channel's funding transaction has been confirmed on the blockchain
- // so that the channel creation process can be completed.
- Notifier chainntnfs.ChainNotifier
-
- // SignMessage signs an arbitrary message with a given public key. The
- // actual digest signed is the double sha-256 of the message. In the
- // case that the private key corresponding to the passed public key
- // cannot be located, then an error is returned.
- //
- // TODO(roasbeef): should instead pass on this responsibility to a
- // distinct sub-system?
- SignMessage func(pubKey *btcec.PublicKey,
- msg []byte) (input.Signature, er.R)
-
- // CurrentNodeAnnouncement should return the latest, fully signed node
- // announcement from the backing Lightning Network node.
- CurrentNodeAnnouncement func() (lnwire.NodeAnnouncement, er.R)
-
- // SendAnnouncement is used by the FundingManager to send announcement
- // messages to the Gossiper to possibly broadcast to the greater
- // network. A set of optional message fields can be provided to populate
- // any information within the graph that is not included in the gossip
- // message.
- SendAnnouncement func(msg lnwire.Message,
- optionalFields ...discovery.OptionalMsgField) chan er.R
-
- // NotifyWhenOnline allows the FundingManager to register with a
- // subsystem that will notify it when the peer comes online. This is
- // used when sending the fundingLocked message, since it MUST be
- // delivered after the funding transaction is confirmed.
- //
- // NOTE: The peerChan channel must be buffered.
- NotifyWhenOnline func(peer [33]byte, peerChan chan<- lnpeer.Peer)
-
- // FindChannel queries the database for the channel with the given
- // channel ID.
- FindChannel func(chanID lnwire.ChannelID) (*channeldb.OpenChannel, er.R)
-
- // TempChanIDSeed is a cryptographically random string of bytes that's
- // used as a seed to generate pending channel ID's.
- TempChanIDSeed [32]byte
-
- // DefaultRoutingPolicy is the default routing policy used when
- // initially announcing channels.
- DefaultRoutingPolicy htlcswitch.ForwardingPolicy
-
- // DefaultMinHtlcIn is the default minimum incoming htlc value that is
- // set as a channel parameter.
- DefaultMinHtlcIn lnwire.MilliSatoshi
-
- // NumRequiredConfs is a function closure that helps the funding
- // manager decide how many confirmations it should require for a
- // channel extended to it. The function is able to take into account
- // the amount of the channel, and any funds we'll be pushed in the
- // process to determine how many confirmations we'll require.
- NumRequiredConfs func(btcutil.Amount, lnwire.MilliSatoshi) uint16
-
- // RequiredRemoteDelay is a function that maps the total amount in a
- // proposed channel to the CSV delay that we'll require for the remote
- // party. Naturally a larger channel should require a higher CSV delay
- // in order to give us more time to claim funds in the case of a
- // contract breach.
- RequiredRemoteDelay func(btcutil.Amount) uint16
-
- // RequiredRemoteChanReserve is a function closure that, given the
- // channel capacity and dust limit, will return an appropriate amount
- // for the remote peer's required channel reserve that is to be adhered
- // to at all times.
- RequiredRemoteChanReserve func(capacity, dustLimit btcutil.Amount) btcutil.Amount
-
- // RequiredRemoteMaxValue is a function closure that, given the channel
- // capacity, returns the amount of MilliSatoshis that our remote peer
- // can have in total outstanding HTLCs with us.
- RequiredRemoteMaxValue func(btcutil.Amount) lnwire.MilliSatoshi
-
- // RequiredRemoteMaxHTLCs is a function closure that, given the channel
- // capacity, returns the number of maximum HTLCs the remote peer can
- // offer us.
- RequiredRemoteMaxHTLCs func(btcutil.Amount) uint16
-
- // WatchNewChannel is to be called once a new channel enters the final
- // funding stage: waiting for on-chain confirmation. This method sends
- // the channel to the ChainArbitrator so it can watch for any on-chain
- // events related to the channel. We also provide the public key of the
- // node we're establishing a channel with for reconnection purposes.
- WatchNewChannel func(*channeldb.OpenChannel, *btcec.PublicKey) er.R
-
- // ReportShortChanID allows the funding manager to report the newly
- // discovered short channel ID of a formerly pending channel to outside
- // sub-systems.
- ReportShortChanID func(wire.OutPoint) er.R
-
- // ZombieSweeperInterval is the periodic time interval in which the
- // zombie sweeper is run.
- ZombieSweeperInterval time.Duration
-
- // ReservationTimeout is the length of idle time that must pass before
- // a reservation is considered a zombie.
- ReservationTimeout time.Duration
-
- // MinChanSize is the smallest channel size that we'll accept as an
- // inbound channel. We have such a parameter, as otherwise, nodes could
- // flood us with very small channels that would never really be usable
- // due to fees.
- MinChanSize btcutil.Amount
-
- // MaxChanSize is the largest channel size that we'll accept as an
- // inbound channel. We have such a parameter, so that you may decide how
- // WUMBO you would like your channel.
- MaxChanSize btcutil.Amount
-
- // MaxPendingChannels is the maximum number of pending channels we
- // allow for each peer.
- MaxPendingChannels int
-
- // RejectPush is set true if the fundingmanager should reject any
- // incoming channels having a non-zero push amount.
- RejectPush bool
-
- // MaxLocalCSVDelay is the maximum csv delay we will allow for our
- // commit output. Channels that exceed this value will be failed.
- MaxLocalCSVDelay uint16
-
- // NotifyOpenChannelEvent informs the ChannelNotifier when channels
- // transition from pending open to open.
- NotifyOpenChannelEvent func(wire.OutPoint)
-
- // OpenChannelPredicate is a predicate on the lnwire.OpenChannel message
- // and on the requesting node's public key that returns a bool which tells
- // the funding manager whether or not to accept the channel.
- OpenChannelPredicate chanacceptor.ChannelAcceptor
-
- // NotifyPendingOpenChannelEvent informs the ChannelNotifier when channels
- // enter a pending state.
- NotifyPendingOpenChannelEvent func(wire.OutPoint, *channeldb.OpenChannel)
-
- // EnableUpfrontShutdown specifies whether the upfront shutdown script
- // is enabled.
- EnableUpfrontShutdown bool
-
- // RegisteredChains keeps track of all chains that have been registered
- // with the daemon.
- RegisteredChains *chainreg.ChainRegistry
-}
-
-// fundingManager acts as an orchestrator/bridge between the wallet's
-// 'ChannelReservation' workflow, and the wire protocol's funding initiation
-// messages. Any requests to initiate the funding workflow for a channel,
-// either kicked-off locally or remotely are handled by the funding manager.
-// Once a channel's funding workflow has been completed, any local callers, the
-// local peer, and possibly the remote peer are notified of the completion of
-// the channel workflow. Additionally, any temporary or permanent access
-// controls between the wallet and remote peers are enforced via the funding
-// manager.
-type fundingManager struct {
- started sync.Once
- stopped sync.Once
-
- // cfg is a copy of the configuration struct that the FundingManager
- // was initialized with.
- cfg *fundingConfig
-
- // chanIDKey is a cryptographically random key that's used to generate
- // temporary channel ID's.
- chanIDKey [32]byte
-
- // chanIDNonce is a nonce that's incremented for each new funding
- // reservation created.
- nonceMtx sync.RWMutex
- chanIDNonce uint64
-
- // activeReservations is a map which houses the state of all pending
- // funding workflows.
- activeReservations map[serializedPubKey]pendingChannels
-
- // signedReservations is a utility map that maps the permanent channel
- // ID of a funding reservation to its temporary channel ID. This is
- // required as mid funding flow, we switch to referencing the channel
- // by its full channel ID once the commitment transactions have been
- // signed by both parties.
- signedReservations map[lnwire.ChannelID][32]byte
-
- // resMtx guards both of the maps above to ensure that all access is
- // goroutine safe.
- resMtx sync.RWMutex
-
- // fundingMsgs is a channel that relays fundingMsg structs from
- // external sub-systems using the ProcessFundingMsg call.
- fundingMsgs chan *fundingMsg
-
- // queries is a channel which receives requests to query the internal
- // state of the funding manager.
- queries chan interface{}
-
- // fundingRequests is a channel used to receive channel initiation
- // requests from a local subsystem within the daemon.
- fundingRequests chan *initFundingMsg
-
- // newChanBarriers is a map from a channel ID to a 'barrier' which will
- // be signalled once the channel is fully open. This barrier acts as a
- // synchronization point for any incoming/outgoing HTLCs before the
- // channel has been fully opened.
- barrierMtx sync.RWMutex
- newChanBarriers map[lnwire.ChannelID]chan struct{}
-
- localDiscoveryMtx sync.Mutex
- localDiscoverySignals map[lnwire.ChannelID]chan struct{}
-
- handleFundingLockedMtx sync.RWMutex
- handleFundingLockedBarriers map[lnwire.ChannelID]struct{}
-
- quit chan struct{}
- wg sync.WaitGroup
-}
-
-// channelOpeningState represents the different states a channel can be in
-// between the funding transaction has been confirmed and the channel is
-// announced to the network and ready to be used.
-type channelOpeningState uint8
-
-const (
- // markedOpen is the opening state of a channel if the funding
- // transaction is confirmed on-chain, but fundingLocked is not yet
- // successfully sent to the other peer.
- markedOpen channelOpeningState = iota
-
- // fundingLockedSent is the opening state of a channel if the
- // fundingLocked message has successfully been sent to the other peer,
- // but we still haven't announced the channel to the network.
- fundingLockedSent
-
- // addedToRouterGraph is the opening state of a channel if the
- // channel has been successfully added to the router graph
- // immediately after the fundingLocked message has been sent, but
- // we still haven't announced the channel to the network.
- addedToRouterGraph
-)
-
-var (
- // channelOpeningStateBucket is the database bucket used to store the
- // channelOpeningState for each channel that is currently in the process
- // of being opened.
- channelOpeningStateBucket = []byte("channelOpeningState")
-
- // ErrChannelNotFound is an error returned when a channel is not known
- // to us. In this case of the fundingManager, this error is returned
- // when the channel in question is not considered being in an opening
- // state.
- ErrChannelNotFound = Err.CodeWithDetail("ErrChannelNotFound",
- "channel not found")
-)
-
-// newFundingManager creates and initializes a new instance of the
-// fundingManager.
-func newFundingManager(cfg fundingConfig) (*fundingManager, er.R) {
- return &fundingManager{
- cfg: &cfg,
- chanIDKey: cfg.TempChanIDSeed,
- activeReservations: make(map[serializedPubKey]pendingChannels),
- signedReservations: make(map[lnwire.ChannelID][32]byte),
- newChanBarriers: make(map[lnwire.ChannelID]chan struct{}),
- fundingMsgs: make(chan *fundingMsg, msgBufferSize),
- fundingRequests: make(chan *initFundingMsg, msgBufferSize),
- localDiscoverySignals: make(map[lnwire.ChannelID]chan struct{}),
- handleFundingLockedBarriers: make(map[lnwire.ChannelID]struct{}),
- queries: make(chan interface{}, 1),
- quit: make(chan struct{}),
- }, nil
-}
-
-// Start launches all helper goroutines required for handling requests sent
-// to the funding manager.
-func (f *fundingManager) Start() er.R {
- var err er.R
- f.started.Do(func() {
- err = f.start()
- })
- return err
-}
-
-func (f *fundingManager) start() er.R {
- log.Tracef("Funding manager running")
-
- // Upon restart, the Funding Manager will check the database to load any
- // channels that were waiting for their funding transactions to be
- // confirmed on the blockchain at the time when the daemon last went
- // down.
- // TODO(roasbeef): store height that funding finished?
- // * would then replace call below
- allChannels, err := f.cfg.Wallet.Cfg.Database.FetchAllChannels()
- if err != nil {
- return err
- }
-
- for _, channel := range allChannels {
- chanID := lnwire.NewChanIDFromOutPoint(&channel.FundingOutpoint)
-
- // For any channels that were in a pending state when the
- // daemon was last connected, the Funding Manager will
- // re-initialize the channel barriers, and republish the
- // funding transaction if we're the initiator.
- if channel.IsPending {
- f.barrierMtx.Lock()
- log.Tracef("Loading pending ChannelPoint(%v), "+
- "creating chan barrier",
- channel.FundingOutpoint)
-
- f.newChanBarriers[chanID] = make(chan struct{})
- f.barrierMtx.Unlock()
-
- f.localDiscoverySignals[chanID] = make(chan struct{})
-
- // Rebroadcast the funding transaction for any pending
- // channel that we initiated. No error will be returned
- // if the transaction already has been broadcast.
- chanType := channel.ChanType
- if chanType.IsSingleFunder() && chanType.HasFundingTx() &&
- channel.IsInitiator {
-
- var fundingTxBuf bytes.Buffer
- err := channel.FundingTxn.Serialize(&fundingTxBuf)
- if err != nil {
- log.Errorf("Unable to serialize "+
- "funding transaction %v: %v",
- channel.FundingTxn.TxHash(), err)
-
- // Clear the buffer of any bytes that
- // were written before the serialization
- // error to prevent logging an
- // incomplete transaction.
- fundingTxBuf.Reset()
- }
-
- log.Debugf("Rebroadcasting funding tx for "+
- "ChannelPoint(%v): %x",
- channel.FundingOutpoint,
- fundingTxBuf.Bytes())
-
- // Set a nil short channel ID at this stage
- // because we do not know it until our funding
- // tx confirms.
- label := labels.MakeLabel(
- labels.LabelTypeChannelOpen, nil,
- )
-
- errr := f.cfg.PublishTransaction(
- channel.FundingTxn, label,
- )
- if errr != nil {
- log.Errorf("Unable to rebroadcast "+
- "funding tx %x for "+
- "ChannelPoint(%v): %v",
- fundingTxBuf.Bytes(),
- channel.FundingOutpoint, errr)
- }
- }
- }
-
- // We will restart the funding state machine for all channels,
- // which will wait for the channel's funding transaction to be
- // confirmed on the blockchain, and transmit the messages
- // necessary for the channel to be operational.
- f.wg.Add(1)
- go f.advanceFundingState(channel, chanID, nil)
- }
-
- f.wg.Add(1) // TODO(roasbeef): tune
- go f.reservationCoordinator()
-
- return nil
-}
-
-// Stop signals all helper goroutines to execute a graceful shutdown. This
-// method will block until all goroutines have exited.
-func (f *fundingManager) Stop() er.R {
- var err er.R
- f.stopped.Do(func() {
- err = f.stop()
- })
- return err
-}
-
-func (f *fundingManager) stop() er.R {
- log.Infof("Funding manager shutting down")
-
- close(f.quit)
- f.wg.Wait()
-
- return nil
-}
-
-// nextPendingChanID returns the next free pending channel ID to be used to
-// identify a particular future channel funding workflow.
-func (f *fundingManager) nextPendingChanID() [32]byte {
- // Obtain a fresh nonce. We do this by encoding the current nonce
- // counter, then incrementing it by one.
- f.nonceMtx.Lock()
- var nonce [8]byte
- binary.LittleEndian.PutUint64(nonce[:], f.chanIDNonce)
- f.chanIDNonce++
- f.nonceMtx.Unlock()
-
- // We'll generate the next pending channelID by "encrypting" 32-bytes
- // of zeroes which'll extract 32 random bytes from our stream cipher.
- var (
- nextChanID [32]byte
- zeroes [32]byte
- )
- salsa20.XORKeyStream(nextChanID[:], zeroes[:], nonce[:], &f.chanIDKey)
-
- return nextChanID
-}
-
-type pendingChannel struct {
- identityPub *btcec.PublicKey
- channelPoint *wire.OutPoint
- capacity btcutil.Amount
- localBalance btcutil.Amount
- remoteBalance btcutil.Amount
-}
-
-type pendingChansReq struct {
- resp chan []*pendingChannel
- err chan er.R
-}
-
-// PendingChannels returns a slice describing all the channels which are
-// currently pending at the last state of the funding workflow.
-func (f *fundingManager) PendingChannels() ([]*pendingChannel, er.R) {
- respChan := make(chan []*pendingChannel, 1)
- errChan := make(chan er.R, 1)
-
- req := &pendingChansReq{
- resp: respChan,
- err: errChan,
- }
-
- select {
- case f.queries <- req:
- case <-f.quit:
- return nil, ErrFundingManagerShuttingDown.Default()
- }
-
- select {
- case resp := <-respChan:
- return resp, nil
- case err := <-errChan:
- return nil, err
- case <-f.quit:
- return nil, ErrFundingManagerShuttingDown.Default()
- }
-}
-
-// CancelPeerReservations cancels all active reservations associated with the
-// passed node. This will ensure any outputs which have been pre committed,
-// (and thus locked from coin selection), are properly freed.
-func (f *fundingManager) CancelPeerReservations(nodePub [33]byte) {
-
- log.Debugf("Cancelling all reservations for peer %x", nodePub[:])
-
- f.resMtx.Lock()
- defer f.resMtx.Unlock()
-
- // We'll attempt to look up this node in the set of active
- // reservations. If they don't have any, then there's no further work
- // to be done.
- nodeReservations, ok := f.activeReservations[nodePub]
- if !ok {
- log.Debugf("No active reservations for node: %x", nodePub[:])
- return
- }
-
- // If they do have any active reservations, then we'll cancel all of
- // them (which releases any locked UTXO's), and also delete it from the
- // reservation map.
- for pendingID, resCtx := range nodeReservations {
- if err := resCtx.reservation.Cancel(); err != nil {
- log.Errorf("unable to cancel reservation for "+
- "node=%x: %v", nodePub[:], err)
- }
-
- resCtx.err <- er.Errorf("peer disconnected")
- delete(nodeReservations, pendingID)
- }
-
- // Finally, we'll delete the node itself from the set of reservations.
- delete(f.activeReservations, nodePub)
-}
-
-// failFundingFlow will fail the active funding flow with the target peer,
-// identified by its unique temporary channel ID. This method will send an
-// error to the remote peer, and also remove the reservation from our set of
-// pending reservations.
-//
-// TODO(roasbeef): if peer disconnects, and haven't yet broadcast funding
-// transaction, then all reservations should be cleared.
-func (f *fundingManager) failFundingFlow(peer lnpeer.Peer, tempChanID [32]byte,
- fundingErr er.R) {
-
- log.Debugf("Failing funding flow for pending_id=%x: %v",
- tempChanID, fundingErr)
-
- ctx, err := f.cancelReservationCtx(peer.IdentityKey(), tempChanID, false)
- if err != nil {
- log.Errorf("unable to cancel reservation: %v", err)
- }
-
- // In case the case where the reservation existed, send the funding
- // error on the error channel.
- if ctx != nil {
- ctx.err <- fundingErr
- }
-
- // We only send the exact error if it is part of out whitelisted set of
- // errors (lnwire.FundingError or lnwallet.ReservationError).
- errMsg := &lnwire.Error{
- ChanID: tempChanID,
- Data: lnwire.ErrorData(fundingErr.Message()),
- }
-
- log.Debugf("Sending funding error to peer (%x): %v",
- peer.IdentityKey().SerializeCompressed(), spew.Sdump(errMsg))
- if err := peer.SendMessage(false, errMsg); err != nil {
- log.Errorf("unable to send error message to peer %v", err)
- }
-}
-
-// reservationCoordinator is the primary goroutine tasked with progressing the
-// funding workflow between the wallet, and any outside peers or local callers.
-//
-// NOTE: This MUST be run as a goroutine.
-func (f *fundingManager) reservationCoordinator() {
- defer f.wg.Done()
-
- zombieSweepTicker := time.NewTicker(f.cfg.ZombieSweeperInterval)
- defer zombieSweepTicker.Stop()
-
- for {
- select {
-
- case fmsg := <-f.fundingMsgs:
- switch msg := fmsg.msg.(type) {
- case *lnwire.OpenChannel:
- f.handleFundingOpen(fmsg.peer, msg)
- case *lnwire.AcceptChannel:
- f.handleFundingAccept(fmsg.peer, msg)
- case *lnwire.FundingCreated:
- f.handleFundingCreated(fmsg.peer, msg)
- case *lnwire.FundingSigned:
- f.handleFundingSigned(fmsg.peer, msg)
- case *lnwire.FundingLocked:
- f.wg.Add(1)
- go f.handleFundingLocked(fmsg.peer, msg)
- case *lnwire.Error:
- f.handleErrorMsg(fmsg.peer, msg)
- }
- case req := <-f.fundingRequests:
- f.handleInitFundingMsg(req)
-
- case <-zombieSweepTicker.C:
- f.pruneZombieReservations()
-
- case req := <-f.queries:
- switch msg := req.(type) {
- case *pendingChansReq:
- f.handlePendingChannels(msg)
- }
- case <-f.quit:
- return
- }
- }
-}
-
-// advanceFundingState will advance the channel through the steps after the
-// funding transaction is broadcasted, up until the point where the channel is
-// ready for operation. This includes waiting for the funding transaction to
-// confirm, sending funding locked to the peer, adding the channel to the
-// router graph, and announcing the channel. The updateChan can be set non-nil
-// to get OpenStatusUpdates.
-//
-// NOTE: This MUST be run as a goroutine.
-func (f *fundingManager) advanceFundingState(channel *channeldb.OpenChannel,
- pendingChanID [32]byte, updateChan chan<- *lnrpc.OpenStatusUpdate) {
-
- defer f.wg.Done()
-
- // If the channel is still pending we must wait for the funding
- // transaction to confirm.
- if channel.IsPending {
- err := f.advancePendingChannelState(channel, pendingChanID)
- if err != nil {
- log.Errorf("Unable to advance pending state of "+
- "ChannelPoint(%v): %v",
- channel.FundingOutpoint, err)
- return
- }
- }
-
- // We create the state-machine object which wraps the database state.
- lnChannel, err := lnwallet.NewLightningChannel(
- nil, channel, nil,
- )
- if err != nil {
- log.Errorf("Unable to create LightningChannel(%v): %v",
- channel.FundingOutpoint, err)
- return
- }
-
- for {
- channelState, shortChanID, err := f.getChannelOpeningState(
- &channel.FundingOutpoint,
- )
- if ErrChannelNotFound.Is(err) {
- // Channel not in fundingManager's opening database,
- // meaning it was successfully announced to the
- // network.
- // TODO(halseth): could do graph consistency check
- // here, and re-add the edge if missing.
- log.Debugf("ChannelPoint(%v) with chan_id=%x not "+
- "found in opening database, assuming already "+
- "announced to the network",
- channel.FundingOutpoint, pendingChanID)
- return
- } else if err != nil {
- log.Errorf("Unable to query database for "+
- "channel opening state(%v): %v",
- channel.FundingOutpoint, err)
- return
- }
-
- // If we did find the channel in the opening state database, we
- // have seen the funding transaction being confirmed, but there
- // are still steps left of the setup procedure. We continue the
- // procedure where we left off.
- err = f.stateStep(
- channel, lnChannel, shortChanID, pendingChanID,
- channelState, updateChan,
- )
- if err != nil {
- log.Errorf("Unable to advance state(%v): %v",
- channel.FundingOutpoint, err)
- return
- }
- }
-}
-
-// stateStep advances the confirmed channel one step in the funding state
-// machine. This method is synchronous and the new channel opening state will
-// have been written to the database when it successfully returns. The
-// updateChan can be set non-nil to get OpenStatusUpdates.
-func (f *fundingManager) stateStep(channel *channeldb.OpenChannel,
- lnChannel *lnwallet.LightningChannel,
- shortChanID *lnwire.ShortChannelID, pendingChanID [32]byte,
- channelState channelOpeningState,
- updateChan chan<- *lnrpc.OpenStatusUpdate) er.R {
-
- chanID := lnwire.NewChanIDFromOutPoint(&channel.FundingOutpoint)
- log.Debugf("Channel(%v) with ShortChanID %v has opening state %v",
- chanID, shortChanID, channelState)
-
- switch channelState {
-
- // The funding transaction was confirmed, but we did not successfully
- // send the fundingLocked message to the peer, so let's do that now.
- case markedOpen:
- err := f.sendFundingLocked(channel, lnChannel, shortChanID)
- if err != nil {
- return er.Errorf("failed sending fundingLocked: %v",
- err)
- }
-
- // As the fundingLocked message is now sent to the peer, the
- // channel is moved to the next state of the state machine. It
- // will be moved to the last state (actually deleted from the
- // database) after the channel is finally announced.
- err = f.saveChannelOpeningState(
- &channel.FundingOutpoint, fundingLockedSent,
- shortChanID,
- )
- if err != nil {
- return er.Errorf("error setting channel state to"+
- " fundingLockedSent: %v", err)
- }
-
- log.Debugf("Channel(%v) with ShortChanID %v: successfully "+
- "sent FundingLocked", chanID, shortChanID)
-
- return nil
-
- // fundingLocked was sent to peer, but the channel was not added to the
- // router graph and the channel announcement was not sent.
- case fundingLockedSent:
- err := f.addToRouterGraph(channel, shortChanID)
- if err != nil {
- return er.Errorf("failed adding to "+
- "router graph: %v", err)
- }
-
- // As the channel is now added to the ChannelRouter's topology,
- // the channel is moved to the next state of the state machine.
- // It will be moved to the last state (actually deleted from
- // the database) after the channel is finally announced.
- err = f.saveChannelOpeningState(
- &channel.FundingOutpoint, addedToRouterGraph,
- shortChanID,
- )
- if err != nil {
- return er.Errorf("error setting channel state to"+
- " addedToRouterGraph: %v", err)
- }
-
- log.Debugf("Channel(%v) with ShortChanID %v: successfully "+
- "added to router graph", chanID, shortChanID)
-
- // Give the caller a final update notifying them that
- // the channel is now open.
- // TODO(roasbeef): only notify after recv of funding locked?
- fundingPoint := channel.FundingOutpoint
- cp := &lnrpc.ChannelPoint{
- FundingTxid: &lnrpc.ChannelPoint_FundingTxidBytes{
- FundingTxidBytes: fundingPoint.Hash[:],
- },
- OutputIndex: fundingPoint.Index,
- }
-
- if updateChan != nil {
- upd := &lnrpc.OpenStatusUpdate{
- Update: &lnrpc.OpenStatusUpdate_ChanOpen{
- ChanOpen: &lnrpc.ChannelOpenUpdate{
- ChannelPoint: cp,
- },
- },
- PendingChanId: pendingChanID[:],
- }
-
- select {
- case updateChan <- upd:
- case <-f.quit:
- return ErrFundingManagerShuttingDown.Default()
- }
- }
-
- return nil
-
- // The channel was added to the Router's topology, but the channel
- // announcement was not sent.
- case addedToRouterGraph:
- err := f.annAfterSixConfs(channel, shortChanID)
- if err != nil {
- return er.Errorf("error sending channel "+
- "announcement: %v", err)
- }
-
- // We delete the channel opening state from our internal
- // database as the opening process has succeeded. We can do
- // this because we assume the AuthenticatedGossiper queues the
- // announcement messages, and persists them in case of a daemon
- // shutdown.
- err = f.deleteChannelOpeningState(&channel.FundingOutpoint)
- if err != nil {
- return er.Errorf("error deleting channel state: %v",
- err)
- }
-
- log.Debugf("Channel(%v) with ShortChanID %v: successfully "+
- "announced", chanID, shortChanID)
-
- return nil
- }
-
- return er.Errorf("undefined channelState: %v", channelState)
-}
-
-// advancePendingChannelState waits for a pending channel's funding tx to
-// confirm, and marks it open in the database when that happens.
-func (f *fundingManager) advancePendingChannelState(
- channel *channeldb.OpenChannel, pendingChanID [32]byte) er.R {
-
- confChannel, err := f.waitForFundingWithTimeout(channel)
- if ErrConfirmationTimeout.Is(err) {
- // We'll get a timeout if the number of blocks mined
- // since the channel was initiated reaches
- // maxWaitNumBlocksFundingConf and we are not the
- // channel initiator.
- ch := channel
- localBalance := ch.LocalCommitment.LocalBalance.ToSatoshis()
- closeInfo := &channeldb.ChannelCloseSummary{
- ChainHash: ch.ChainHash,
- ChanPoint: ch.FundingOutpoint,
- RemotePub: ch.IdentityPub,
- Capacity: ch.Capacity,
- SettledBalance: localBalance,
- CloseType: channeldb.FundingCanceled,
- RemoteCurrentRevocation: ch.RemoteCurrentRevocation,
- RemoteNextRevocation: ch.RemoteNextRevocation,
- LocalChanConfig: ch.LocalChanCfg,
- }
-
- // Close the channel with us as the initiator because we are
- // timing the channel out.
- if err := ch.CloseChannel(
- closeInfo, channeldb.ChanStatusLocalCloseInitiator,
- ); err != nil {
- return er.Errorf("failed closing channel "+
- "%v: %v", ch.FundingOutpoint, err)
- }
-
- timeoutErr := er.Errorf("timeout waiting for funding tx "+
- "(%v) to confirm", channel.FundingOutpoint)
-
- // When the peer comes online, we'll notify it that we
- // are now considering the channel flow canceled.
- f.wg.Add(1)
- go func() {
- defer f.wg.Done()
-
- peerChan := make(chan lnpeer.Peer, 1)
- var peerKey [33]byte
- copy(peerKey[:], ch.IdentityPub.SerializeCompressed())
-
- f.cfg.NotifyWhenOnline(peerKey, peerChan)
-
- var peer lnpeer.Peer
- select {
- case peer = <-peerChan:
- case <-f.quit:
- return
- }
- // TODO(halseth): should this send be made
- // reliable?
- f.failFundingFlow(peer, pendingChanID, timeoutErr)
- }()
-
- return timeoutErr
-
- } else if err != nil {
- return er.Errorf("error waiting for funding "+
- "confirmation for ChannelPoint(%v): %v",
- channel.FundingOutpoint, err)
- }
-
- // Success, funding transaction was confirmed.
- chanID := lnwire.NewChanIDFromOutPoint(&channel.FundingOutpoint)
- log.Debugf("ChannelID(%v) is now fully confirmed! "+
- "(shortChanID=%v)", chanID, confChannel.shortChanID)
-
- err = f.handleFundingConfirmation(channel, confChannel)
- if err != nil {
- return er.Errorf("unable to handle funding "+
- "confirmation for ChannelPoint(%v): %v",
- channel.FundingOutpoint, err)
- }
-
- return nil
-}
-
-// handlePendingChannels responds to a request for details concerning all
-// currently pending channels waiting for the final phase of the funding
-// workflow (funding txn confirmation).
-func (f *fundingManager) handlePendingChannels(msg *pendingChansReq) {
- var pendingChannels []*pendingChannel
-
- dbPendingChannels, err := f.cfg.Wallet.Cfg.Database.FetchPendingChannels()
- if err != nil {
- msg.err <- err
- return
- }
-
- for _, dbPendingChan := range dbPendingChannels {
- pendingChan := &pendingChannel{
- identityPub: dbPendingChan.IdentityPub,
- channelPoint: &dbPendingChan.FundingOutpoint,
- capacity: dbPendingChan.Capacity,
- localBalance: dbPendingChan.LocalCommitment.LocalBalance.ToSatoshis(),
- remoteBalance: dbPendingChan.LocalCommitment.RemoteBalance.ToSatoshis(),
- }
-
- pendingChannels = append(pendingChannels, pendingChan)
- }
-
- msg.resp <- pendingChannels
-}
-
-// ProcessFundingMsg sends a message to the internal fundingManager goroutine,
-// allowing it to handle the lnwire.Message.
-func (f *fundingManager) ProcessFundingMsg(msg lnwire.Message, peer lnpeer.Peer) {
- select {
- case f.fundingMsgs <- &fundingMsg{msg, peer}:
- case <-f.quit:
- return
- }
-}
-
-// commitmentType returns the commitment type to use for the channel, based on
-// the features the two peers have available.
-func commitmentType(localFeatures,
- remoteFeatures *lnwire.FeatureVector) lnwallet.CommitmentType {
-
- // If both peers are signalling support for anchor commitments, this
- // implicitly mean we'll create the channel of this type. Note that
- // this also enables tweakless commitments, as anchor commitments are
- // always tweakless.
- localAnchors := localFeatures.HasFeature(
- lnwire.AnchorsOptional,
- )
- remoteAnchors := remoteFeatures.HasFeature(
- lnwire.AnchorsOptional,
- )
- if localAnchors && remoteAnchors {
- return lnwallet.CommitmentTypeAnchors
- }
-
- localTweakless := localFeatures.HasFeature(
- lnwire.StaticRemoteKeyOptional,
- )
- remoteTweakless := remoteFeatures.HasFeature(
- lnwire.StaticRemoteKeyOptional,
- )
-
- // If both nodes are signaling the proper feature bit for tweakless
- // copmmitments, we'll use that.
- if localTweakless && remoteTweakless {
- return lnwallet.CommitmentTypeTweakless
- }
-
- // Otherwise we'll fall back to the legacy type.
- return lnwallet.CommitmentTypeLegacy
-}
-
-// handleFundingOpen creates an initial 'ChannelReservation' within the wallet,
-// then responds to the source peer with an accept channel message progressing
-// the funding workflow.
-//
-// TODO(roasbeef): add error chan to all, let channelManager handle
-// error+propagate
-func (f *fundingManager) handleFundingOpen(peer lnpeer.Peer,
- msg *lnwire.OpenChannel) {
-
- // Check number of pending channels to be smaller than maximum allowed
- // number and send ErrorGeneric to remote peer if condition is
- // violated.
- peerPubKey := peer.IdentityKey()
- peerIDKey := newSerializedKey(peerPubKey)
-
- amt := msg.FundingAmount
-
- // We get all pending channels for this peer. This is the list of the
- // active reservations and the channels pending open in the database.
- f.resMtx.RLock()
- reservations := f.activeReservations[peerIDKey]
-
- // We don't count reservations that were created from a canned funding
- // shim. The user has registered the shim and therefore expects this
- // channel to arrive.
- numPending := 0
- for _, res := range reservations {
- if !res.reservation.IsCannedShim() {
- numPending++
- }
- }
- f.resMtx.RUnlock()
-
- // Also count the channels that are already pending. There we don't know
- // the underlying intent anymore, unfortunately.
- channels, err := f.cfg.Wallet.Cfg.Database.FetchOpenChannels(peerPubKey)
- if err != nil {
- f.failFundingFlow(
- peer, msg.PendingChannelID, err,
- )
- return
- }
-
- for _, c := range channels {
- // Pending channels that have a non-zero thaw height were also
- // created through a canned funding shim. Those also don't
- // count towards the DoS protection limit.
- //
- // TODO(guggero): Properly store the funding type (wallet, shim,
- // PSBT) on the channel so we don't need to use the thaw height.
- if c.IsPending && c.ThawHeight == 0 {
- numPending++
- }
- }
-
- // TODO(roasbeef): modify to only accept a _single_ pending channel per
- // block unless white listed
- if numPending >= f.cfg.MaxPendingChannels {
- f.failFundingFlow(
- peer, msg.PendingChannelID,
- lnwire.ErrMaxPendingChannels.Default(),
- )
- return
- }
-
- // We'll also reject any requests to create channels until we're fully
- // synced to the network as we won't be able to properly validate the
- // confirmation of the funding transaction.
- isSynced, _, errr := f.cfg.Wallet.IsSynced()
- if errr != nil || !isSynced {
- if errr != nil {
- log.Errorf("unable to query wallet: %v", errr)
- }
- f.failFundingFlow(
- peer, msg.PendingChannelID,
- lnwire.ErrSynchronizingChain.Default(),
- )
- return
- }
-
- // Ensure that the remote party respects our maximum channel size.
- if amt > f.cfg.MaxChanSize {
- f.failFundingFlow(
- peer, msg.PendingChannelID,
- lnwallet.ErrChanTooLarge(amt, f.cfg.MaxChanSize),
- )
- return
- }
-
- // We'll, also ensure that the remote party isn't attempting to propose
- // a channel that's below our current min channel size.
- if amt < f.cfg.MinChanSize {
- f.failFundingFlow(
- peer, msg.PendingChannelID,
- lnwallet.ErrChanTooSmall(amt, btcutil.Amount(f.cfg.MinChanSize)),
- )
- return
- }
-
- // If request specifies non-zero push amount and 'rejectpush' is set,
- // signal an error.
- if f.cfg.RejectPush && msg.PushAmount > 0 {
- f.failFundingFlow(
- peer, msg.PendingChannelID,
- lnwallet.ErrNonZeroPushAmount(),
- )
- return
- }
-
- // Send the OpenChannel request to the ChannelAcceptor to determine whether
- // this node will accept the channel.
- chanReq := &chanacceptor.ChannelAcceptRequest{
- Node: peer.IdentityKey(),
- OpenChanMsg: msg,
- }
-
- // Query our channel acceptor to determine whether we should reject
- // the channel.
- acceptorResp := f.cfg.OpenChannelPredicate.Accept(chanReq)
- if acceptorResp.RejectChannel() {
- f.failFundingFlow(
- peer, msg.PendingChannelID,
- acceptorResp.ChanAcceptError,
- )
- return
- }
-
- log.Infof("Recv'd fundingRequest(amt=%v, push=%v, delay=%v, "+
- "pendingId=%x) from peer(%x)", amt, msg.PushAmount,
- msg.CsvDelay, msg.PendingChannelID,
- peer.IdentityKey().SerializeCompressed())
-
- // Attempt to initialize a reservation within the wallet. If the wallet
- // has insufficient resources to create the channel, then the
- // reservation attempt may be rejected. Note that since we're on the
- // responding side of a single funder workflow, we don't commit any
- // funds to the channel ourselves.
- //
- // Before we init the channel, we'll also check to see if we've
- // negotiated the new tweakless commitment format. This is only the
- // case if *both* us and the remote peer are signaling the proper
- // feature bit.
- commitType := commitmentType(
- peer.LocalFeatures(), peer.RemoteFeatures(),
- )
- chainHash := chainhash.Hash(msg.ChainHash)
- req := &lnwallet.InitFundingReserveMsg{
- ChainHash: &chainHash,
- PendingChanID: msg.PendingChannelID,
- NodeID: peer.IdentityKey(),
- NodeAddr: peer.Address(),
- LocalFundingAmt: 0,
- RemoteFundingAmt: amt,
- CommitFeePerKw: chainfee.SatPerKWeight(msg.FeePerKiloWeight),
- FundingFeePerKw: 0,
- PushMSat: msg.PushAmount,
- Flags: msg.ChannelFlags,
- MinConfs: 1,
- CommitType: commitType,
- }
-
- reservation, errr := f.cfg.Wallet.InitChannelReservation(req)
- if errr != nil {
- log.Errorf("Unable to initialize reservation: %v", errr)
- f.failFundingFlow(peer, msg.PendingChannelID, errr)
- return
- }
-
- // As we're the responder, we get to specify the number of confirmations
- // that we require before both of us consider the channel open. We'll
- // use our mapping to derive the proper number of confirmations based on
- // the amount of the channel, and also if any funds are being pushed to
- // us. If a depth value was set by our channel acceptor, we will use
- // that value instead.
- numConfsReq := f.cfg.NumRequiredConfs(msg.FundingAmount, msg.PushAmount)
- if acceptorResp.MinAcceptDepth != 0 {
- numConfsReq = acceptorResp.MinAcceptDepth
- }
- reservation.SetNumConfsRequired(numConfsReq)
-
- // We'll also validate and apply all the constraints the initiating
- // party is attempting to dictate for our commitment transaction.
- channelConstraints := &channeldb.ChannelConstraints{
- DustLimit: msg.DustLimit,
- ChanReserve: msg.ChannelReserve,
- MaxPendingAmount: msg.MaxValueInFlight,
- MinHTLC: msg.HtlcMinimum,
- MaxAcceptedHtlcs: msg.MaxAcceptedHTLCs,
- CsvDelay: msg.CsvDelay,
- }
- errr = reservation.CommitConstraints(
- channelConstraints, f.cfg.MaxLocalCSVDelay,
- )
- if errr != nil {
- log.Errorf("Unacceptable channel constraints: %v", errr)
- f.failFundingFlow(peer, msg.PendingChannelID, errr)
- return
- }
-
- // Check whether the peer supports upfront shutdown, and get a new wallet
- // address if our node is configured to set shutdown addresses by default.
- // We use the upfront shutdown script provided by our channel acceptor
- // (if any) in lieu of user input.
- shutdown, errr := getUpfrontShutdownScript(
- f.cfg.EnableUpfrontShutdown, peer, acceptorResp.UpfrontShutdown,
- func() (lnwire.DeliveryAddress, er.R) {
- addr, errr := f.cfg.Wallet.NewAddress(lnwallet.WitnessPubKey, false)
- if errr != nil {
- return nil, errr
- }
- return txscript.PayToAddrScript(addr)
- },
- )
- if err != nil {
- f.failFundingFlow(
- peer, msg.PendingChannelID,
- er.Errorf("getUpfrontShutdownScript error: %v", err),
- )
- return
- }
- reservation.SetOurUpfrontShutdown(shutdown)
-
- log.Infof("Requiring %v confirmations for pendingChan(%x): "+
- "amt=%v, push_amt=%v, committype=%v, upfrontShutdown=%x", numConfsReq,
- msg.PendingChannelID, amt, msg.PushAmount,
- commitType, msg.UpfrontShutdownScript)
-
- // Generate our required constraints for the remote party, using the
- // values provided by the channel acceptor if they are non-zero.
- remoteCsvDelay := f.cfg.RequiredRemoteDelay(amt)
- if acceptorResp.CSVDelay != 0 {
- remoteCsvDelay = acceptorResp.CSVDelay
- }
-
- chanReserve := f.cfg.RequiredRemoteChanReserve(amt, msg.DustLimit)
- if acceptorResp.Reserve != 0 {
- chanReserve = acceptorResp.Reserve
- }
-
- remoteMaxValue := f.cfg.RequiredRemoteMaxValue(amt)
- if acceptorResp.InFlightTotal != 0 {
- remoteMaxValue = acceptorResp.InFlightTotal
- }
-
- maxHtlcs := f.cfg.RequiredRemoteMaxHTLCs(amt)
- if acceptorResp.HtlcLimit != 0 {
- maxHtlcs = acceptorResp.HtlcLimit
- }
-
- // Default to our default minimum hltc value, replacing it with the
- // channel acceptor's value if it is set.
- minHtlc := f.cfg.DefaultMinHtlcIn
- if acceptorResp.MinHtlcIn != 0 {
- minHtlc = acceptorResp.MinHtlcIn
- }
-
- // Once the reservation has been created successfully, we add it to
- // this peer's map of pending reservations to track this particular
- // reservation until either abort or completion.
- f.resMtx.Lock()
- if _, ok := f.activeReservations[peerIDKey]; !ok {
- f.activeReservations[peerIDKey] = make(pendingChannels)
- }
- resCtx := &reservationWithCtx{
- reservation: reservation,
- chanAmt: amt,
- remoteCsvDelay: remoteCsvDelay,
- remoteMinHtlc: minHtlc,
- remoteMaxValue: remoteMaxValue,
- remoteMaxHtlcs: maxHtlcs,
- maxLocalCsv: f.cfg.MaxLocalCSVDelay,
- err: make(chan er.R, 1),
- peer: peer,
- }
- f.activeReservations[peerIDKey][msg.PendingChannelID] = resCtx
- f.resMtx.Unlock()
-
- // Update the timestamp once the fundingOpenMsg has been handled.
- defer resCtx.updateTimestamp()
-
- // With our parameters set, we'll now process their contribution so we
- // can move the funding workflow ahead.
- remoteContribution := &lnwallet.ChannelContribution{
- FundingAmount: amt,
- FirstCommitmentPoint: msg.FirstCommitmentPoint,
- ChannelConfig: &channeldb.ChannelConfig{
- ChannelConstraints: channeldb.ChannelConstraints{
- DustLimit: msg.DustLimit,
- MaxPendingAmount: remoteMaxValue,
- ChanReserve: chanReserve,
- MinHTLC: minHtlc,
- MaxAcceptedHtlcs: maxHtlcs,
- CsvDelay: remoteCsvDelay,
- },
- MultiSigKey: keychain.KeyDescriptor{
- PubKey: copyPubKey(msg.FundingKey),
- },
- RevocationBasePoint: keychain.KeyDescriptor{
- PubKey: copyPubKey(msg.RevocationPoint),
- },
- PaymentBasePoint: keychain.KeyDescriptor{
- PubKey: copyPubKey(msg.PaymentPoint),
- },
- DelayBasePoint: keychain.KeyDescriptor{
- PubKey: copyPubKey(msg.DelayedPaymentPoint),
- },
- HtlcBasePoint: keychain.KeyDescriptor{
- PubKey: copyPubKey(msg.HtlcPoint),
- },
- },
- UpfrontShutdown: msg.UpfrontShutdownScript,
- }
- errr = reservation.ProcessSingleContribution(remoteContribution)
- if errr != nil {
- log.Errorf("unable to add contribution reservation: %v", errr)
- f.failFundingFlow(peer, msg.PendingChannelID, errr)
- return
- }
-
- log.Infof("Sending fundingResp for pending_id(%x)",
- msg.PendingChannelID)
- log.Debugf("Remote party accepted commitment constraints: %v",
- spew.Sdump(remoteContribution.ChannelConfig.ChannelConstraints))
-
- // With the initiator's contribution recorded, respond with our
- // contribution in the next message of the workflow.
- ourContribution := reservation.OurContribution()
- fundingAccept := lnwire.AcceptChannel{
- PendingChannelID: msg.PendingChannelID,
- DustLimit: ourContribution.DustLimit,
- MaxValueInFlight: remoteMaxValue,
- ChannelReserve: chanReserve,
- MinAcceptDepth: uint32(numConfsReq),
- HtlcMinimum: minHtlc,
- CsvDelay: remoteCsvDelay,
- MaxAcceptedHTLCs: maxHtlcs,
- FundingKey: ourContribution.MultiSigKey.PubKey,
- RevocationPoint: ourContribution.RevocationBasePoint.PubKey,
- PaymentPoint: ourContribution.PaymentBasePoint.PubKey,
- DelayedPaymentPoint: ourContribution.DelayBasePoint.PubKey,
- HtlcPoint: ourContribution.HtlcBasePoint.PubKey,
- FirstCommitmentPoint: ourContribution.FirstCommitmentPoint,
- UpfrontShutdownScript: ourContribution.UpfrontShutdown,
- }
-
- if err := peer.SendMessage(true, &fundingAccept); err != nil {
- log.Errorf("unable to send funding response to peer: %v", err)
- f.failFundingFlow(peer, msg.PendingChannelID, err)
- return
- }
-}
-
-// handleFundingAccept processes a response to the workflow initiation sent by
-// the remote peer. This message then queues a message with the funding
-// outpoint, and a commitment signature to the remote peer.
-func (f *fundingManager) handleFundingAccept(peer lnpeer.Peer,
- msg *lnwire.AcceptChannel) {
-
- pendingChanID := msg.PendingChannelID
- peerKey := peer.IdentityKey()
-
- resCtx, err := f.getReservationCtx(peerKey, pendingChanID)
- if err != nil {
- log.Warnf("Can't find reservation (peerKey:%v, chan_id:%v)",
- peerKey, pendingChanID)
- return
- }
-
- // Update the timestamp once the fundingAcceptMsg has been handled.
- defer resCtx.updateTimestamp()
-
- log.Infof("Recv'd fundingResponse for pending_id(%x)",
- pendingChanID[:])
-
- // The required number of confirmations should not be greater than the
- // maximum number of confirmations required by the ChainNotifier to
- // properly dispatch confirmations.
- if msg.MinAcceptDepth > chainntnfs.MaxNumConfs {
- err := lnwallet.ErrNumConfsTooLarge(
- msg.MinAcceptDepth, chainntnfs.MaxNumConfs,
- )
- log.Warnf("Unacceptable channel constraints: %v", err)
- f.failFundingFlow(peer, msg.PendingChannelID, err)
- return
- }
-
- // We'll also specify the responder's preference for the number of
- // required confirmations, and also the set of channel constraints
- // they've specified for commitment states we can create.
- resCtx.reservation.SetNumConfsRequired(uint16(msg.MinAcceptDepth))
- channelConstraints := &channeldb.ChannelConstraints{
- DustLimit: msg.DustLimit,
- ChanReserve: msg.ChannelReserve,
- MaxPendingAmount: msg.MaxValueInFlight,
- MinHTLC: msg.HtlcMinimum,
- MaxAcceptedHtlcs: msg.MaxAcceptedHTLCs,
- CsvDelay: msg.CsvDelay,
- }
- err = resCtx.reservation.CommitConstraints(
- channelConstraints, resCtx.maxLocalCsv,
- )
- if err != nil {
- log.Warnf("Unacceptable channel constraints: %v", err)
- f.failFundingFlow(peer, msg.PendingChannelID, err)
- return
- }
-
- // As they've accepted our channel constraints, we'll regenerate them
- // here so we can properly commit their accepted constraints to the
- // reservation.
- chanReserve := f.cfg.RequiredRemoteChanReserve(resCtx.chanAmt, msg.DustLimit)
-
- // The remote node has responded with their portion of the channel
- // contribution. At this point, we can process their contribution which
- // allows us to construct and sign both the commitment transaction, and
- // the funding transaction.
- remoteContribution := &lnwallet.ChannelContribution{
- FirstCommitmentPoint: msg.FirstCommitmentPoint,
- ChannelConfig: &channeldb.ChannelConfig{
- ChannelConstraints: channeldb.ChannelConstraints{
- DustLimit: msg.DustLimit,
- MaxPendingAmount: resCtx.remoteMaxValue,
- ChanReserve: chanReserve,
- MinHTLC: resCtx.remoteMinHtlc,
- MaxAcceptedHtlcs: resCtx.remoteMaxHtlcs,
- CsvDelay: resCtx.remoteCsvDelay,
- },
- MultiSigKey: keychain.KeyDescriptor{
- PubKey: copyPubKey(msg.FundingKey),
- },
- RevocationBasePoint: keychain.KeyDescriptor{
- PubKey: copyPubKey(msg.RevocationPoint),
- },
- PaymentBasePoint: keychain.KeyDescriptor{
- PubKey: copyPubKey(msg.PaymentPoint),
- },
- DelayBasePoint: keychain.KeyDescriptor{
- PubKey: copyPubKey(msg.DelayedPaymentPoint),
- },
- HtlcBasePoint: keychain.KeyDescriptor{
- PubKey: copyPubKey(msg.HtlcPoint),
- },
- },
- UpfrontShutdown: msg.UpfrontShutdownScript,
- }
- err = resCtx.reservation.ProcessContribution(remoteContribution)
-
- // The wallet has detected that a PSBT funding process was requested by
- // the user and has halted the funding process after negotiating the
- // multisig keys. We now have everything that is needed for the user to
- // start constructing a PSBT that sends to the multisig funding address.
- var psbtIntent *chanfunding.PsbtIntent
- errr := er.Wrapped(err)
- if psbtErr, ok := errr.(*lnwallet.PsbtFundingRequired); ok {
- // Return the information that is needed by the user to
- // construct the PSBT back to the caller.
- addr, amt, packet, err := psbtErr.Intent.FundingParams()
- if err != nil {
- log.Errorf("Unable to process PSBT funding params "+
- "for contribution from %v: %v", peerKey, err)
- f.failFundingFlow(peer, msg.PendingChannelID, err)
- return
- }
- var buf bytes.Buffer
- err = packet.Serialize(&buf)
- if err != nil {
- log.Errorf("Unable to serialize PSBT for "+
- "contribution from %v: %v", peerKey, err)
- f.failFundingFlow(peer, msg.PendingChannelID, err)
- return
- }
- resCtx.updates <- &lnrpc.OpenStatusUpdate{
- PendingChanId: pendingChanID[:],
- Update: &lnrpc.OpenStatusUpdate_PsbtFund{
- PsbtFund: &lnrpc.ReadyForPsbtFunding{
- FundingAddress: addr.EncodeAddress(),
- FundingAmount: amt,
- Psbt: buf.Bytes(),
- },
- },
- }
- psbtIntent = psbtErr.Intent
- } else if err != nil {
- log.Errorf("Unable to process contribution from %v: %v",
- peerKey, err)
- f.failFundingFlow(peer, msg.PendingChannelID, err)
- return
- }
-
- log.Infof("pendingChan(%x): remote party proposes num_confs=%v, "+
- "csv_delay=%v", pendingChanID[:], msg.MinAcceptDepth, msg.CsvDelay)
- log.Debugf("Remote party accepted commitment constraints: %v",
- spew.Sdump(remoteContribution.ChannelConfig.ChannelConstraints))
-
- // If the user requested funding through a PSBT, we cannot directly
- // continue now and need to wait for the fully funded and signed PSBT
- // to arrive. To not block any other channels from opening, we wait in
- // a separate goroutine.
- if psbtIntent != nil {
- f.wg.Add(1)
- go func() {
- defer f.wg.Done()
- f.waitForPsbt(psbtIntent, resCtx, pendingChanID)
- }()
-
- // With the new goroutine spawned, we can now exit to unblock
- // the main event loop.
- return
- }
-
- // In a normal, non-PSBT funding flow, we can jump directly to the next
- // step where we expect our contribution to be finalized.
- f.continueFundingAccept(resCtx, pendingChanID)
-}
-
-// waitForPsbt blocks until either a signed PSBT arrives, an error occurs or
-// the funding manager shuts down. In the case of a valid PSBT, the funding flow
-// is continued.
-//
-// NOTE: This method must be called as a goroutine.
-func (f *fundingManager) waitForPsbt(intent *chanfunding.PsbtIntent,
- resCtx *reservationWithCtx, pendingChanID [32]byte) {
-
- // failFlow is a helper that logs an error message with the current
- // context and then fails the funding flow.
- peerKey := resCtx.peer.IdentityKey()
- failFlow := func(errMsg string, cause er.R) {
- log.Errorf("Unable to handle funding accept message "+
- "for peer_key=%x, pending_chan_id=%x: %s: %v",
- peerKey.SerializeCompressed(), pendingChanID, errMsg,
- cause)
- f.failFundingFlow(resCtx.peer, pendingChanID, cause)
- }
-
- // We'll now wait until the intent has received the final and complete
- // funding transaction. If the channel is closed without any error being
- // sent, we know everything's going as expected.
- select {
- case err := <-intent.PsbtReady:
- switch {
- // If the user canceled the funding reservation, we need to
- // inform the other peer about us canceling the reservation.
- case chanfunding.ErrUserCanceled.Is(err):
- failFlow("aborting PSBT flow", err)
- return
-
- // If the remote canceled the funding reservation, we don't need
- // to send another fail message. But we want to inform the user
- // about what happened.
- case chanfunding.ErrRemoteCanceled.Is(err):
- log.Infof("Remote canceled, aborting PSBT flow "+
- "for peer_key=%x, pending_chan_id=%x",
- peerKey.SerializeCompressed(), pendingChanID)
- return
-
- // Nil error means the flow continues normally now.
- case nil == err:
-
- // For any other error, we'll fail the funding flow.
- default:
- failFlow("error waiting for PSBT flow", err)
- return
- }
-
- // A non-nil error means we can continue the funding flow.
- // Notify the wallet so it can prepare everything we need to
- // continue.
- err = resCtx.reservation.ProcessPsbt()
- if err != nil {
- failFlow("error continuing PSBT flow", err)
- return
- }
-
- // We are now ready to continue the funding flow.
- f.continueFundingAccept(resCtx, pendingChanID)
-
- // Handle a server shutdown as well because the reservation won't
- // survive a restart as it's in memory only.
- case <-f.quit:
- log.Errorf("Unable to handle funding accept message "+
- "for peer_key=%x, pending_chan_id=%x: funding manager "+
- "shutting down", peerKey.SerializeCompressed(),
- pendingChanID)
- return
- }
-}
-
-// continueFundingAccept continues the channel funding flow once our
-// contribution is finalized, the channel output is known and the funding
-// transaction is signed.
-func (f *fundingManager) continueFundingAccept(resCtx *reservationWithCtx,
- pendingChanID [32]byte) {
-
- // Now that we have their contribution, we can extract, then send over
- // both the funding out point and our signature for their version of
- // the commitment transaction to the remote peer.
- outPoint := resCtx.reservation.FundingOutpoint()
- _, sig := resCtx.reservation.OurSignatures()
-
- // A new channel has almost finished the funding process. In order to
- // properly synchronize with the writeHandler goroutine, we add a new
- // channel to the barriers map which will be closed once the channel is
- // fully open.
- f.barrierMtx.Lock()
- channelID := lnwire.NewChanIDFromOutPoint(outPoint)
- log.Debugf("Creating chan barrier for ChanID(%v)", channelID)
- f.newChanBarriers[channelID] = make(chan struct{})
- f.barrierMtx.Unlock()
-
- // The next message that advances the funding flow will reference the
- // channel via its permanent channel ID, so we'll set up this mapping
- // so we can retrieve the reservation context once we get the
- // FundingSigned message.
- f.resMtx.Lock()
- f.signedReservations[channelID] = pendingChanID
- f.resMtx.Unlock()
-
- log.Infof("Generated ChannelPoint(%v) for pending_id(%x)", outPoint,
- pendingChanID[:])
-
- var err er.R
- fundingCreated := &lnwire.FundingCreated{
- PendingChannelID: pendingChanID,
- FundingPoint: *outPoint,
- }
- fundingCreated.CommitSig, err = lnwire.NewSigFromSignature(sig)
- if err != nil {
- log.Errorf("Unable to parse signature: %v", err)
- f.failFundingFlow(resCtx.peer, pendingChanID, err)
- return
- }
- if err := resCtx.peer.SendMessage(true, fundingCreated); err != nil {
- log.Errorf("Unable to send funding complete message: %v", err)
- f.failFundingFlow(resCtx.peer, pendingChanID, err)
- return
- }
-}
-
-// handleFundingCreated progresses the funding workflow when the daemon is on
-// the responding side of a single funder workflow. Once this message has been
-// processed, a signature is sent to the remote peer allowing it to broadcast
-// the funding transaction, progressing the workflow into the final stage.
-func (f *fundingManager) handleFundingCreated(peer lnpeer.Peer,
- msg *lnwire.FundingCreated) {
-
- peerKey := peer.IdentityKey()
- pendingChanID := msg.PendingChannelID
-
- resCtx, err := f.getReservationCtx(peerKey, pendingChanID)
- if err != nil {
- log.Warnf("can't find reservation (peer_id:%v, chan_id:%x)",
- peerKey, pendingChanID[:])
- return
- }
-
- // The channel initiator has responded with the funding outpoint of the
- // final funding transaction, as well as a signature for our version of
- // the commitment transaction. So at this point, we can validate the
- // initiator's commitment transaction, then send our own if it's valid.
- // TODO(roasbeef): make case (p vs P) consistent throughout
- fundingOut := msg.FundingPoint
- log.Infof("completing pending_id(%x) with ChannelPoint(%v)",
- pendingChanID[:], fundingOut)
-
- commitSig, err := msg.CommitSig.ToSignature()
- if err != nil {
- log.Errorf("unable to parse signature: %v", err)
- f.failFundingFlow(peer, pendingChanID, err)
- return
- }
-
- // With all the necessary data available, attempt to advance the
- // funding workflow to the next stage. If this succeeds then the
- // funding transaction will broadcast after our next message.
- // CompleteReservationSingle will also mark the channel as 'IsPending'
- // in the database.
- completeChan, err := resCtx.reservation.CompleteReservationSingle(
- &fundingOut, commitSig,
- )
- if err != nil {
- // TODO(roasbeef): better error logging: peerID, channelID, etc.
- log.Errorf("unable to complete single reservation: %v", err)
- f.failFundingFlow(peer, pendingChanID, err)
- return
- }
-
- // The channel is marked IsPending in the database, and can be removed
- // from the set of active reservations.
- f.deleteReservationCtx(peerKey, msg.PendingChannelID)
-
- // If something goes wrong before the funding transaction is confirmed,
- // we use this convenience method to delete the pending OpenChannel
- // from the database.
- deleteFromDatabase := func() {
- localBalance := completeChan.LocalCommitment.LocalBalance.ToSatoshis()
- closeInfo := &channeldb.ChannelCloseSummary{
- ChanPoint: completeChan.FundingOutpoint,
- ChainHash: completeChan.ChainHash,
- RemotePub: completeChan.IdentityPub,
- CloseType: channeldb.FundingCanceled,
- Capacity: completeChan.Capacity,
- SettledBalance: localBalance,
- RemoteCurrentRevocation: completeChan.RemoteCurrentRevocation,
- RemoteNextRevocation: completeChan.RemoteNextRevocation,
- LocalChanConfig: completeChan.LocalChanCfg,
- }
-
- // Close the channel with us as the initiator because we are
- // deciding to exit the funding flow due to an internal error.
- if err := completeChan.CloseChannel(
- closeInfo, channeldb.ChanStatusLocalCloseInitiator,
- ); err != nil {
- log.Errorf("Failed closing channel %v: %v",
- completeChan.FundingOutpoint, err)
- }
- }
-
- // A new channel has almost finished the funding process. In order to
- // properly synchronize with the writeHandler goroutine, we add a new
- // channel to the barriers map which will be closed once the channel is
- // fully open.
- f.barrierMtx.Lock()
- channelID := lnwire.NewChanIDFromOutPoint(&fundingOut)
- log.Debugf("Creating chan barrier for ChanID(%v)", channelID)
- f.newChanBarriers[channelID] = make(chan struct{})
- f.barrierMtx.Unlock()
-
- log.Infof("sending FundingSigned for pending_id(%x) over "+
- "ChannelPoint(%v)", pendingChanID[:], fundingOut)
-
- // With their signature for our version of the commitment transaction
- // verified, we can now send over our signature to the remote peer.
- _, sig := resCtx.reservation.OurSignatures()
- ourCommitSig, err := lnwire.NewSigFromSignature(sig)
- if err != nil {
- log.Errorf("unable to parse signature: %v", err)
- f.failFundingFlow(peer, pendingChanID, err)
- deleteFromDatabase()
- return
- }
-
- fundingSigned := &lnwire.FundingSigned{
- ChanID: channelID,
- CommitSig: ourCommitSig,
- }
- if err := peer.SendMessage(true, fundingSigned); err != nil {
- log.Errorf("unable to send FundingSigned message: %v", err)
- f.failFundingFlow(peer, pendingChanID, err)
- deleteFromDatabase()
- return
- }
-
- // Now that we've sent over our final signature for this channel, we'll
- // send it to the ChainArbitrator so it can watch for any on-chain
- // actions during this final confirmation stage.
- if err := f.cfg.WatchNewChannel(completeChan, peerKey); err != nil {
- log.Errorf("Unable to send new ChannelPoint(%v) for "+
- "arbitration: %v", fundingOut, err)
- }
-
- // Create an entry in the local discovery map so we can ensure that we
- // process the channel confirmation fully before we receive a funding
- // locked message.
- f.localDiscoveryMtx.Lock()
- f.localDiscoverySignals[channelID] = make(chan struct{})
- f.localDiscoveryMtx.Unlock()
-
- // Inform the ChannelNotifier that the channel has entered
- // pending open state.
- f.cfg.NotifyPendingOpenChannelEvent(fundingOut, completeChan)
-
- // At this point we have sent our last funding message to the
- // initiating peer before the funding transaction will be broadcast.
- // With this last message, our job as the responder is now complete.
- // We'll wait for the funding transaction to reach the specified number
- // of confirmations, then start normal operations.
- //
- // When we get to this point we have sent the signComplete message to
- // the channel funder, and BOLT#2 specifies that we MUST remember the
- // channel for reconnection. The channel is already marked
- // as pending in the database, so in case of a disconnect or restart,
- // we will continue waiting for the confirmation the next time we start
- // the funding manager. In case the funding transaction never appears
- // on the blockchain, we must forget this channel. We therefore
- // completely forget about this channel if we haven't seen the funding
- // transaction in 288 blocks (~ 48 hrs), by canceling the reservation
- // and canceling the wait for the funding confirmation.
- f.wg.Add(1)
- go f.advanceFundingState(completeChan, pendingChanID, nil)
-}
-
-// handleFundingSigned processes the final message received in a single funder
-// workflow. Once this message is processed, the funding transaction is
-// broadcast. Once the funding transaction reaches a sufficient number of
-// confirmations, a message is sent to the responding peer along with a compact
-// encoding of the location of the channel within the blockchain.
-func (f *fundingManager) handleFundingSigned(peer lnpeer.Peer,
- msg *lnwire.FundingSigned) {
-
- // As the funding signed message will reference the reservation by its
- // permanent channel ID, we'll need to perform an intermediate look up
- // before we can obtain the reservation.
- f.resMtx.Lock()
- pendingChanID, ok := f.signedReservations[msg.ChanID]
- delete(f.signedReservations, msg.ChanID)
- f.resMtx.Unlock()
- if !ok {
- err := er.Errorf("unable to find signed reservation for "+
- "chan_id=%x", msg.ChanID)
- log.Warnf(err.String())
- f.failFundingFlow(peer, msg.ChanID, err)
- return
- }
-
- peerKey := peer.IdentityKey()
- resCtx, err := f.getReservationCtx(peerKey, pendingChanID)
- if err != nil {
- log.Warnf("Unable to find reservation (peer_id:%v, "+
- "chan_id:%x)", peerKey, pendingChanID[:])
- // TODO: add ErrChanNotFound?
- f.failFundingFlow(peer, pendingChanID, err)
- return
- }
-
- // Create an entry in the local discovery map so we can ensure that we
- // process the channel confirmation fully before we receive a funding
- // locked message.
- fundingPoint := resCtx.reservation.FundingOutpoint()
- permChanID := lnwire.NewChanIDFromOutPoint(fundingPoint)
- f.localDiscoveryMtx.Lock()
- f.localDiscoverySignals[permChanID] = make(chan struct{})
- f.localDiscoveryMtx.Unlock()
-
- // The remote peer has responded with a signature for our commitment
- // transaction. We'll verify the signature for validity, then commit
- // the state to disk as we can now open the channel.
- commitSig, err := msg.CommitSig.ToSignature()
- if err != nil {
- log.Errorf("Unable to parse signature: %v", err)
- f.failFundingFlow(peer, pendingChanID, err)
- return
- }
-
- completeChan, err := resCtx.reservation.CompleteReservation(
- nil, commitSig,
- )
- if err != nil {
- log.Errorf("Unable to complete reservation sign "+
- "complete: %v", err)
- f.failFundingFlow(peer, pendingChanID, err)
- return
- }
-
- // The channel is now marked IsPending in the database, and we can
- // delete it from our set of active reservations.
- f.deleteReservationCtx(peerKey, pendingChanID)
-
- // Broadcast the finalized funding transaction to the network, but only
- // if we actually have the funding transaction.
- if completeChan.ChanType.HasFundingTx() {
- fundingTx := completeChan.FundingTxn
- var fundingTxBuf bytes.Buffer
- if err := fundingTx.Serialize(&fundingTxBuf); err != nil {
- log.Errorf("Unable to serialize funding "+
- "transaction %v: %v", fundingTx.TxHash(), err)
-
- // Clear the buffer of any bytes that were written
- // before the serialization error to prevent logging an
- // incomplete transaction.
- fundingTxBuf.Reset()
- }
-
- log.Infof("Broadcasting funding tx for ChannelPoint(%v): %x",
- completeChan.FundingOutpoint, fundingTxBuf.Bytes())
-
- // Set a nil short channel ID at this stage because we do not
- // know it until our funding tx confirms.
- label := labels.MakeLabel(
- labels.LabelTypeChannelOpen, nil,
- )
-
- err = f.cfg.PublishTransaction(fundingTx, label)
- if err != nil {
- log.Errorf("Unable to broadcast funding tx %x for "+
- "ChannelPoint(%v): %v", fundingTxBuf.Bytes(),
- completeChan.FundingOutpoint, err)
-
- // We failed to broadcast the funding transaction, but
- // watch the channel regardless, in case the
- // transaction made it to the network. We will retry
- // broadcast at startup.
- //
- // TODO(halseth): retry more often? Handle with CPFP?
- // Just delete from the DB?
- }
- }
-
- // Now that we have a finalized reservation for this funding flow,
- // we'll send the to be active channel to the ChainArbitrator so it can
- // watch for any on-chain actions before the channel has fully
- // confirmed.
- if err := f.cfg.WatchNewChannel(completeChan, peerKey); err != nil {
- log.Errorf("Unable to send new ChannelPoint(%v) for "+
- "arbitration: %v", fundingPoint, err)
- }
-
- log.Infof("Finalizing pending_id(%x) over ChannelPoint(%v), "+
- "waiting for channel open on-chain", pendingChanID[:],
- fundingPoint)
-
- // Send an update to the upstream client that the negotiation process
- // is over.
- //
- // TODO(roasbeef): add abstraction over updates to accommodate
- // long-polling, or SSE, etc.
- upd := &lnrpc.OpenStatusUpdate{
- Update: &lnrpc.OpenStatusUpdate_ChanPending{
- ChanPending: &lnrpc.PendingUpdate{
- Txid: fundingPoint.Hash[:],
- OutputIndex: fundingPoint.Index,
- },
- },
- PendingChanId: pendingChanID[:],
- }
-
- select {
- case resCtx.updates <- upd:
- // Inform the ChannelNotifier that the channel has entered
- // pending open state.
- f.cfg.NotifyPendingOpenChannelEvent(*fundingPoint, completeChan)
- case <-f.quit:
- return
- }
-
- // At this point we have broadcast the funding transaction and done all
- // necessary processing.
- f.wg.Add(1)
- go f.advanceFundingState(completeChan, pendingChanID, resCtx.updates)
-}
-
-// confirmedChannel wraps a confirmed funding transaction, as well as the short
-// channel ID which identifies that channel into a single struct. We'll use
-// this to pass around the final state of a channel after it has been
-// confirmed.
-type confirmedChannel struct {
- // shortChanID expresses where in the block the funding transaction was
- // located.
- shortChanID lnwire.ShortChannelID
-
- // fundingTx is the funding transaction that created the channel.
- fundingTx *wire.MsgTx
-}
-
-// waitForFundingWithTimeout is a wrapper around waitForFundingConfirmation and
-// waitForTimeout that will return ErrConfirmationTimeout if we are not the
-// channel initiator and the maxWaitNumBlocksFundingConf has passed from the
-// funding broadcast height. In case of confirmation, the short channel ID of
-// the channel and the funding transaction will be returned.
-func (f *fundingManager) waitForFundingWithTimeout(
- ch *channeldb.OpenChannel) (*confirmedChannel, er.R) {
-
- confChan := make(chan *confirmedChannel)
- timeoutChan := make(chan er.R, 1)
- cancelChan := make(chan struct{})
-
- f.wg.Add(1)
- go f.waitForFundingConfirmation(ch, cancelChan, confChan)
-
- // If we are not the initiator, we have no money at stake and will
- // timeout waiting for the funding transaction to confirm after a
- // while.
- if !ch.IsInitiator {
- f.wg.Add(1)
- go f.waitForTimeout(ch, cancelChan, timeoutChan)
- }
- defer close(cancelChan)
-
- select {
- case err := <-timeoutChan:
- if err != nil {
- return nil, err
- }
- return nil, ErrConfirmationTimeout.Default()
-
- case <-f.quit:
- // The fundingManager is shutting down, and will resume wait on
- // startup.
- return nil, ErrFundingManagerShuttingDown.Default()
-
- case confirmedChannel, ok := <-confChan:
- if !ok {
- return nil, er.Errorf("waiting for funding" +
- "confirmation failed")
- }
- return confirmedChannel, nil
- }
-}
-
-// makeFundingScript re-creates the funding script for the funding transaction
-// of the target channel.
-func makeFundingScript(channel *channeldb.OpenChannel) ([]byte, er.R) {
- localKey := channel.LocalChanCfg.MultiSigKey.PubKey.SerializeCompressed()
- remoteKey := channel.RemoteChanCfg.MultiSigKey.PubKey.SerializeCompressed()
-
- multiSigScript, err := input.GenMultiSigScript(localKey, remoteKey)
- if err != nil {
- return nil, err
- }
-
- return input.WitnessScriptHash(multiSigScript)
-}
-
-// waitForFundingConfirmation handles the final stages of the channel funding
-// process once the funding transaction has been broadcast. The primary
-// function of waitForFundingConfirmation is to wait for blockchain
-// confirmation, and then to notify the other systems that must be notified
-// when a channel has become active for lightning transactions.
-// The wait can be canceled by closing the cancelChan. In case of success,
-// a *lnwire.ShortChannelID will be passed to confChan.
-//
-// NOTE: This MUST be run as a goroutine.
-func (f *fundingManager) waitForFundingConfirmation(
- completeChan *channeldb.OpenChannel, cancelChan <-chan struct{},
- confChan chan<- *confirmedChannel) {
-
- defer f.wg.Done()
- defer close(confChan)
-
- // Register with the ChainNotifier for a notification once the funding
- // transaction reaches `numConfs` confirmations.
- txid := completeChan.FundingOutpoint.Hash
- fundingScript, err := makeFundingScript(completeChan)
- if err != nil {
- log.Errorf("unable to create funding script for "+
- "ChannelPoint(%v): %v", completeChan.FundingOutpoint,
- err)
- return
- }
- numConfs := uint32(completeChan.NumConfsRequired)
- confNtfn, err := f.cfg.Notifier.RegisterConfirmationsNtfn(
- &txid, fundingScript, numConfs,
- completeChan.FundingBroadcastHeight,
- )
- if err != nil {
- log.Errorf("Unable to register for confirmation of "+
- "ChannelPoint(%v): %v", completeChan.FundingOutpoint,
- err)
- return
- }
-
- log.Infof("Waiting for funding tx (%v) to reach %v confirmations",
- txid, numConfs)
-
- var confDetails *chainntnfs.TxConfirmation
- var ok bool
-
- // Wait until the specified number of confirmations has been reached,
- // we get a cancel signal, or the wallet signals a shutdown.
- select {
- case confDetails, ok = <-confNtfn.Confirmed:
- // fallthrough
-
- case <-cancelChan:
- log.Warnf("canceled waiting for funding confirmation, "+
- "stopping funding flow for ChannelPoint(%v)",
- completeChan.FundingOutpoint)
- return
-
- case <-f.quit:
- log.Warnf("fundingManager shutting down, stopping funding "+
- "flow for ChannelPoint(%v)",
- completeChan.FundingOutpoint)
- return
- }
-
- if !ok {
- log.Warnf("ChainNotifier shutting down, cannot complete "+
- "funding flow for ChannelPoint(%v)",
- completeChan.FundingOutpoint)
- return
- }
-
- fundingPoint := completeChan.FundingOutpoint
- log.Infof("ChannelPoint(%v) is now active: ChannelID(%v)",
- fundingPoint, lnwire.NewChanIDFromOutPoint(&fundingPoint))
-
- // With the block height and the transaction index known, we can
- // construct the compact chanID which is used on the network to unique
- // identify channels.
- shortChanID := lnwire.ShortChannelID{
- BlockHeight: confDetails.BlockHeight,
- TxIndex: confDetails.TxIndex,
- TxPosition: uint16(fundingPoint.Index),
- }
-
- select {
- case confChan <- &confirmedChannel{
- shortChanID: shortChanID,
- fundingTx: confDetails.Tx,
- }:
- case <-f.quit:
- return
- }
-}
-
-// waitForTimeout will close the timeout channel if maxWaitNumBlocksFundingConf
-// has passed from the broadcast height of the given channel. In case of error,
-// the error is sent on timeoutChan. The wait can be canceled by closing the
-// cancelChan.
-//
-// NOTE: timeoutChan MUST be buffered.
-// NOTE: This MUST be run as a goroutine.
-func (f *fundingManager) waitForTimeout(completeChan *channeldb.OpenChannel,
- cancelChan <-chan struct{}, timeoutChan chan<- er.R) {
- defer f.wg.Done()
-
- epochClient, err := f.cfg.Notifier.RegisterBlockEpochNtfn(nil)
- if err != nil {
- timeoutChan <- er.Errorf("unable to register for epoch "+
- "notification: %v", err)
- return
- }
-
- defer epochClient.Cancel()
-
- // On block maxHeight we will cancel the funding confirmation wait.
- maxHeight := completeChan.FundingBroadcastHeight + maxWaitNumBlocksFundingConf
- for {
- select {
- case epoch, ok := <-epochClient.Epochs:
- if !ok {
- timeoutChan <- er.Errorf("epoch client " +
- "shutting down")
- return
- }
-
- // Close the timeout channel and exit if the block is
- // aboce the max height.
- if uint32(epoch.Height) >= maxHeight {
- log.Warnf("Waited for %v blocks without "+
- "seeing funding transaction confirmed,"+
- " cancelling.",
- maxWaitNumBlocksFundingConf)
-
- // Notify the caller of the timeout.
- close(timeoutChan)
- return
- }
-
- // TODO: If we are the channel initiator implement
- // a method for recovering the funds from the funding
- // transaction
-
- case <-cancelChan:
- return
-
- case <-f.quit:
- // The fundingManager is shutting down, will resume
- // waiting for the funding transaction on startup.
- return
- }
- }
-}
-
-// handleFundingConfirmation marks a channel as open in the database, and set
-// the channelOpeningState markedOpen. In addition it will report the now
-// decided short channel ID to the switch, and close the local discovery signal
-// for this channel.
-func (f *fundingManager) handleFundingConfirmation(
- completeChan *channeldb.OpenChannel,
- confChannel *confirmedChannel) er.R {
-
- fundingPoint := completeChan.FundingOutpoint
- chanID := lnwire.NewChanIDFromOutPoint(&fundingPoint)
-
- // TODO(roasbeef): ideally persistent state update for chan above
- // should be abstracted
-
- // Now that that the channel has been fully confirmed, we'll request
- // that the wallet fully verify this channel to ensure that it can be
- // used.
- err := f.cfg.Wallet.ValidateChannel(completeChan, confChannel.fundingTx)
- if err != nil {
- // TODO(roasbeef): delete chan state?
- return er.Errorf("unable to validate channel: %v", err)
- }
-
- // The funding transaction now being confirmed, we add this channel to
- // the fundingManager's internal persistent state machine that we use
- // to track the remaining process of the channel opening. This is
- // useful to resume the opening process in case of restarts. We set the
- // opening state before we mark the channel opened in the database,
- // such that we can receover from one of the db writes failing.
- err = f.saveChannelOpeningState(
- &fundingPoint, markedOpen, &confChannel.shortChanID,
- )
- if err != nil {
- return er.Errorf("error setting channel state to markedOpen: %v",
- err)
- }
-
- // Now that the channel has been fully confirmed and we successfully
- // saved the opening state, we'll mark it as open within the database.
- err = completeChan.MarkAsOpen(confChannel.shortChanID)
- if err != nil {
- return er.Errorf("error setting channel pending flag to false: "+
- "%v", err)
- }
-
- // Inform the ChannelNotifier that the channel has transitioned from
- // pending open to open.
- f.cfg.NotifyOpenChannelEvent(completeChan.FundingOutpoint)
-
- // As there might already be an active link in the switch with an
- // outdated short chan ID, we'll instruct the switch to load the updated
- // short chan id from disk.
- err = f.cfg.ReportShortChanID(fundingPoint)
- if err != nil {
- log.Errorf("unable to report short chan id: %v", err)
- }
-
- // If we opened the channel, and lnd's wallet published our funding tx
- // (which is not the case for some channels) then we update our
- // transaction label with our short channel ID, which is known now that
- // our funding transaction has confirmed. We do not label transactions
- // we did not publish, because our wallet has no knowledge of them.
- if completeChan.IsInitiator && completeChan.ChanType.HasFundingTx() {
- shortChanID := completeChan.ShortChanID()
- label := labels.MakeLabel(
- labels.LabelTypeChannelOpen, &shortChanID,
- )
-
- err = f.cfg.UpdateLabel(
- completeChan.FundingOutpoint.Hash, label,
- )
- if err != nil {
- log.Errorf("unable to update label: %v", err)
- }
- }
-
- // Close the discoverySignal channel, indicating to a separate
- // goroutine that the channel now is marked as open in the database
- // and that it is acceptable to process funding locked messages
- // from the peer.
- f.localDiscoveryMtx.Lock()
- if discoverySignal, ok := f.localDiscoverySignals[chanID]; ok {
- close(discoverySignal)
- }
- f.localDiscoveryMtx.Unlock()
-
- return nil
-}
-
-// sendFundingLocked creates and sends the fundingLocked message.
-// This should be called after the funding transaction has been confirmed,
-// and the channelState is 'markedOpen'.
-func (f *fundingManager) sendFundingLocked(
- completeChan *channeldb.OpenChannel, channel *lnwallet.LightningChannel,
- shortChanID *lnwire.ShortChannelID) er.R {
-
- chanID := lnwire.NewChanIDFromOutPoint(&completeChan.FundingOutpoint)
-
- var peerKey [33]byte
- copy(peerKey[:], completeChan.IdentityPub.SerializeCompressed())
-
- // Next, we'll send over the funding locked message which marks that we
- // consider the channel open by presenting the remote party with our
- // next revocation key. Without the revocation key, the remote party
- // will be unable to propose state transitions.
- nextRevocation, err := channel.NextRevocationKey()
- if err != nil {
- return er.Errorf("unable to create next revocation: %v", err)
- }
- fundingLockedMsg := lnwire.NewFundingLocked(chanID, nextRevocation)
-
- // If the peer has disconnected before we reach this point, we will need
- // to wait for him to come back online before sending the fundingLocked
- // message. This is special for fundingLocked, since failing to send any
- // of the previous messages in the funding flow just cancels the flow.
- // But now the funding transaction is confirmed, the channel is open
- // and we have to make sure the peer gets the fundingLocked message when
- // it comes back online. This is also crucial during restart of lnd,
- // where we might try to resend the fundingLocked message before the
- // server has had the time to connect to the peer. We keep trying to
- // send fundingLocked until we succeed, or the fundingManager is shut
- // down.
- for {
- connected := make(chan lnpeer.Peer, 1)
- f.cfg.NotifyWhenOnline(peerKey, connected)
-
- var peer lnpeer.Peer
- select {
- case peer = <-connected:
- case <-f.quit:
- return ErrFundingManagerShuttingDown.Default()
- }
-
- log.Infof("Peer(%x) is online, sending FundingLocked "+
- "for ChannelID(%v)", peerKey, chanID)
-
- if err := peer.SendMessage(true, fundingLockedMsg); err == nil {
- // Sending succeeded, we can break out and continue the
- // funding flow.
- break
- }
-
- log.Warnf("Unable to send fundingLocked to peer %x: %v. "+
- "Will retry when online", peerKey, err)
- }
-
- return nil
-}
-
-// addToRouterGraph sends a ChannelAnnouncement and a ChannelUpdate to the
-// gossiper so that the channel is added to the Router's internal graph.
-// These announcement messages are NOT broadcasted to the greater network,
-// only to the channel counter party. The proofs required to announce the
-// channel to the greater network will be created and sent in annAfterSixConfs.
-func (f *fundingManager) addToRouterGraph(completeChan *channeldb.OpenChannel,
- shortChanID *lnwire.ShortChannelID) er.R {
-
- chanID := lnwire.NewChanIDFromOutPoint(&completeChan.FundingOutpoint)
-
- // We'll obtain the min HTLC value we can forward in our direction, as
- // we'll use this value within our ChannelUpdate. This constraint is
- // originally set by the remote node, as it will be the one that will
- // need to determine the smallest HTLC it deems economically relevant.
- fwdMinHTLC := completeChan.LocalChanCfg.MinHTLC
-
- // We don't necessarily want to go as low as the remote party
- // allows. Check it against our default forwarding policy.
- if fwdMinHTLC < f.cfg.DefaultRoutingPolicy.MinHTLCOut {
- fwdMinHTLC = f.cfg.DefaultRoutingPolicy.MinHTLCOut
- }
-
- // We'll obtain the max HTLC value we can forward in our direction, as
- // we'll use this value within our ChannelUpdate. This value must be <=
- // channel capacity and <= the maximum in-flight msats set by the peer.
- fwdMaxHTLC := completeChan.LocalChanCfg.MaxPendingAmount
- capacityMSat := lnwire.NewMSatFromSatoshis(completeChan.Capacity)
- if fwdMaxHTLC > capacityMSat {
- fwdMaxHTLC = capacityMSat
- }
-
- ann, err := f.newChanAnnouncement(
- f.cfg.IDKey, completeChan.IdentityPub,
- completeChan.LocalChanCfg.MultiSigKey.PubKey,
- completeChan.RemoteChanCfg.MultiSigKey.PubKey, *shortChanID,
- chanID, fwdMinHTLC, fwdMaxHTLC,
- )
- if err != nil {
- return er.Errorf("error generating channel "+
- "announcement: %v", err)
- }
-
- // Send ChannelAnnouncement and ChannelUpdate to the gossiper to add
- // to the Router's topology.
- errChan := f.cfg.SendAnnouncement(
- ann.chanAnn, discovery.ChannelCapacity(completeChan.Capacity),
- discovery.ChannelPoint(completeChan.FundingOutpoint),
- )
- select {
- case err := <-errChan:
- if err != nil {
- if routing.IsError(er.Wrapped(err), routing.ErrOutdated,
- routing.ErrIgnored) {
- log.Debugf("Router rejected "+
- "ChannelAnnouncement: %v", err)
- } else {
- return er.Errorf("error sending channel "+
- "announcement: %v", err)
- }
- }
- case <-f.quit:
- return ErrFundingManagerShuttingDown.Default()
- }
-
- errChan = f.cfg.SendAnnouncement(ann.chanUpdateAnn)
- select {
- case err := <-errChan:
- if err != nil {
- if routing.IsError(er.Wrapped(err), routing.ErrOutdated,
- routing.ErrIgnored) {
- log.Debugf("Router rejected "+
- "ChannelUpdate: %v", err)
- } else {
- return er.Errorf("error sending channel "+
- "update: %v", err)
- }
- }
- case <-f.quit:
- return ErrFundingManagerShuttingDown.Default()
- }
-
- return nil
-}
-
-// annAfterSixConfs broadcasts the necessary channel announcement messages to
-// the network after 6 confs. Should be called after the fundingLocked message
-// is sent and the channel is added to the router graph (channelState is
-// 'addedToRouterGraph') and the channel is ready to be used. This is the last
-// step in the channel opening process, and the opening state will be deleted
-// from the database if successful.
-func (f *fundingManager) annAfterSixConfs(completeChan *channeldb.OpenChannel,
- shortChanID *lnwire.ShortChannelID) er.R {
-
- // If this channel is not meant to be announced to the greater network,
- // we'll only send our NodeAnnouncement to our counterparty to ensure we
- // don't leak any of our information.
- announceChan := completeChan.ChannelFlags&lnwire.FFAnnounceChannel != 0
- if !announceChan {
- log.Debugf("Will not announce private channel %v.",
- shortChanID.ToUint64())
-
- peerChan := make(chan lnpeer.Peer, 1)
-
- var peerKey [33]byte
- copy(peerKey[:], completeChan.IdentityPub.SerializeCompressed())
-
- f.cfg.NotifyWhenOnline(peerKey, peerChan)
-
- var peer lnpeer.Peer
- select {
- case peer = <-peerChan:
- case <-f.quit:
- return ErrFundingManagerShuttingDown.Default()
- }
-
- nodeAnn, err := f.cfg.CurrentNodeAnnouncement()
- if err != nil {
- return er.Errorf("unable to retrieve current node "+
- "announcement: %v", err)
- }
-
- chanID := lnwire.NewChanIDFromOutPoint(
- &completeChan.FundingOutpoint,
- )
- pubKey := peer.PubKey()
- log.Debugf("Sending our NodeAnnouncement for "+
- "ChannelID(%v) to %x", chanID, pubKey)
-
- // TODO(halseth): make reliable. If the peer is not online this
- // will fail, and the opening process will stop. Should instead
- // block here, waiting for the peer to come online.
- if err := peer.SendMessage(true, &nodeAnn); err != nil {
- return er.Errorf("unable to send node announcement "+
- "to peer %x: %v", pubKey, err)
- }
- } else {
- // Otherwise, we'll wait until the funding transaction has
- // reached 6 confirmations before announcing it.
- numConfs := uint32(completeChan.NumConfsRequired)
- if numConfs < 6 {
- numConfs = 6
- }
- txid := completeChan.FundingOutpoint.Hash
- log.Debugf("Will announce channel %v after ChannelPoint"+
- "(%v) has gotten %d confirmations",
- shortChanID.ToUint64(), completeChan.FundingOutpoint,
- numConfs)
-
- fundingScript, err := makeFundingScript(completeChan)
- if err != nil {
- return er.Errorf("unable to create funding script for "+
- "ChannelPoint(%v): %v",
- completeChan.FundingOutpoint, err)
- }
-
- // Register with the ChainNotifier for a notification once the
- // funding transaction reaches at least 6 confirmations.
- confNtfn, err := f.cfg.Notifier.RegisterConfirmationsNtfn(
- &txid, fundingScript, numConfs,
- completeChan.FundingBroadcastHeight,
- )
- if err != nil {
- return er.Errorf("unable to register for "+
- "confirmation of ChannelPoint(%v): %v",
- completeChan.FundingOutpoint, err)
- }
-
- // Wait until 6 confirmations has been reached or the wallet
- // signals a shutdown.
- select {
- case _, ok := <-confNtfn.Confirmed:
- if !ok {
- return er.Errorf("ChainNotifier shutting "+
- "down, cannot complete funding flow "+
- "for ChannelPoint(%v)",
- completeChan.FundingOutpoint)
- }
- // Fallthrough.
-
- case <-f.quit:
- return er.Errorf("%v, stopping funding flow for "+
- "ChannelPoint(%v)",
- ErrFundingManagerShuttingDown,
- completeChan.FundingOutpoint)
- }
-
- fundingPoint := completeChan.FundingOutpoint
- chanID := lnwire.NewChanIDFromOutPoint(&fundingPoint)
-
- log.Infof("Announcing ChannelPoint(%v), short_chan_id=%v",
- &fundingPoint, shortChanID)
-
- // Create and broadcast the proofs required to make this channel
- // public and usable for other nodes for routing.
- err = f.announceChannel(
- f.cfg.IDKey, completeChan.IdentityPub,
- completeChan.LocalChanCfg.MultiSigKey.PubKey,
- completeChan.RemoteChanCfg.MultiSigKey.PubKey,
- *shortChanID, chanID,
- )
- if err != nil {
- return er.Errorf("channel announcement failed: %v", err)
- }
-
- log.Debugf("Channel with ChannelPoint(%v), short_chan_id=%v "+
- "announced", &fundingPoint, shortChanID)
- }
-
- return nil
-}
-
-// handleFundingLocked finalizes the channel funding process and enables the
-// channel to enter normal operating mode.
-func (f *fundingManager) handleFundingLocked(peer lnpeer.Peer,
- msg *lnwire.FundingLocked) {
-
- defer f.wg.Done()
- log.Debugf("Received FundingLocked for ChannelID(%v) from "+
- "peer %x", msg.ChanID,
- peer.IdentityKey().SerializeCompressed())
-
- // If we are currently in the process of handling a funding locked
- // message for this channel, ignore.
- f.handleFundingLockedMtx.Lock()
- _, ok := f.handleFundingLockedBarriers[msg.ChanID]
- if ok {
- log.Infof("Already handling fundingLocked for "+
- "ChannelID(%v), ignoring.", msg.ChanID)
- f.handleFundingLockedMtx.Unlock()
- return
- }
-
- // If not already handling fundingLocked for this channel, set up
- // barrier, and move on.
- f.handleFundingLockedBarriers[msg.ChanID] = struct{}{}
- f.handleFundingLockedMtx.Unlock()
-
- defer func() {
- f.handleFundingLockedMtx.Lock()
- delete(f.handleFundingLockedBarriers, msg.ChanID)
- f.handleFundingLockedMtx.Unlock()
- }()
-
- f.localDiscoveryMtx.Lock()
- localDiscoverySignal, ok := f.localDiscoverySignals[msg.ChanID]
- f.localDiscoveryMtx.Unlock()
-
- if ok {
- // Before we proceed with processing the funding locked
- // message, we'll wait for the local waitForFundingConfirmation
- // goroutine to signal that it has the necessary state in
- // place. Otherwise, we may be missing critical information
- // required to handle forwarded HTLC's.
- select {
- case <-localDiscoverySignal:
- // Fallthrough
- case <-f.quit:
- return
- }
-
- // With the signal received, we can now safely delete the entry
- // from the map.
- f.localDiscoveryMtx.Lock()
- delete(f.localDiscoverySignals, msg.ChanID)
- f.localDiscoveryMtx.Unlock()
- }
-
- // First, we'll attempt to locate the channel whose funding workflow is
- // being finalized by this message. We go to the database rather than
- // our reservation map as we may have restarted, mid funding flow.
- chanID := msg.ChanID
- channel, err := f.cfg.FindChannel(chanID)
- if err != nil {
- log.Errorf("Unable to locate ChannelID(%v), cannot complete "+
- "funding", chanID)
- return
- }
-
- // If the RemoteNextRevocation is non-nil, it means that we have
- // already processed fundingLocked for this channel, so ignore.
- if channel.RemoteNextRevocation != nil {
- log.Infof("Received duplicate fundingLocked for "+
- "ChannelID(%v), ignoring.", chanID)
- return
- }
-
- // The funding locked message contains the next commitment point we'll
- // need to create the next commitment state for the remote party. So
- // we'll insert that into the channel now before passing it along to
- // other sub-systems.
- err = channel.InsertNextRevocation(msg.NextPerCommitmentPoint)
- if err != nil {
- log.Errorf("unable to insert next commitment point: %v", err)
- return
- }
-
- // Launch a defer so we _ensure_ that the channel barrier is properly
- // closed even if the target peer is no longer online at this point.
- defer func() {
- // Close the active channel barrier signaling the readHandler
- // that commitment related modifications to this channel can
- // now proceed.
- f.barrierMtx.Lock()
- chanBarrier, ok := f.newChanBarriers[chanID]
- if ok {
- log.Tracef("Closing chan barrier for ChanID(%v)",
- chanID)
- close(chanBarrier)
- delete(f.newChanBarriers, chanID)
- }
- f.barrierMtx.Unlock()
- }()
-
- if err := peer.AddNewChannel(channel, f.quit); err != nil {
- log.Errorf("Unable to add new channel %v with peer %x: %v",
- channel.FundingOutpoint,
- peer.IdentityKey().SerializeCompressed(), err,
- )
- }
-}
-
-// chanAnnouncement encapsulates the two authenticated announcements that we
-// send out to the network after a new channel has been created locally.
-type chanAnnouncement struct {
- chanAnn *lnwire.ChannelAnnouncement
- chanUpdateAnn *lnwire.ChannelUpdate
- chanProof *lnwire.AnnounceSignatures
-}
-
-// newChanAnnouncement creates the authenticated channel announcement messages
-// required to broadcast a newly created channel to the network. The
-// announcement is two part: the first part authenticates the existence of the
-// channel and contains four signatures binding the funding pub keys and
-// identity pub keys of both parties to the channel, and the second segment is
-// authenticated only by us and contains our directional routing policy for the
-// channel.
-func (f *fundingManager) newChanAnnouncement(localPubKey, remotePubKey,
- localFundingKey, remoteFundingKey *btcec.PublicKey,
- shortChanID lnwire.ShortChannelID, chanID lnwire.ChannelID,
- fwdMinHTLC, fwdMaxHTLC lnwire.MilliSatoshi) (*chanAnnouncement, er.R) {
-
- chainHash := *f.cfg.Wallet.Cfg.NetParams.GenesisHash
-
- // The unconditional section of the announcement is the ShortChannelID
- // itself which compactly encodes the location of the funding output
- // within the blockchain.
- chanAnn := &lnwire.ChannelAnnouncement{
- ShortChannelID: shortChanID,
- Features: lnwire.NewRawFeatureVector(),
- ChainHash: chainHash,
- }
-
- // The chanFlags field indicates which directed edge of the channel is
- // being updated within the ChannelUpdateAnnouncement announcement
- // below. A value of zero means it's the edge of the "first" node and 1
- // being the other node.
- var chanFlags lnwire.ChanUpdateChanFlags
-
- // The lexicographical ordering of the two identity public keys of the
- // nodes indicates which of the nodes is "first". If our serialized
- // identity key is lower than theirs then we're the "first" node and
- // second otherwise.
- selfBytes := localPubKey.SerializeCompressed()
- remoteBytes := remotePubKey.SerializeCompressed()
- if bytes.Compare(selfBytes, remoteBytes) == -1 {
- copy(chanAnn.NodeID1[:], localPubKey.SerializeCompressed())
- copy(chanAnn.NodeID2[:], remotePubKey.SerializeCompressed())
- copy(chanAnn.BitcoinKey1[:], localFundingKey.SerializeCompressed())
- copy(chanAnn.BitcoinKey2[:], remoteFundingKey.SerializeCompressed())
-
- // If we're the first node then update the chanFlags to
- // indicate the "direction" of the update.
- chanFlags = 0
- } else {
- copy(chanAnn.NodeID1[:], remotePubKey.SerializeCompressed())
- copy(chanAnn.NodeID2[:], localPubKey.SerializeCompressed())
- copy(chanAnn.BitcoinKey1[:], remoteFundingKey.SerializeCompressed())
- copy(chanAnn.BitcoinKey2[:], localFundingKey.SerializeCompressed())
-
- // If we're the second node then update the chanFlags to
- // indicate the "direction" of the update.
- chanFlags = 1
- }
-
- // Our channel update message flags will signal that we support the
- // max_htlc field.
- msgFlags := lnwire.ChanUpdateOptionMaxHtlc
-
- // We announce the channel with the default values. Some of
- // these values can later be changed by crafting a new ChannelUpdate.
- chanUpdateAnn := &lnwire.ChannelUpdate{
- ShortChannelID: shortChanID,
- ChainHash: chainHash,
- Timestamp: uint32(time.Now().Unix()),
- MessageFlags: msgFlags,
- ChannelFlags: chanFlags,
- TimeLockDelta: uint16(f.cfg.DefaultRoutingPolicy.TimeLockDelta),
-
- // We use the HtlcMinimumMsat that the remote party required us
- // to use, as our ChannelUpdate will be used to carry HTLCs
- // towards them.
- HtlcMinimumMsat: fwdMinHTLC,
- HtlcMaximumMsat: fwdMaxHTLC,
-
- BaseFee: uint32(f.cfg.DefaultRoutingPolicy.BaseFee),
- FeeRate: uint32(f.cfg.DefaultRoutingPolicy.FeeRate),
- }
-
- // With the channel update announcement constructed, we'll generate a
- // signature that signs a double-sha digest of the announcement.
- // This'll serve to authenticate this announcement and any other future
- // updates we may send.
- chanUpdateMsg, err := chanUpdateAnn.DataToSign()
- if err != nil {
- return nil, err
- }
- sig, err := f.cfg.SignMessage(f.cfg.IDKey, chanUpdateMsg)
- if err != nil {
- return nil, er.Errorf("unable to generate channel "+
- "update announcement signature: %v", err)
- }
- chanUpdateAnn.Signature, err = lnwire.NewSigFromSignature(sig)
- if err != nil {
- return nil, er.Errorf("unable to generate channel "+
- "update announcement signature: %v", err)
- }
-
- // The channel existence proofs itself is currently announced in
- // distinct message. In order to properly authenticate this message, we
- // need two signatures: one under the identity public key used which
- // signs the message itself and another signature of the identity
- // public key under the funding key itself.
- //
- // TODO(roasbeef): use SignAnnouncement here instead?
- chanAnnMsg, err := chanAnn.DataToSign()
- if err != nil {
- return nil, err
- }
- nodeSig, err := f.cfg.SignMessage(f.cfg.IDKey, chanAnnMsg)
- if err != nil {
- return nil, er.Errorf("unable to generate node "+
- "signature for channel announcement: %v", err)
- }
- bitcoinSig, err := f.cfg.SignMessage(localFundingKey, chanAnnMsg)
- if err != nil {
- return nil, er.Errorf("unable to generate bitcoin "+
- "signature for node public key: %v", err)
- }
-
- // Finally, we'll generate the announcement proof which we'll use to
- // provide the other side with the necessary signatures required to
- // allow them to reconstruct the full channel announcement.
- proof := &lnwire.AnnounceSignatures{
- ChannelID: chanID,
- ShortChannelID: shortChanID,
- }
- proof.NodeSignature, err = lnwire.NewSigFromSignature(nodeSig)
- if err != nil {
- return nil, err
- }
- proof.BitcoinSignature, err = lnwire.NewSigFromSignature(bitcoinSig)
- if err != nil {
- return nil, err
- }
-
- return &chanAnnouncement{
- chanAnn: chanAnn,
- chanUpdateAnn: chanUpdateAnn,
- chanProof: proof,
- }, nil
-}
-
-// announceChannel announces a newly created channel to the rest of the network
-// by crafting the two authenticated announcements required for the peers on
-// the network to recognize the legitimacy of the channel. The crafted
-// announcements are then sent to the channel router to handle broadcasting to
-// the network during its next trickle.
-// This method is synchronous and will return when all the network requests
-// finish, either successfully or with an error.
-func (f *fundingManager) announceChannel(localIDKey, remoteIDKey, localFundingKey,
- remoteFundingKey *btcec.PublicKey, shortChanID lnwire.ShortChannelID,
- chanID lnwire.ChannelID) er.R {
-
- // First, we'll create the batch of announcements to be sent upon
- // initial channel creation. This includes the channel announcement
- // itself, the channel update announcement, and our half of the channel
- // proof needed to fully authenticate the channel.
- //
- // We can pass in zeroes for the min and max htlc policy, because we
- // only use the channel announcement message from the returned struct.
- ann, err := f.newChanAnnouncement(localIDKey, remoteIDKey,
- localFundingKey, remoteFundingKey, shortChanID, chanID,
- 0, 0,
- )
- if err != nil {
- log.Errorf("can't generate channel announcement: %v", err)
- return err
- }
-
- // We only send the channel proof announcement and the node announcement
- // because addToRouterGraph previously sent the ChannelAnnouncement and
- // the ChannelUpdate announcement messages. The channel proof and node
- // announcements are broadcast to the greater network.
- errChan := f.cfg.SendAnnouncement(ann.chanProof)
- select {
- case err := <-errChan:
- if err != nil {
- if routing.IsError(er.Wrapped(err), routing.ErrOutdated,
- routing.ErrIgnored) {
- log.Debugf("Router rejected "+
- "AnnounceSignatures: %v", err)
- } else {
- log.Errorf("Unable to send channel "+
- "proof: %v", err)
- return err
- }
- }
-
- case <-f.quit:
- return ErrFundingManagerShuttingDown.Default()
- }
-
- // Now that the channel is announced to the network, we will also
- // obtain and send a node announcement. This is done since a node
- // announcement is only accepted after a channel is known for that
- // particular node, and this might be our first channel.
- nodeAnn, err := f.cfg.CurrentNodeAnnouncement()
- if err != nil {
- log.Errorf("can't generate node announcement: %v", err)
- return err
- }
-
- errChan = f.cfg.SendAnnouncement(&nodeAnn)
- select {
- case err := <-errChan:
- if err != nil {
- if routing.IsError(er.Wrapped(err), routing.ErrOutdated,
- routing.ErrIgnored) {
- log.Debugf("Router rejected "+
- "NodeAnnouncement: %v", err)
- } else {
- log.Errorf("Unable to send node "+
- "announcement: %v", err)
- return err
- }
- }
-
- case <-f.quit:
- return ErrFundingManagerShuttingDown.Default()
- }
-
- return nil
-}
-
-// initFundingWorkflow sends a message to the funding manager instructing it
-// to initiate a single funder workflow with the source peer.
-// TODO(roasbeef): re-visit blocking nature..
-func (f *fundingManager) initFundingWorkflow(peer lnpeer.Peer, req *openChanReq) {
- f.fundingRequests <- &initFundingMsg{
- peer: peer,
- openChanReq: req,
- }
-}
-
-// getUpfrontShutdownScript takes a user provided script and a getScript
-// function which can be used to generate an upfront shutdown script. If our
-// peer does not support the feature, this function will error if a non-zero
-// script was provided by the user, and return an empty script otherwise. If
-// our peer does support the feature, we will return the user provided script
-// if non-zero, or a freshly generated script if our node is configured to set
-// upfront shutdown scripts automatically.
-func getUpfrontShutdownScript(enableUpfrontShutdown bool, peer lnpeer.Peer,
- script lnwire.DeliveryAddress,
- getScript func() (lnwire.DeliveryAddress, er.R)) (lnwire.DeliveryAddress,
- er.R) {
-
- // Check whether the remote peer supports upfront shutdown scripts.
- remoteUpfrontShutdown := peer.RemoteFeatures().HasFeature(
- lnwire.UpfrontShutdownScriptOptional,
- )
-
- // If the peer does not support upfront shutdown scripts, and one has been
- // provided, return an error because the feature is not supported.
- if !remoteUpfrontShutdown && len(script) != 0 {
- return nil, errUpfrontShutdownScriptNotSupported.Default()
- }
-
- // If the peer does not support upfront shutdown, return an empty address.
- if !remoteUpfrontShutdown {
- return nil, nil
- }
-
- // If the user has provided an script and the peer supports the feature,
- // return it. Note that user set scripts override the enable upfront
- // shutdown flag.
- if len(script) > 0 {
- return script, nil
- }
-
- // If we do not have setting of upfront shutdown script enabled, return
- // an empty script.
- if !enableUpfrontShutdown {
- return nil, nil
- }
-
- return getScript()
-}
-
-// handleInitFundingMsg creates a channel reservation within the daemon's
-// wallet, then sends a funding request to the remote peer kicking off the
-// funding workflow.
-func (f *fundingManager) handleInitFundingMsg(msg *initFundingMsg) {
- var (
- peerKey = msg.peer.IdentityKey()
- localAmt = msg.localFundingAmt
- minHtlcIn = msg.minHtlcIn
- remoteCsvDelay = msg.remoteCsvDelay
- maxValue = msg.maxValueInFlight
- maxHtlcs = msg.maxHtlcs
- maxCSV = msg.maxLocalCsv
- )
-
- // If no maximum CSV delay was set for this channel, we use our default
- // value.
- if maxCSV == 0 {
- maxCSV = f.cfg.MaxLocalCSVDelay
- }
-
- // We'll determine our dust limit depending on which chain is active.
- var ourDustLimit btcutil.Amount
- switch f.cfg.RegisteredChains.PrimaryChain() {
- case chainreg.BitcoinChain:
- ourDustLimit = lnwallet.DefaultDustLimit()
- case chainreg.LitecoinChain:
- ourDustLimit = chainreg.DefaultLitecoinDustLimit
- }
-
- log.Infof("Initiating fundingRequest(local_amt=%v "+
- "(subtract_fees=%v), push_amt=%v, chain_hash=%v, peer=%x, "+
- "dust_limit=%v, min_confs=%v)", localAmt, msg.subtractFees,
- msg.pushAmt, msg.chainHash, peerKey.SerializeCompressed(),
- ourDustLimit, msg.minConfs)
-
- // First, we'll query the fee estimator for a fee that should get the
- // commitment transaction confirmed by the next few blocks (conf target
- // of 3). We target the near blocks here to ensure that we'll be able
- // to execute a timely unilateral channel closure if needed.
- commitFeePerKw, err := f.cfg.FeeEstimator.EstimateFeePerKW(3)
- if err != nil {
- msg.err <- err
- return
- }
-
- // We set the channel flags to indicate whether we want this channel to
- // be announced to the network.
- var channelFlags lnwire.FundingFlag
- if !msg.openChanReq.private {
- // This channel will be announced.
- channelFlags = lnwire.FFAnnounceChannel
- }
-
- // If the caller specified their own channel ID, then we'll use that.
- // Otherwise we'll generate a fresh one as normal. This will be used
- // to track this reservation throughout its lifetime.
- var chanID [32]byte
- if msg.pendingChanID == zeroID {
- chanID = f.nextPendingChanID()
- } else {
- // If the user specified their own pending channel ID, then
- // we'll ensure it doesn't collide with any existing pending
- // channel ID.
- chanID = msg.pendingChanID
- if _, err := f.getReservationCtx(peerKey, chanID); err == nil {
- msg.err <- er.Errorf("pendingChannelID(%x) "+
- "already present", chanID[:])
- return
- }
- }
-
- // Check whether the peer supports upfront shutdown, and get an address
- // which should be used (either a user specified address or a new
- // address from the wallet if our node is configured to set shutdown
- // address by default).
- shutdown, err := getUpfrontShutdownScript(
- f.cfg.EnableUpfrontShutdown, msg.peer,
- msg.openChanReq.shutdownScript,
- func() (lnwire.DeliveryAddress, er.R) {
- addr, err := f.cfg.Wallet.NewAddress(
- lnwallet.WitnessPubKey, false,
- )
- if err != nil {
- return nil, err
- }
- return txscript.PayToAddrScript(addr)
- },
- )
- if err != nil {
- msg.err <- err
- return
- }
-
- // Initialize a funding reservation with the local wallet. If the
- // wallet doesn't have enough funds to commit to this channel, then the
- // request will fail, and be aborted.
- //
- // Before we init the channel, we'll also check to see if we've
- // negotiated the new tweakless commitment format. This is only the
- // case if *both* us and the remote peer are signaling the proper
- // feature bit.
- commitType := commitmentType(
- msg.peer.LocalFeatures(), msg.peer.RemoteFeatures(),
- )
- req := &lnwallet.InitFundingReserveMsg{
- ChainHash: &msg.chainHash,
- PendingChanID: chanID,
- NodeID: peerKey,
- NodeAddr: msg.peer.Address(),
- SubtractFees: msg.subtractFees,
- LocalFundingAmt: localAmt,
- RemoteFundingAmt: 0,
- CommitFeePerKw: commitFeePerKw,
- FundingFeePerKw: msg.fundingFeePerKw,
- PushMSat: msg.pushAmt,
- Flags: channelFlags,
- MinConfs: msg.minConfs,
- CommitType: commitType,
- ChanFunder: msg.chanFunder,
- }
-
- reservation, err := f.cfg.Wallet.InitChannelReservation(req)
- if err != nil {
- msg.err <- err
- return
- }
-
- // Set our upfront shutdown address in the existing reservation.
- reservation.SetOurUpfrontShutdown(shutdown)
-
- // Now that we have successfully reserved funds for this channel in the
- // wallet, we can fetch the final channel capacity. This is done at
- // this point since the final capacity might change in case of
- // SubtractFees=true.
- capacity := reservation.Capacity()
-
- log.Infof("Target commit tx sat/kw for pendingID(%x): %v", chanID,
- int64(commitFeePerKw))
-
- // If the remote CSV delay was not set in the open channel request,
- // we'll use the RequiredRemoteDelay closure to compute the delay we
- // require given the total amount of funds within the channel.
- if remoteCsvDelay == 0 {
- remoteCsvDelay = f.cfg.RequiredRemoteDelay(capacity)
- }
-
- // If no minimum HTLC value was specified, use the default one.
- if minHtlcIn == 0 {
- minHtlcIn = f.cfg.DefaultMinHtlcIn
- }
-
- // If no max value was specified, use the default one.
- if maxValue == 0 {
- maxValue = f.cfg.RequiredRemoteMaxValue(capacity)
- }
-
- if maxHtlcs == 0 {
- maxHtlcs = f.cfg.RequiredRemoteMaxHTLCs(capacity)
- }
-
- // If a pending channel map for this peer isn't already created, then
- // we create one, ultimately allowing us to track this pending
- // reservation within the target peer.
- peerIDKey := newSerializedKey(peerKey)
- f.resMtx.Lock()
- if _, ok := f.activeReservations[peerIDKey]; !ok {
- f.activeReservations[peerIDKey] = make(pendingChannels)
- }
-
- resCtx := &reservationWithCtx{
- chanAmt: capacity,
- remoteCsvDelay: remoteCsvDelay,
- remoteMinHtlc: minHtlcIn,
- remoteMaxValue: maxValue,
- remoteMaxHtlcs: maxHtlcs,
- maxLocalCsv: maxCSV,
- reservation: reservation,
- peer: msg.peer,
- updates: msg.updates,
- err: msg.err,
- }
- f.activeReservations[peerIDKey][chanID] = resCtx
- f.resMtx.Unlock()
-
- // Update the timestamp once the initFundingMsg has been handled.
- defer resCtx.updateTimestamp()
-
- // Once the reservation has been created, and indexed, queue a funding
- // request to the remote peer, kicking off the funding workflow.
- ourContribution := reservation.OurContribution()
-
- // Finally, we'll use the current value of the channels and our default
- // policy to determine of required commitment constraints for the
- // remote party.
- chanReserve := f.cfg.RequiredRemoteChanReserve(capacity, ourDustLimit)
-
- log.Infof("Starting funding workflow with %v for pending_id(%x), "+
- "committype=%v", msg.peer.Address(), chanID, commitType)
-
- fundingOpen := lnwire.OpenChannel{
- ChainHash: *f.cfg.Wallet.Cfg.NetParams.GenesisHash,
- PendingChannelID: chanID,
- FundingAmount: capacity,
- PushAmount: msg.pushAmt,
- DustLimit: ourContribution.DustLimit,
- MaxValueInFlight: maxValue,
- ChannelReserve: chanReserve,
- HtlcMinimum: minHtlcIn,
- FeePerKiloWeight: uint32(commitFeePerKw),
- CsvDelay: remoteCsvDelay,
- MaxAcceptedHTLCs: maxHtlcs,
- FundingKey: ourContribution.MultiSigKey.PubKey,
- RevocationPoint: ourContribution.RevocationBasePoint.PubKey,
- PaymentPoint: ourContribution.PaymentBasePoint.PubKey,
- HtlcPoint: ourContribution.HtlcBasePoint.PubKey,
- DelayedPaymentPoint: ourContribution.DelayBasePoint.PubKey,
- FirstCommitmentPoint: ourContribution.FirstCommitmentPoint,
- ChannelFlags: channelFlags,
- UpfrontShutdownScript: shutdown,
- }
- if err := msg.peer.SendMessage(true, &fundingOpen); err != nil {
- e := er.Errorf("unable to send funding request message: %v",
- err)
- log.Errorf(e.String())
-
- // Since we were unable to send the initial message to the peer
- // and start the funding flow, we'll cancel this reservation.
- _, err := f.cancelReservationCtx(peerKey, chanID, false)
- if err != nil {
- log.Errorf("unable to cancel reservation: %v", err)
- }
-
- msg.err <- e
- return
- }
-}
-
-// handleErrorMsg processes the error which was received from remote peer,
-// depending on the type of error we should do different clean up steps and
-// inform the user about it.
-func (f *fundingManager) handleErrorMsg(peer lnpeer.Peer,
- msg *lnwire.Error) {
-
- chanID := msg.ChanID
- peerKey := peer.IdentityKey()
-
- // First, we'll attempt to retrieve and cancel the funding workflow
- // that this error was tied to. If we're unable to do so, then we'll
- // exit early as this was an unwarranted error.
- resCtx, err := f.cancelReservationCtx(peerKey, chanID, true)
- if err != nil {
- log.Warnf("Received error for non-existent funding "+
- "flow: %v (%v)", err, msg.Error())
- return
- }
-
- // If we did indeed find the funding workflow, then we'll return the
- // error back to the caller (if any), and cancel the workflow itself.
- fundingErr := er.Errorf("received funding error from %x: %v",
- peerKey.SerializeCompressed(), msg.Error(),
- )
- log.Errorf(fundingErr.String())
-
- // If this was a PSBT funding flow, the remote likely timed out because
- // we waited too long. Return a nice error message to the user in that
- // case so the user knows what's the problem.
- if resCtx.reservation.IsPsbt() {
- fundingErr = chanfunding.ErrRemoteCanceled.New("", fundingErr)
- }
-
- resCtx.err <- fundingErr
-}
-
-// pruneZombieReservations loops through all pending reservations and fails the
-// funding flow for any reservations that have not been updated since the
-// ReservationTimeout and are not locked waiting for the funding transaction.
-func (f *fundingManager) pruneZombieReservations() {
- zombieReservations := make(pendingChannels)
-
- f.resMtx.RLock()
- for _, pendingReservations := range f.activeReservations {
- for pendingChanID, resCtx := range pendingReservations {
- if resCtx.isLocked() {
- continue
- }
-
- // We don't want to expire PSBT funding reservations.
- // These reservations are always initiated by us and the
- // remote peer is likely going to cancel them after some
- // idle time anyway. So no need for us to also prune
- // them.
- sinceLastUpdate := time.Since(resCtx.lastUpdated)
- isExpired := sinceLastUpdate > f.cfg.ReservationTimeout
- if !resCtx.reservation.IsPsbt() && isExpired {
- zombieReservations[pendingChanID] = resCtx
- }
- }
- }
- f.resMtx.RUnlock()
-
- for pendingChanID, resCtx := range zombieReservations {
- err := er.Errorf("reservation timed out waiting for peer "+
- "(peer_id:%x, chan_id:%x)", resCtx.peer.IdentityKey(),
- pendingChanID[:])
- log.Warnf(err.String())
- f.failFundingFlow(resCtx.peer, pendingChanID, err)
- }
-}
-
-// cancelReservationCtx does all needed work in order to securely cancel the
-// reservation.
-func (f *fundingManager) cancelReservationCtx(peerKey *btcec.PublicKey,
- pendingChanID [32]byte, byRemote bool) (*reservationWithCtx, er.R) {
-
- log.Infof("Cancelling funding reservation for node_key=%x, "+
- "chan_id=%x", peerKey.SerializeCompressed(), pendingChanID[:])
-
- peerIDKey := newSerializedKey(peerKey)
- f.resMtx.Lock()
- defer f.resMtx.Unlock()
-
- nodeReservations, ok := f.activeReservations[peerIDKey]
- if !ok {
- // No reservations for this node.
- return nil, er.Errorf("no active reservations for peer(%x)",
- peerIDKey[:])
- }
-
- ctx, ok := nodeReservations[pendingChanID]
- if !ok {
- return nil, er.Errorf("unknown channel (id: %x) for "+
- "peer(%x)", pendingChanID[:], peerIDKey[:])
- }
-
- // If the reservation was a PSBT funding flow and it was canceled by the
- // remote peer, then we need to thread through a different error message
- // to the subroutine that's waiting for the user input so it can return
- // a nice error message to the user.
- if ctx.reservation.IsPsbt() && byRemote {
- ctx.reservation.RemoteCanceled()
- }
-
- if err := ctx.reservation.Cancel(); err != nil {
- return nil, er.Errorf("unable to cancel reservation: %v",
- err)
- }
-
- delete(nodeReservations, pendingChanID)
-
- // If this was the last active reservation for this peer, delete the
- // peer's entry altogether.
- if len(nodeReservations) == 0 {
- delete(f.activeReservations, peerIDKey)
- }
- return ctx, nil
-}
-
-// deleteReservationCtx deletes the reservation uniquely identified by the
-// target public key of the peer, and the specified pending channel ID.
-func (f *fundingManager) deleteReservationCtx(peerKey *btcec.PublicKey,
- pendingChanID [32]byte) {
-
- // TODO(roasbeef): possibly cancel funding barrier in peer's
- // channelManager?
- peerIDKey := newSerializedKey(peerKey)
- f.resMtx.Lock()
- defer f.resMtx.Unlock()
-
- nodeReservations, ok := f.activeReservations[peerIDKey]
- if !ok {
- // No reservations for this node.
- return
- }
- delete(nodeReservations, pendingChanID)
-
- // If this was the last active reservation for this peer, delete the
- // peer's entry altogether.
- if len(nodeReservations) == 0 {
- delete(f.activeReservations, peerIDKey)
- }
-}
-
-// getReservationCtx returns the reservation context for a particular pending
-// channel ID for a target peer.
-func (f *fundingManager) getReservationCtx(peerKey *btcec.PublicKey,
- pendingChanID [32]byte) (*reservationWithCtx, er.R) {
-
- peerIDKey := newSerializedKey(peerKey)
- f.resMtx.RLock()
- resCtx, ok := f.activeReservations[peerIDKey][pendingChanID]
- f.resMtx.RUnlock()
-
- if !ok {
- return nil, er.Errorf("unknown channel (id: %x) for "+
- "peer(%x)", pendingChanID[:], peerIDKey[:])
- }
-
- return resCtx, nil
-}
-
-// IsPendingChannel returns a boolean indicating whether the channel identified
-// by the pendingChanID and given peer is pending, meaning it is in the process
-// of being funded. After the funding transaction has been confirmed, the
-// channel will receive a new, permanent channel ID, and will no longer be
-// considered pending.
-func (f *fundingManager) IsPendingChannel(pendingChanID [32]byte,
- peer lnpeer.Peer) bool {
-
- peerIDKey := newSerializedKey(peer.IdentityKey())
- f.resMtx.RLock()
- _, ok := f.activeReservations[peerIDKey][pendingChanID]
- f.resMtx.RUnlock()
-
- return ok
-}
-
-func copyPubKey(pub *btcec.PublicKey) *btcec.PublicKey {
- return &btcec.PublicKey{
- Curve: btcec.S256(),
- X: pub.X,
- Y: pub.Y,
- }
-}
-
-// saveChannelOpeningState saves the channelOpeningState for the provided
-// chanPoint to the channelOpeningStateBucket.
-func (f *fundingManager) saveChannelOpeningState(chanPoint *wire.OutPoint,
- state channelOpeningState, shortChanID *lnwire.ShortChannelID) er.R {
- return kvdb.Update(f.cfg.Wallet.Cfg.Database, func(tx kvdb.RwTx) er.R {
-
- bucket, err := tx.CreateTopLevelBucket(channelOpeningStateBucket)
- if err != nil {
- return err
- }
-
- var outpointBytes bytes.Buffer
- if errr := writeOutpoint(&outpointBytes, chanPoint); errr != nil {
- return errr
- }
-
- // Save state and the uint64 representation of the shortChanID
- // for later use.
- scratch := make([]byte, 10)
- byteOrder.PutUint16(scratch[:2], uint16(state))
- byteOrder.PutUint64(scratch[2:], shortChanID.ToUint64())
-
- return bucket.Put(outpointBytes.Bytes(), scratch)
- }, func() {})
-}
-
-// getChannelOpeningState fetches the channelOpeningState for the provided
-// chanPoint from the database, or returns ErrChannelNotFound if the channel
-// is not found.
-func (f *fundingManager) getChannelOpeningState(chanPoint *wire.OutPoint) (
- channelOpeningState, *lnwire.ShortChannelID, er.R) {
-
- var state channelOpeningState
- var shortChanID lnwire.ShortChannelID
- err := kvdb.View(f.cfg.Wallet.Cfg.Database, func(tx kvdb.RTx) er.R {
-
- bucket := tx.ReadBucket(channelOpeningStateBucket)
- if bucket == nil {
- // If the bucket does not exist, it means we never added
- // a channel to the db, so return ErrChannelNotFound.
- return ErrChannelNotFound.Default()
- }
-
- var outpointBytes bytes.Buffer
- if err := writeOutpoint(&outpointBytes, chanPoint); err != nil {
- return err
- }
-
- value := bucket.Get(outpointBytes.Bytes())
- if value == nil {
- return ErrChannelNotFound.Default()
- }
-
- state = channelOpeningState(byteOrder.Uint16(value[:2]))
- shortChanID = lnwire.NewShortChanIDFromInt(byteOrder.Uint64(value[2:]))
- return nil
- }, func() {})
- if err != nil {
- return 0, nil, err
- }
-
- return state, &shortChanID, nil
-}
-
-// deleteChannelOpeningState removes any state for chanPoint from the database.
-func (f *fundingManager) deleteChannelOpeningState(chanPoint *wire.OutPoint) er.R {
- return kvdb.Update(f.cfg.Wallet.Cfg.Database, func(tx kvdb.RwTx) er.R {
- bucket := tx.ReadWriteBucket(channelOpeningStateBucket)
- if bucket == nil {
- return er.Errorf("bucket not found")
- }
-
- var outpointBytes bytes.Buffer
- if err := writeOutpoint(&outpointBytes, chanPoint); err != nil {
- return err
- }
-
- return bucket.Delete(outpointBytes.Bytes())
- }, func() {})
-}
diff --git a/lnd/fundingmanager_test.go b/lnd/fundingmanager_test.go
deleted file mode 100644
index aaabc751..00000000
--- a/lnd/fundingmanager_test.go
+++ /dev/null
@@ -1,3497 +0,0 @@
-// +build !rpctest
-
-package lnd
-
-import (
- "bytes"
- "io/ioutil"
- "math/big"
- "net"
- "os"
- "path/filepath"
- "runtime"
- "strings"
- "testing"
- "time"
-
- "github.com/pkt-cash/pktd/btcec"
- "github.com/pkt-cash/pktd/btcutil"
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/btcutil/util"
- "github.com/pkt-cash/pktd/chaincfg"
- "github.com/pkt-cash/pktd/chaincfg/chainhash"
- "github.com/pkt-cash/pktd/lnd/chainntnfs"
- "github.com/pkt-cash/pktd/lnd/chainreg"
- "github.com/pkt-cash/pktd/lnd/chanacceptor"
- "github.com/pkt-cash/pktd/lnd/channeldb"
- "github.com/pkt-cash/pktd/lnd/channelnotifier"
- "github.com/pkt-cash/pktd/lnd/discovery"
- "github.com/pkt-cash/pktd/lnd/htlcswitch"
- "github.com/pkt-cash/pktd/lnd/input"
- "github.com/pkt-cash/pktd/lnd/keychain"
- "github.com/pkt-cash/pktd/lnd/lncfg"
- "github.com/pkt-cash/pktd/lnd/lnpeer"
- "github.com/pkt-cash/pktd/lnd/lnrpc"
- "github.com/pkt-cash/pktd/lnd/lntest/mock"
- "github.com/pkt-cash/pktd/lnd/lnwallet"
- "github.com/pkt-cash/pktd/lnd/lnwallet/chainfee"
- "github.com/pkt-cash/pktd/lnd/lnwire"
- "github.com/pkt-cash/pktd/wire"
- "github.com/stretchr/testify/require"
-)
-
-const (
- // testPollNumTries is the number of times we attempt to query
- // for a certain expected database state before we give up and
- // consider the test failed. Since it sometimes can take a
- // while to update the database, we poll a certain amount of
- // times, until it gets into the state we expect, or we are out
- // of tries.
- testPollNumTries = 10
-
- // testPollSleepMs is the number of milliseconds to sleep between
- // each attempt to access the database to check its state.
- testPollSleepMs = 500
-
- // maxPending is the maximum number of channels we allow opening to the
- // same peer in the max pending channels test.
- maxPending = 4
-
- // A dummy value to use for the funding broadcast height.
- fundingBroadcastHeight = 123
-)
-
-var (
- // Use hard-coded keys for Alice and Bob, the two FundingManagers that
- // we will test the interaction between.
- alicePrivKeyBytes = [32]byte{
- 0xb7, 0x94, 0x38, 0x5f, 0x2d, 0x1e, 0xf7, 0xab,
- 0x4d, 0x92, 0x73, 0xd1, 0x90, 0x63, 0x81, 0xb4,
- 0x4f, 0x2f, 0x6f, 0x25, 0x88, 0xa3, 0xef, 0xb9,
- 0x6a, 0x49, 0x18, 0x83, 0x31, 0x98, 0x47, 0x53,
- }
-
- alicePrivKey, alicePubKey = btcec.PrivKeyFromBytes(btcec.S256(),
- alicePrivKeyBytes[:])
-
- aliceTCPAddr, _ = net.ResolveTCPAddr("tcp", "10.0.0.2:9001")
-
- aliceAddr = &lnwire.NetAddress{
- IdentityKey: alicePubKey,
- Address: aliceTCPAddr,
- }
-
- bobPrivKeyBytes = [32]byte{
- 0x81, 0xb6, 0x37, 0xd8, 0xfc, 0xd2, 0xc6, 0xda,
- 0x63, 0x59, 0xe6, 0x96, 0x31, 0x13, 0xa1, 0x17,
- 0xd, 0xe7, 0x95, 0xe4, 0xb7, 0x25, 0xb8, 0x4d,
- 0x1e, 0xb, 0x4c, 0xfd, 0x9e, 0xc5, 0x8c, 0xe9,
- }
-
- bobPrivKey, bobPubKey = btcec.PrivKeyFromBytes(btcec.S256(),
- bobPrivKeyBytes[:])
-
- bobTCPAddr, _ = net.ResolveTCPAddr("tcp", "10.0.0.2:9000")
-
- bobAddr = &lnwire.NetAddress{
- IdentityKey: bobPubKey,
- Address: bobTCPAddr,
- }
-
- testSig = &btcec.Signature{
- R: new(big.Int),
- S: new(big.Int),
- }
- _, _ = testSig.R.SetString("63724406601629180062774974542967536251589935445068131219452686511677818569431", 10)
- _, _ = testSig.S.SetString("18801056069249825825291287104931333862866033135609736119018462340006816851118", 10)
-
- fundingNetParams = chainreg.BitcoinTestNetParams
-)
-
-type mockNotifier struct {
- oneConfChannel chan *chainntnfs.TxConfirmation
- sixConfChannel chan *chainntnfs.TxConfirmation
- epochChan chan *chainntnfs.BlockEpoch
-}
-
-func (m *mockNotifier) RegisterConfirmationsNtfn(txid *chainhash.Hash,
- _ []byte, numConfs, heightHint uint32) (*chainntnfs.ConfirmationEvent, er.R) {
-
- if numConfs == 6 {
- return &chainntnfs.ConfirmationEvent{
- Confirmed: m.sixConfChannel,
- }, nil
- }
- return &chainntnfs.ConfirmationEvent{
- Confirmed: m.oneConfChannel,
- }, nil
-}
-
-func (m *mockNotifier) RegisterBlockEpochNtfn(
- bestBlock *chainntnfs.BlockEpoch) (*chainntnfs.BlockEpochEvent, er.R) {
- return &chainntnfs.BlockEpochEvent{
- Epochs: m.epochChan,
- Cancel: func() {},
- }, nil
-}
-
-func (m *mockNotifier) Start() er.R {
- return nil
-}
-
-func (m *mockNotifier) Started() bool {
- return true
-}
-
-func (m *mockNotifier) Stop() er.R {
- return nil
-}
-
-func (m *mockNotifier) RegisterSpendNtfn(outpoint *wire.OutPoint, _ []byte,
- heightHint uint32) (*chainntnfs.SpendEvent, er.R) {
- return &chainntnfs.SpendEvent{
- Spend: make(chan *chainntnfs.SpendDetail),
- Cancel: func() {},
- }, nil
-}
-
-type mockChanEvent struct {
- openEvent chan wire.OutPoint
- pendingOpenEvent chan channelnotifier.PendingOpenChannelEvent
-}
-
-func (m *mockChanEvent) NotifyOpenChannelEvent(outpoint wire.OutPoint) {
- m.openEvent <- outpoint
-}
-
-func (m *mockChanEvent) NotifyPendingOpenChannelEvent(outpoint wire.OutPoint,
- pendingChannel *channeldb.OpenChannel) {
-
- m.pendingOpenEvent <- channelnotifier.PendingOpenChannelEvent{
- ChannelPoint: &outpoint,
- PendingChannel: pendingChannel,
- }
-}
-
-type newChannelMsg struct {
- channel *channeldb.OpenChannel
- err chan er.R
-}
-
-type testNode struct {
- privKey *btcec.PrivateKey
- addr *lnwire.NetAddress
- msgChan chan lnwire.Message
- announceChan chan lnwire.Message
- publTxChan chan *wire.MsgTx
- fundingMgr *fundingManager
- newChannels chan *newChannelMsg
- mockNotifier *mockNotifier
- mockChanEvent *mockChanEvent
- testDir string
- shutdownChannel chan struct{}
- remoteFeatures []lnwire.FeatureBit
-
- remotePeer *testNode
- sendMessage func(lnwire.Message) er.R
-}
-
-var _ lnpeer.Peer = (*testNode)(nil)
-
-func (n *testNode) IdentityKey() *btcec.PublicKey {
- return n.addr.IdentityKey
-}
-
-func (n *testNode) Address() net.Addr {
- return n.addr.Address
-}
-
-func (n *testNode) PubKey() [33]byte {
- return newSerializedKey(n.addr.IdentityKey)
-}
-
-func (n *testNode) SendMessage(_ bool, msg ...lnwire.Message) er.R {
- return n.sendMessage(msg[0])
-}
-
-func (n *testNode) SendMessageLazy(sync bool, msgs ...lnwire.Message) er.R {
- return n.SendMessage(sync, msgs...)
-}
-
-func (n *testNode) WipeChannel(_ *wire.OutPoint) {}
-
-func (n *testNode) QuitSignal() <-chan struct{} {
- return n.shutdownChannel
-}
-
-func (n *testNode) LocalFeatures() *lnwire.FeatureVector {
- return lnwire.NewFeatureVector(nil, nil)
-}
-
-func (n *testNode) RemoteFeatures() *lnwire.FeatureVector {
- return lnwire.NewFeatureVector(
- lnwire.NewRawFeatureVector(n.remoteFeatures...), nil,
- )
-}
-
-func (n *testNode) AddNewChannel(channel *channeldb.OpenChannel,
- quit <-chan struct{}) er.R {
-
- errChan := make(chan er.R)
- msg := &newChannelMsg{
- channel: channel,
- err: errChan,
- }
-
- select {
- case n.newChannels <- msg:
- case <-quit:
- return ErrFundingManagerShuttingDown.Default()
- }
-
- select {
- case err := <-errChan:
- return err
- case <-quit:
- return ErrFundingManagerShuttingDown.Default()
- }
-}
-
-func createTestWallet(cdb *channeldb.DB, netParams *chaincfg.Params,
- notifier chainntnfs.ChainNotifier, wc lnwallet.WalletController,
- signer input.Signer, keyRing keychain.SecretKeyRing,
- bio lnwallet.BlockChainIO,
- estimator chainfee.Estimator) (*lnwallet.LightningWallet, er.R) {
-
- wallet, err := lnwallet.NewLightningWallet(lnwallet.Config{
- Database: cdb,
- Notifier: notifier,
- SecretKeyRing: keyRing,
- WalletController: wc,
- Signer: signer,
- ChainIO: bio,
- FeeEstimator: estimator,
- NetParams: *netParams,
- DefaultConstraints: chainreg.DefaultBtcChannelConstraints,
- })
- if err != nil {
- return nil, err
- }
-
- if err := wallet.Startup(); err != nil {
- return nil, err
- }
-
- return wallet, nil
-}
-
-func createTestFundingManager(t *testing.T, privKey *btcec.PrivateKey,
- addr *lnwire.NetAddress, tempTestDir string,
- options ...cfgOption) (*testNode, er.R) {
-
- netParams := fundingNetParams.Params
- estimator := chainfee.NewStaticEstimator(62500, 0)
-
- chainNotifier := &mockNotifier{
- oneConfChannel: make(chan *chainntnfs.TxConfirmation, 1),
- sixConfChannel: make(chan *chainntnfs.TxConfirmation, 1),
- epochChan: make(chan *chainntnfs.BlockEpoch, 2),
- }
-
- sentMessages := make(chan lnwire.Message)
- sentAnnouncements := make(chan lnwire.Message)
- publTxChan := make(chan *wire.MsgTx, 1)
- shutdownChan := make(chan struct{})
-
- wc := &mock.WalletController{
- RootKey: alicePrivKey,
- }
- signer := &mock.SingleSigner{
- Privkey: alicePrivKey,
- }
- bio := &mock.ChainIO{
- BestHeight: fundingBroadcastHeight,
- }
-
- // The mock channel event notifier will receive events for each pending
- // open and open channel. Because some tests will create multiple
- // channels in a row before advancing to the next step, these channels
- // need to be buffered.
- evt := &mockChanEvent{
- openEvent: make(chan wire.OutPoint, maxPending),
- pendingOpenEvent: make(
- chan channelnotifier.PendingOpenChannelEvent, maxPending,
- ),
- }
-
- dbDir := filepath.Join(tempTestDir, "cdb")
- cdb, err := channeldb.Open(dbDir)
- if err != nil {
- return nil, err
- }
-
- keyRing := &mock.SecretKeyRing{
- RootKey: alicePrivKey,
- }
-
- lnw, err := createTestWallet(
- cdb, netParams, chainNotifier, wc, signer, keyRing, bio,
- estimator,
- )
- if err != nil {
- t.Fatalf("unable to create test ln wallet: %v", err)
- }
-
- var chanIDSeed [32]byte
-
- chainedAcceptor := chanacceptor.NewChainedAcceptor()
-
- fundingCfg := fundingConfig{
- IDKey: privKey.PubKey(),
- Wallet: lnw,
- Notifier: chainNotifier,
- FeeEstimator: estimator,
- SignMessage: func(pubKey *btcec.PublicKey,
- msg []byte) (input.Signature, er.R) {
-
- return testSig, nil
- },
- SendAnnouncement: func(msg lnwire.Message,
- _ ...discovery.OptionalMsgField) chan er.R {
-
- errChan := make(chan er.R, 1)
- select {
- case sentAnnouncements <- msg:
- errChan <- nil
- case <-shutdownChan:
- errChan <- er.Errorf("shutting down")
- }
- return errChan
- },
- CurrentNodeAnnouncement: func() (lnwire.NodeAnnouncement, er.R) {
- return lnwire.NodeAnnouncement{}, nil
- },
- TempChanIDSeed: chanIDSeed,
- FindChannel: func(chanID lnwire.ChannelID) (
- *channeldb.OpenChannel, er.R) {
- dbChannels, err := cdb.FetchAllChannels()
- if err != nil {
- return nil, err
- }
-
- for _, channel := range dbChannels {
- if chanID.IsChanPoint(&channel.FundingOutpoint) {
- return channel, nil
- }
- }
-
- return nil, er.Errorf("unable to find channel")
- },
- DefaultRoutingPolicy: htlcswitch.ForwardingPolicy{
- MinHTLCOut: 5,
- BaseFee: 100,
- FeeRate: 1000,
- TimeLockDelta: 10,
- },
- DefaultMinHtlcIn: 5,
- NumRequiredConfs: func(chanAmt btcutil.Amount,
- pushAmt lnwire.MilliSatoshi) uint16 {
- return 3
- },
- RequiredRemoteDelay: func(amt btcutil.Amount) uint16 {
- return 4
- },
- RequiredRemoteChanReserve: func(chanAmt,
- dustLimit btcutil.Amount) btcutil.Amount {
-
- reserve := chanAmt / 100
- if reserve < dustLimit {
- reserve = dustLimit
- }
-
- return reserve
- },
- RequiredRemoteMaxValue: func(chanAmt btcutil.Amount) lnwire.MilliSatoshi {
- reserve := lnwire.NewMSatFromSatoshis(chanAmt / 100)
- return lnwire.NewMSatFromSatoshis(chanAmt) - reserve
- },
- RequiredRemoteMaxHTLCs: func(chanAmt btcutil.Amount) uint16 {
- return uint16(input.MaxHTLCNumber / 2)
- },
- WatchNewChannel: func(*channeldb.OpenChannel, *btcec.PublicKey) er.R {
- return nil
- },
- ReportShortChanID: func(wire.OutPoint) er.R {
- return nil
- },
- PublishTransaction: func(txn *wire.MsgTx, _ string) er.R {
- publTxChan <- txn
- return nil
- },
- UpdateLabel: func(chainhash.Hash, string) er.R {
- return nil
- },
- ZombieSweeperInterval: 1 * time.Hour,
- ReservationTimeout: 1 * time.Nanosecond,
- MaxChanSize: MaxFundingAmount,
- MaxLocalCSVDelay: defaultMaxLocalCSVDelay,
- MaxPendingChannels: lncfg.DefaultMaxPendingChannels,
- NotifyOpenChannelEvent: evt.NotifyOpenChannelEvent,
- OpenChannelPredicate: chainedAcceptor,
- NotifyPendingOpenChannelEvent: evt.NotifyPendingOpenChannelEvent,
- RegisteredChains: chainreg.NewChainRegistry(),
- }
-
- for _, op := range options {
- op(&fundingCfg)
- }
-
- f, err := newFundingManager(fundingCfg)
- if err != nil {
- t.Fatalf("failed creating fundingManager: %v", err)
- }
- if err = f.Start(); err != nil {
- t.Fatalf("failed starting fundingManager: %v", err)
- }
-
- testNode := &testNode{
- privKey: privKey,
- msgChan: sentMessages,
- newChannels: make(chan *newChannelMsg),
- announceChan: sentAnnouncements,
- publTxChan: publTxChan,
- fundingMgr: f,
- mockNotifier: chainNotifier,
- mockChanEvent: evt,
- testDir: tempTestDir,
- shutdownChannel: shutdownChan,
- addr: addr,
- }
-
- f.cfg.NotifyWhenOnline = func(peer [33]byte,
- connectedChan chan<- lnpeer.Peer) {
-
- connectedChan <- testNode.remotePeer
- }
-
- return testNode, nil
-}
-
-func recreateAliceFundingManager(t *testing.T, alice *testNode) {
- // Stop the old fundingManager before creating a new one.
- close(alice.shutdownChannel)
- if err := alice.fundingMgr.Stop(); err != nil {
- t.Fatalf("unable to stop old fundingManager: %v", err)
- }
-
- aliceMsgChan := make(chan lnwire.Message)
- aliceAnnounceChan := make(chan lnwire.Message)
- shutdownChan := make(chan struct{})
- publishChan := make(chan *wire.MsgTx, 10)
-
- oldCfg := alice.fundingMgr.cfg
-
- chainedAcceptor := chanacceptor.NewChainedAcceptor()
-
- f, err := newFundingManager(fundingConfig{
- IDKey: oldCfg.IDKey,
- Wallet: oldCfg.Wallet,
- Notifier: oldCfg.Notifier,
- FeeEstimator: oldCfg.FeeEstimator,
- SignMessage: func(pubKey *btcec.PublicKey,
- msg []byte) (input.Signature, er.R) {
- return testSig, nil
- },
- SendAnnouncement: func(msg lnwire.Message,
- _ ...discovery.OptionalMsgField) chan er.R {
-
- errChan := make(chan er.R, 1)
- select {
- case aliceAnnounceChan <- msg:
- errChan <- nil
- case <-shutdownChan:
- errChan <- er.Errorf("shutting down")
- }
- return errChan
- },
- CurrentNodeAnnouncement: func() (lnwire.NodeAnnouncement, er.R) {
- return lnwire.NodeAnnouncement{}, nil
- },
- NotifyWhenOnline: func(peer [33]byte,
- connectedChan chan<- lnpeer.Peer) {
-
- connectedChan <- alice.remotePeer
- },
- TempChanIDSeed: oldCfg.TempChanIDSeed,
- FindChannel: oldCfg.FindChannel,
- DefaultRoutingPolicy: htlcswitch.ForwardingPolicy{
- MinHTLCOut: 5,
- BaseFee: 100,
- FeeRate: 1000,
- TimeLockDelta: 10,
- },
- DefaultMinHtlcIn: 5,
- RequiredRemoteMaxValue: oldCfg.RequiredRemoteMaxValue,
- PublishTransaction: func(txn *wire.MsgTx, _ string) er.R {
- publishChan <- txn
- return nil
- },
- UpdateLabel: func(chainhash.Hash, string) er.R {
- return nil
- },
- ZombieSweeperInterval: oldCfg.ZombieSweeperInterval,
- ReservationTimeout: oldCfg.ReservationTimeout,
- OpenChannelPredicate: chainedAcceptor,
- })
- if err != nil {
- t.Fatalf("failed recreating aliceFundingManager: %v", err)
- }
-
- alice.fundingMgr = f
- alice.msgChan = aliceMsgChan
- alice.announceChan = aliceAnnounceChan
- alice.publTxChan = publishChan
- alice.shutdownChannel = shutdownChan
-
- if err = f.Start(); err != nil {
- t.Fatalf("failed starting fundingManager: %v", err)
- }
-}
-
-type cfgOption func(*fundingConfig)
-
-func setupFundingManagers(t *testing.T,
- options ...cfgOption) (*testNode, *testNode) {
-
- aliceTestDir, errr := ioutil.TempDir("", "alicelnwallet")
- if errr != nil {
- t.Fatalf("unable to create temp directory: %v", errr)
- }
-
- alice, err := createTestFundingManager(
- t, alicePrivKey, aliceAddr, aliceTestDir, options...,
- )
- if err != nil {
- t.Fatalf("failed creating fundingManager: %v", err)
- }
-
- bobTestDir, errr := ioutil.TempDir("", "boblnwallet")
- if errr != nil {
- t.Fatalf("unable to create temp directory: %v", errr)
- }
-
- bob, err := createTestFundingManager(
- t, bobPrivKey, bobAddr, bobTestDir, options...,
- )
- if err != nil {
- t.Fatalf("failed creating fundingManager: %v", err)
- }
-
- // With the funding manager's created, we'll now attempt to mimic a
- // connection pipe between them. In order to intercept the messages
- // within it, we'll redirect all messages back to the msgChan of the
- // sender. Since the fundingManager now has a reference to peers itself,
- // alice.sendMessage will be triggered when Bob's funding manager
- // attempts to send a message to Alice and vice versa.
- alice.remotePeer = bob
- alice.sendMessage = func(msg lnwire.Message) er.R {
- select {
- case alice.remotePeer.msgChan <- msg:
- case <-alice.shutdownChannel:
- return er.New("shutting down")
- }
- return nil
- }
-
- bob.remotePeer = alice
- bob.sendMessage = func(msg lnwire.Message) er.R {
- select {
- case bob.remotePeer.msgChan <- msg:
- case <-bob.shutdownChannel:
- return er.New("shutting down")
- }
- return nil
- }
-
- return alice, bob
-}
-
-func tearDownFundingManagers(t *testing.T, a, b *testNode) {
- close(a.shutdownChannel)
- close(b.shutdownChannel)
-
- if err := a.fundingMgr.Stop(); err != nil {
- t.Fatalf("unable to stop fundingManager: %v", err)
- }
- if err := b.fundingMgr.Stop(); err != nil {
- t.Fatalf("unable to stop fundingManager: %v", err)
- }
- os.RemoveAll(a.testDir)
- os.RemoveAll(b.testDir)
-}
-
-// openChannel takes the funding process to the point where the funding
-// transaction is confirmed on-chain. Returns the funding out point.
-func openChannel(t *testing.T, alice, bob *testNode, localFundingAmt,
- pushAmt btcutil.Amount, numConfs uint32,
- updateChan chan *lnrpc.OpenStatusUpdate, announceChan bool) (
- *wire.OutPoint, *wire.MsgTx) {
-
- publ := fundChannel(
- t, alice, bob, localFundingAmt, pushAmt, false, numConfs,
- updateChan, announceChan,
- )
- fundingOutPoint := &wire.OutPoint{
- Hash: publ.TxHash(),
- Index: 0,
- }
- return fundingOutPoint, publ
-}
-
-// fundChannel takes the funding process to the point where the funding
-// transaction is confirmed on-chain. Returns the funding tx.
-func fundChannel(t *testing.T, alice, bob *testNode, localFundingAmt,
- pushAmt btcutil.Amount, subtractFees bool, numConfs uint32,
- updateChan chan *lnrpc.OpenStatusUpdate, announceChan bool) *wire.MsgTx {
-
- // Create a funding request and start the workflow.
- errChan := make(chan er.R, 1)
- initReq := &openChanReq{
- targetPubkey: bob.privKey.PubKey(),
- chainHash: *fundingNetParams.GenesisHash,
- subtractFees: subtractFees,
- localFundingAmt: localFundingAmt,
- pushAmt: lnwire.NewMSatFromSatoshis(pushAmt),
- fundingFeePerKw: 1000,
- private: !announceChan,
- updates: updateChan,
- err: errChan,
- }
-
- alice.fundingMgr.initFundingWorkflow(bob, initReq)
-
- // Alice should have sent the OpenChannel message to Bob.
- var aliceMsg lnwire.Message
- select {
- case aliceMsg = <-alice.msgChan:
- case err := <-initReq.err:
- t.Fatalf("error init funding workflow: %v", err)
- case <-time.After(time.Second * 5):
- t.Fatalf("alice did not send OpenChannel message")
- }
-
- openChannelReq, ok := aliceMsg.(*lnwire.OpenChannel)
- if !ok {
- errorMsg, gotError := aliceMsg.(*lnwire.Error)
- if gotError {
- t.Fatalf("expected OpenChannel to be sent "+
- "from bob, instead got error: %v",
- errorMsg.Error())
- }
- t.Fatalf("expected OpenChannel to be sent from "+
- "alice, instead got %T", aliceMsg)
- }
-
- // Let Bob handle the init message.
- bob.fundingMgr.ProcessFundingMsg(openChannelReq, alice)
-
- // Bob should answer with an AcceptChannel message.
- acceptChannelResponse := assertFundingMsgSent(
- t, bob.msgChan, "AcceptChannel",
- ).(*lnwire.AcceptChannel)
-
- // They now should both have pending reservations for this channel
- // active.
- assertNumPendingReservations(t, alice, bobPubKey, 1)
- assertNumPendingReservations(t, bob, alicePubKey, 1)
-
- // Forward the response to Alice.
- alice.fundingMgr.ProcessFundingMsg(acceptChannelResponse, bob)
-
- // Alice responds with a FundingCreated message.
- fundingCreated := assertFundingMsgSent(
- t, alice.msgChan, "FundingCreated",
- ).(*lnwire.FundingCreated)
-
- // Give the message to Bob.
- bob.fundingMgr.ProcessFundingMsg(fundingCreated, alice)
-
- // Finally, Bob should send the FundingSigned message.
- fundingSigned := assertFundingMsgSent(
- t, bob.msgChan, "FundingSigned",
- ).(*lnwire.FundingSigned)
-
- // Forward the signature to Alice.
- alice.fundingMgr.ProcessFundingMsg(fundingSigned, bob)
-
- // After Alice processes the singleFundingSignComplete message, she will
- // broadcast the funding transaction to the network. We expect to get a
- // channel update saying the channel is pending.
- var pendingUpdate *lnrpc.OpenStatusUpdate
- select {
- case pendingUpdate = <-updateChan:
- case <-time.After(time.Second * 5):
- t.Fatalf("alice did not send OpenStatusUpdate_ChanPending")
- }
-
- _, ok = pendingUpdate.Update.(*lnrpc.OpenStatusUpdate_ChanPending)
- if !ok {
- t.Fatal("OpenStatusUpdate was not OpenStatusUpdate_ChanPending")
- }
-
- // Get and return the transaction Alice published to the network.
- var publ *wire.MsgTx
- select {
- case publ = <-alice.publTxChan:
- case <-time.After(time.Second * 5):
- t.Fatalf("alice did not publish funding tx")
- }
-
- // Make sure the notification about the pending channel was sent out.
- select {
- case <-alice.mockChanEvent.pendingOpenEvent:
- case <-time.After(time.Second * 5):
- t.Fatalf("alice did not send pending channel event")
- }
- select {
- case <-bob.mockChanEvent.pendingOpenEvent:
- case <-time.After(time.Second * 5):
- t.Fatalf("bob did not send pending channel event")
- }
-
- // Finally, make sure neither have active reservation for the channel
- // now pending open in the database.
- assertNumPendingReservations(t, alice, bobPubKey, 0)
- assertNumPendingReservations(t, bob, alicePubKey, 0)
-
- return publ
-}
-
-func assertErrorNotSent(t *testing.T, msgChan chan lnwire.Message) {
- t.Helper()
-
- select {
- case <-msgChan:
- t.Fatalf("error sent unexpectedly")
- case <-time.After(100 * time.Millisecond):
- // Expected, return.
- }
-}
-
-func assertErrorSent(t *testing.T, msgChan chan lnwire.Message) {
- t.Helper()
-
- var msg lnwire.Message
- select {
- case msg = <-msgChan:
- case <-time.After(time.Second * 5):
- t.Fatalf("node did not send Error message")
- }
- _, ok := msg.(*lnwire.Error)
- if !ok {
- t.Fatalf("expected Error to be sent from "+
- "node, instead got %T", msg)
- }
-}
-
-func assertFundingMsgSent(t *testing.T, msgChan chan lnwire.Message,
- msgType string) lnwire.Message {
- t.Helper()
-
- var msg lnwire.Message
- select {
- case msg = <-msgChan:
- case <-time.After(time.Second * 5):
- t.Fatalf("peer did not send %s message", msgType)
- }
-
- var (
- sentMsg lnwire.Message
- ok bool
- )
- switch msgType {
- case "AcceptChannel":
- sentMsg, ok = msg.(*lnwire.AcceptChannel)
- case "FundingCreated":
- sentMsg, ok = msg.(*lnwire.FundingCreated)
- case "FundingSigned":
- sentMsg, ok = msg.(*lnwire.FundingSigned)
- case "FundingLocked":
- sentMsg, ok = msg.(*lnwire.FundingLocked)
- case "Error":
- sentMsg, ok = msg.(*lnwire.Error)
- default:
- t.Fatalf("unknown message type: %s", msgType)
- }
-
- if !ok {
- errorMsg, gotError := msg.(*lnwire.Error)
- if gotError {
- t.Fatalf("expected %s to be sent, instead got error: %v",
- msgType, errorMsg.Error())
- }
-
- _, _, line, _ := runtime.Caller(1)
- t.Fatalf("expected %s to be sent, instead got %T at %v",
- msgType, msg, line)
- }
-
- return sentMsg
-}
-
-func assertNumPendingReservations(t *testing.T, node *testNode,
- peerPubKey *btcec.PublicKey, expectedNum int) {
- t.Helper()
-
- serializedPubKey := newSerializedKey(peerPubKey)
- actualNum := len(node.fundingMgr.activeReservations[serializedPubKey])
- if actualNum == expectedNum {
- // Success, return.
- return
- }
-
- t.Fatalf("Expected node to have %d pending reservations, had %v",
- expectedNum, actualNum)
-}
-
-func assertNumPendingChannelsBecomes(t *testing.T, node *testNode, expectedNum int) {
- t.Helper()
-
- var numPendingChans int
- for i := 0; i < testPollNumTries; i++ {
- // If this is not the first try, sleep before retrying.
- if i > 0 {
- time.Sleep(testPollSleepMs * time.Millisecond)
- }
- pendingChannels, err := node.fundingMgr.
- cfg.Wallet.Cfg.Database.FetchPendingChannels()
- if err != nil {
- t.Fatalf("unable to fetch pending channels: %v", err)
- }
-
- numPendingChans = len(pendingChannels)
- if numPendingChans == expectedNum {
- // Success, return.
- return
- }
- }
-
- t.Fatalf("Expected node to have %d pending channels, had %v",
- expectedNum, numPendingChans)
-}
-
-func assertNumPendingChannelsRemains(t *testing.T, node *testNode, expectedNum int) {
- t.Helper()
-
- var numPendingChans int
- for i := 0; i < 5; i++ {
- // If this is not the first try, sleep before retrying.
- if i > 0 {
- time.Sleep(200 * time.Millisecond)
- }
- pendingChannels, err := node.fundingMgr.
- cfg.Wallet.Cfg.Database.FetchPendingChannels()
- if err != nil {
- t.Fatalf("unable to fetch pending channels: %v", err)
- }
-
- numPendingChans = len(pendingChannels)
- if numPendingChans != expectedNum {
-
- t.Fatalf("Expected node to have %d pending channels, had %v",
- expectedNum, numPendingChans)
- }
- }
-}
-
-func assertDatabaseState(t *testing.T, node *testNode,
- fundingOutPoint *wire.OutPoint, expectedState channelOpeningState) {
- t.Helper()
-
- var state channelOpeningState
- var err er.R
- for i := 0; i < testPollNumTries; i++ {
- // If this is not the first try, sleep before retrying.
- if i > 0 {
- time.Sleep(testPollSleepMs * time.Millisecond)
- }
- state, _, err = node.fundingMgr.getChannelOpeningState(
- fundingOutPoint)
- if err != nil && !ErrChannelNotFound.Is(err) {
- t.Fatalf("unable to get channel state: %v", err)
- }
-
- // If we found the channel, check if it had the expected state.
- if !ErrChannelNotFound.Is(err) && state == expectedState {
- // Got expected state, return with success.
- return
- }
- }
-
- // 10 tries without success.
- if err != nil {
- t.Fatalf("error getting channelOpeningState: %v", err)
- } else {
- t.Fatalf("expected state to be %v, was %v", expectedState,
- state)
- }
-}
-
-func assertMarkedOpen(t *testing.T, alice, bob *testNode,
- fundingOutPoint *wire.OutPoint) {
- t.Helper()
-
- // Make sure the notification about the pending channel was sent out.
- select {
- case <-alice.mockChanEvent.openEvent:
- case <-time.After(time.Second * 5):
- t.Fatalf("alice did not send open channel event")
- }
- select {
- case <-bob.mockChanEvent.openEvent:
- case <-time.After(time.Second * 5):
- t.Fatalf("bob did not send open channel event")
- }
-
- assertDatabaseState(t, alice, fundingOutPoint, markedOpen)
- assertDatabaseState(t, bob, fundingOutPoint, markedOpen)
-}
-
-func assertFundingLockedSent(t *testing.T, alice, bob *testNode,
- fundingOutPoint *wire.OutPoint) {
- t.Helper()
-
- assertDatabaseState(t, alice, fundingOutPoint, fundingLockedSent)
- assertDatabaseState(t, bob, fundingOutPoint, fundingLockedSent)
-}
-
-func assertAddedToRouterGraph(t *testing.T, alice, bob *testNode,
- fundingOutPoint *wire.OutPoint) {
- t.Helper()
-
- assertDatabaseState(t, alice, fundingOutPoint, addedToRouterGraph)
- assertDatabaseState(t, bob, fundingOutPoint, addedToRouterGraph)
-}
-
-// assertChannelAnnouncements checks that alice and bob both sends the expected
-// announcements (ChannelAnnouncement, ChannelUpdate) after the funding tx has
-// confirmed. The last arguments can be set if we expect the nodes to advertise
-// custom min_htlc values as part of their ChannelUpdate. We expect Alice to
-// advertise the value required by Bob and vice versa. If they are not set the
-// advertised value will be checked against the other node's default min_htlc
-// value.
-func assertChannelAnnouncements(t *testing.T, alice, bob *testNode,
- capacity btcutil.Amount, customMinHtlc []lnwire.MilliSatoshi,
- customMaxHtlc []lnwire.MilliSatoshi) {
- t.Helper()
-
- // After the FundingLocked message is sent, Alice and Bob will each
- // send the following messages to their gossiper:
- // 1) ChannelAnnouncement
- // 2) ChannelUpdate
- // The ChannelAnnouncement is kept locally, while the ChannelUpdate
- // is sent directly to the other peer, so the edge policies are
- // known to both peers.
- nodes := []*testNode{alice, bob}
- for j, node := range nodes {
- announcements := make([]lnwire.Message, 2)
- for i := 0; i < len(announcements); i++ {
- select {
- case announcements[i] = <-node.announceChan:
- case <-time.After(time.Second * 5):
- t.Fatalf("node did not send announcement: %v", i)
- }
- }
-
- gotChannelAnnouncement := false
- gotChannelUpdate := false
- for _, msg := range announcements {
- switch m := msg.(type) {
- case *lnwire.ChannelAnnouncement:
- gotChannelAnnouncement = true
- case *lnwire.ChannelUpdate:
-
- // The channel update sent by the node should
- // advertise the MinHTLC value required by the
- // _other_ node.
- other := (j + 1) % 2
- minHtlc := nodes[other].fundingMgr.cfg.
- DefaultMinHtlcIn
-
- // We might expect a custom MinHTLC value.
- if len(customMinHtlc) > 0 {
- if len(customMinHtlc) != 2 {
- t.Fatalf("only 0 or 2 custom " +
- "min htlc values " +
- "currently supported")
- }
-
- minHtlc = customMinHtlc[j]
- }
-
- if m.HtlcMinimumMsat != minHtlc {
- t.Fatalf("expected ChannelUpdate to "+
- "advertise min HTLC %v, had %v",
- minHtlc, m.HtlcMinimumMsat)
- }
-
- maxHtlc := alice.fundingMgr.cfg.RequiredRemoteMaxValue(
- capacity,
- )
- // We might expect a custom MaxHltc value.
- if len(customMaxHtlc) > 0 {
- if len(customMaxHtlc) != 2 {
- t.Fatalf("only 0 or 2 custom " +
- "min htlc values " +
- "currently supported")
- }
-
- maxHtlc = customMaxHtlc[j]
- }
- if m.MessageFlags != 1 {
- t.Fatalf("expected message flags to "+
- "be 1, was %v", m.MessageFlags)
- }
-
- if maxHtlc != m.HtlcMaximumMsat {
- t.Fatalf("expected ChannelUpdate to "+
- "advertise max HTLC %v, had %v",
- maxHtlc,
- m.HtlcMaximumMsat)
- }
-
- gotChannelUpdate = true
- }
- }
-
- if !gotChannelAnnouncement {
- t.Fatalf("did not get ChannelAnnouncement from node %d",
- j)
- }
- if !gotChannelUpdate {
- t.Fatalf("did not get ChannelUpdate from node %d", j)
- }
-
- // Make sure no other message is sent.
- select {
- case <-node.announceChan:
- t.Fatalf("received unexpected announcement")
- case <-time.After(300 * time.Millisecond):
- // Expected
- }
- }
-}
-
-func assertAnnouncementSignatures(t *testing.T, alice, bob *testNode) {
- t.Helper()
-
- // After the FundingLocked message is sent and six confirmations have
- // been reached, the channel will be announced to the greater network
- // by having the nodes exchange announcement signatures.
- // Two distinct messages will be sent:
- // 1) AnnouncementSignatures
- // 2) NodeAnnouncement
- // These may arrive in no particular order.
- // Note that sending the NodeAnnouncement at this point is an
- // implementation detail, and not something required by the LN spec.
- for j, node := range []*testNode{alice, bob} {
- announcements := make([]lnwire.Message, 2)
- for i := 0; i < len(announcements); i++ {
- select {
- case announcements[i] = <-node.announceChan:
- case <-time.After(time.Second * 5):
- t.Fatalf("node did not send announcement %v", i)
- }
- }
-
- gotAnnounceSignatures := false
- gotNodeAnnouncement := false
- for _, msg := range announcements {
- switch msg.(type) {
- case *lnwire.AnnounceSignatures:
- gotAnnounceSignatures = true
- case *lnwire.NodeAnnouncement:
- gotNodeAnnouncement = true
- }
- }
-
- if !gotAnnounceSignatures {
- t.Fatalf("did not get AnnounceSignatures from node %d",
- j)
- }
- if !gotNodeAnnouncement {
- t.Fatalf("did not get NodeAnnouncement from node %d", j)
- }
- }
-}
-
-func waitForOpenUpdate(t *testing.T, updateChan chan *lnrpc.OpenStatusUpdate) {
- var openUpdate *lnrpc.OpenStatusUpdate
- select {
- case openUpdate = <-updateChan:
- case <-time.After(time.Second * 5):
- t.Fatalf("alice did not send OpenStatusUpdate")
- }
-
- _, ok := openUpdate.Update.(*lnrpc.OpenStatusUpdate_ChanOpen)
- if !ok {
- t.Fatal("OpenStatusUpdate was not OpenStatusUpdate_ChanOpen")
- }
-}
-
-func assertNoChannelState(t *testing.T, alice, bob *testNode,
- fundingOutPoint *wire.OutPoint) {
- t.Helper()
-
- assertErrChannelNotFound(t, alice, fundingOutPoint)
- assertErrChannelNotFound(t, bob, fundingOutPoint)
-}
-
-func assertErrChannelNotFound(t *testing.T, node *testNode,
- fundingOutPoint *wire.OutPoint) {
- t.Helper()
-
- var state channelOpeningState
- var err er.R
- for i := 0; i < testPollNumTries; i++ {
- // If this is not the first try, sleep before retrying.
- if i > 0 {
- time.Sleep(testPollSleepMs * time.Millisecond)
- }
- state, _, err = node.fundingMgr.getChannelOpeningState(
- fundingOutPoint)
- if ErrChannelNotFound.Is(err) {
- // Got expected state, return with success.
- return
- } else if err != nil {
- t.Fatalf("unable to get channel state: %v", err)
- }
- }
-
- // 10 tries without success.
- t.Fatalf("expected to not find state, found state %v", state)
-}
-
-func assertHandleFundingLocked(t *testing.T, alice, bob *testNode) {
- t.Helper()
-
- // They should both send the new channel state to their peer.
- select {
- case c := <-alice.newChannels:
- close(c.err)
- case <-time.After(time.Second * 15):
- t.Fatalf("alice did not send new channel to peer")
- }
-
- select {
- case c := <-bob.newChannels:
- close(c.err)
- case <-time.After(time.Second * 15):
- t.Fatalf("bob did not send new channel to peer")
- }
-}
-
-func TestFundingManagerNormalWorkflow(t *testing.T) {
- t.Parallel()
-
- alice, bob := setupFundingManagers(t)
- defer tearDownFundingManagers(t, alice, bob)
-
- // We will consume the channel updates as we go, so no buffering is needed.
- updateChan := make(chan *lnrpc.OpenStatusUpdate)
-
- // Run through the process of opening the channel, up until the funding
- // transaction is broadcasted.
- localAmt := btcutil.Amount(500000)
- pushAmt := btcutil.Amount(0)
- capacity := localAmt + pushAmt
- fundingOutPoint, fundingTx := openChannel(
- t, alice, bob, localAmt, pushAmt, 1, updateChan, true,
- )
-
- // Check that neither Alice nor Bob sent an error message.
- assertErrorNotSent(t, alice.msgChan)
- assertErrorNotSent(t, bob.msgChan)
-
- // Notify that transaction was mined.
- alice.mockNotifier.oneConfChannel <- &chainntnfs.TxConfirmation{
- Tx: fundingTx,
- }
- bob.mockNotifier.oneConfChannel <- &chainntnfs.TxConfirmation{
- Tx: fundingTx,
- }
-
- // The funding transaction was mined, so assert that both funding
- // managers now have the state of this channel 'markedOpen' in their
- // internal state machine.
- assertMarkedOpen(t, alice, bob, fundingOutPoint)
-
- // After the funding transaction is mined, Alice will send
- // fundingLocked to Bob.
- fundingLockedAlice := assertFundingMsgSent(
- t, alice.msgChan, "FundingLocked",
- ).(*lnwire.FundingLocked)
-
- // And similarly Bob will send funding locked to Alice.
- fundingLockedBob := assertFundingMsgSent(
- t, bob.msgChan, "FundingLocked",
- ).(*lnwire.FundingLocked)
-
- // Check that the state machine is updated accordingly
- assertFundingLockedSent(t, alice, bob, fundingOutPoint)
-
- // Make sure both fundingManagers send the expected channel
- // announcements.
- assertChannelAnnouncements(t, alice, bob, capacity, nil, nil)
-
- // Check that the state machine is updated accordingly
- assertAddedToRouterGraph(t, alice, bob, fundingOutPoint)
-
- // The funding transaction is now confirmed, wait for the
- // OpenStatusUpdate_ChanOpen update
- waitForOpenUpdate(t, updateChan)
-
- // Exchange the fundingLocked messages.
- alice.fundingMgr.ProcessFundingMsg(fundingLockedBob, bob)
- bob.fundingMgr.ProcessFundingMsg(fundingLockedAlice, alice)
-
- // Check that they notify the breach arbiter and peer about the new
- // channel.
- assertHandleFundingLocked(t, alice, bob)
-
- // Notify that six confirmations has been reached on funding transaction.
- alice.mockNotifier.sixConfChannel <- &chainntnfs.TxConfirmation{
- Tx: fundingTx,
- }
- bob.mockNotifier.sixConfChannel <- &chainntnfs.TxConfirmation{
- Tx: fundingTx,
- }
-
- // Make sure the fundingManagers exchange announcement signatures.
- assertAnnouncementSignatures(t, alice, bob)
-
- // The internal state-machine should now have deleted the channelStates
- // from the database, as the channel is announced.
- assertNoChannelState(t, alice, bob, fundingOutPoint)
-}
-
-// TestFundingManagerRejectCSV tests checking of local CSV values against our
-// local CSV limit for incoming and outgoing channels.
-func TestFundingManagerRejectCSV(t *testing.T) {
- t.Run("csv too high", func(t *testing.T) {
- testLocalCSVLimit(t, 400, 500)
- })
- t.Run("csv within limit", func(t *testing.T) {
- testLocalCSVLimit(t, 600, 500)
- })
-}
-
-// testLocalCSVLimit creates two funding managers, alice and bob, where alice
-// has a limit on her maximum local CSV and bob sets his required CSV for alice.
-// We test an incoming and outgoing channel, ensuring that alice accepts csvs
-// below her maximum, and rejects those above it.
-func testLocalCSVLimit(t *testing.T, aliceMaxCSV, bobRequiredCSV uint16) {
- t.Parallel()
-
- alice, bob := setupFundingManagers(t)
- defer tearDownFundingManagers(t, alice, bob)
-
- // Set a maximum local delay in alice's config to aliceMaxCSV and overwrite
- // bob's required remote delay function to return bobRequiredCSV.
- alice.fundingMgr.cfg.MaxLocalCSVDelay = aliceMaxCSV
- bob.fundingMgr.cfg.RequiredRemoteDelay = func(_ btcutil.Amount) uint16 {
- return bobRequiredCSV
- }
-
- // For convenience, we bump our max pending channels to 2 so that we
- // can test incoming and outgoing channels without needing to step
- // through the full funding process.
- alice.fundingMgr.cfg.MaxPendingChannels = 2
- bob.fundingMgr.cfg.MaxPendingChannels = 2
-
- // If our maximum is less than the value bob sets, we expect this test
- // to fail.
- expectFail := aliceMaxCSV < bobRequiredCSV
-
- // First, we will initiate an outgoing channel from Alice -> Bob.
- errChan := make(chan er.R, 1)
- updateChan := make(chan *lnrpc.OpenStatusUpdate)
- initReq := &openChanReq{
- targetPubkey: bob.privKey.PubKey(),
- chainHash: *fundingNetParams.GenesisHash,
- localFundingAmt: 200000,
- fundingFeePerKw: 1000,
- updates: updateChan,
- err: errChan,
- }
-
- // Alice should have sent the OpenChannel message to Bob.
- alice.fundingMgr.initFundingWorkflow(bob, initReq)
- var aliceMsg lnwire.Message
- select {
- case aliceMsg = <-alice.msgChan:
-
- case err := <-initReq.err:
- t.Fatalf("error init funding workflow: %v", err)
-
- case <-time.After(time.Second * 5):
- t.Fatalf("alice did not send OpenChannel message")
- }
-
- openChannelReq, ok := aliceMsg.(*lnwire.OpenChannel)
- require.True(t, ok)
-
- // Let Bob handle the init message.
- bob.fundingMgr.ProcessFundingMsg(openChannelReq, alice)
-
- // Bob should answer with an AcceptChannel message.
- acceptChannelResponse := assertFundingMsgSent(
- t, bob.msgChan, "AcceptChannel",
- ).(*lnwire.AcceptChannel)
-
- // They now should both have pending reservations for this channel
- // active.
- assertNumPendingReservations(t, alice, bobPubKey, 1)
- assertNumPendingReservations(t, bob, alicePubKey, 1)
-
- // Forward the response to Alice.
- alice.fundingMgr.ProcessFundingMsg(acceptChannelResponse, bob)
-
- // At this point, Alice has received an AcceptChannel message from
- // bob with the CSV value that he has set for her, and has to evaluate
- // whether she wants to accept this channel. If we get an error, we
- // assert that we expected the channel to fail, otherwise we assert that
- // she proceeded with the channel open as usual.
- select {
- case err := <-errChan:
- util.RequireErr(t, err)
- require.True(t, expectFail)
-
- case msg := <-alice.msgChan:
- _, ok := msg.(*lnwire.FundingCreated)
- require.True(t, ok)
- require.False(t, expectFail)
-
- case <-time.After(time.Second):
- t.Fatal("funding flow was not failed")
- }
-
- // We do not need to complete the rest of the funding flow (it is
- // covered in other tests). So now we test that Alice will appropriately
- // handle incoming channels, opening a channel from Bob->Alice.
- errChan = make(chan er.R, 1)
- updateChan = make(chan *lnrpc.OpenStatusUpdate)
- initReq = &openChanReq{
- targetPubkey: alice.privKey.PubKey(),
- chainHash: *fundingNetParams.GenesisHash,
- localFundingAmt: 200000,
- fundingFeePerKw: 1000,
- updates: updateChan,
- err: errChan,
- }
-
- bob.fundingMgr.initFundingWorkflow(alice, initReq)
-
- // Bob should have sent the OpenChannel message to Alice.
- var bobMsg lnwire.Message
- select {
- case bobMsg = <-bob.msgChan:
-
- case err := <-initReq.err:
- t.Fatalf("bob OpenChannel message failed: %v", err)
-
- case <-time.After(time.Second * 5):
- t.Fatalf("bob did not send OpenChannel message")
- }
-
- openChannelReq, ok = bobMsg.(*lnwire.OpenChannel)
- require.True(t, ok)
-
- // Let Alice handle the init message.
- alice.fundingMgr.ProcessFundingMsg(openChannelReq, bob)
-
- // We expect a error message from Alice if we're expecting the channel
- // to fail, otherwise we expect her to proceed with the channel as
- // usual.
- select {
- case msg := <-alice.msgChan:
- var ok bool
- if expectFail {
- _, ok = msg.(*lnwire.Error)
- } else {
- _, ok = msg.(*lnwire.AcceptChannel)
- }
- require.True(t, ok)
-
- case <-time.After(time.Second * 5):
- t.Fatal("funding flow was not failed")
- }
-}
-
-func TestFundingManagerRestartBehavior(t *testing.T) {
- t.Parallel()
-
- alice, bob := setupFundingManagers(t)
- defer tearDownFundingManagers(t, alice, bob)
-
- // Run through the process of opening the channel, up until the funding
- // transaction is broadcasted.
- localAmt := btcutil.Amount(500000)
- pushAmt := btcutil.Amount(0)
- capacity := localAmt + pushAmt
- updateChan := make(chan *lnrpc.OpenStatusUpdate)
- fundingOutPoint, fundingTx := openChannel(
- t, alice, bob, localAmt, pushAmt, 1, updateChan, true,
- )
-
- // After the funding transaction gets mined, both nodes will send the
- // fundingLocked message to the other peer. If the funding node fails
- // before this message has been successfully sent, it should retry
- // sending it on restart. We mimic this behavior by letting the
- // SendToPeer method return an error, as if the message was not
- // successfully sent. We then recreate the fundingManager and make sure
- // it continues the process as expected. We'll save the current
- // implementation of sendMessage to restore the original behavior later
- // on.
- workingSendMessage := bob.sendMessage
- bob.sendMessage = func(msg lnwire.Message) er.R {
- return er.Errorf("intentional error in SendToPeer")
- }
- alice.fundingMgr.cfg.NotifyWhenOnline = func(peer [33]byte,
- con chan<- lnpeer.Peer) {
- // Intentionally empty.
- }
-
- // Notify that transaction was mined
- alice.mockNotifier.oneConfChannel <- &chainntnfs.TxConfirmation{
- Tx: fundingTx,
- }
- bob.mockNotifier.oneConfChannel <- &chainntnfs.TxConfirmation{
- Tx: fundingTx,
- }
-
- // The funding transaction was mined, so assert that both funding
- // managers now have the state of this channel 'markedOpen' in their
- // internal state machine.
- assertMarkedOpen(t, alice, bob, fundingOutPoint)
-
- // After the funding transaction was mined, Bob should have successfully
- // sent the fundingLocked message, while Alice failed sending it. In
- // Alice's case this means that there should be no messages for Bob, and
- // the channel should still be in state 'markedOpen'
- select {
- case msg := <-alice.msgChan:
- t.Fatalf("did not expect any message from Alice: %v", msg)
- default:
- // Expected.
- }
-
- // Bob will send funding locked to Alice.
- fundingLockedBob := assertFundingMsgSent(
- t, bob.msgChan, "FundingLocked",
- ).(*lnwire.FundingLocked)
-
- // Alice should still be markedOpen
- assertDatabaseState(t, alice, fundingOutPoint, markedOpen)
-
- // While Bob successfully sent fundingLocked.
- assertDatabaseState(t, bob, fundingOutPoint, fundingLockedSent)
-
- // We now recreate Alice's fundingManager with the correct sendMessage
- // implementation, and expect it to retry sending the fundingLocked
- // message. We'll explicitly shut down Alice's funding manager to
- // prevent a race when overriding the sendMessage implementation.
- if err := alice.fundingMgr.Stop(); err != nil {
- t.Fatalf("unable to stop alice's funding manager: %v", err)
- }
- bob.sendMessage = workingSendMessage
- recreateAliceFundingManager(t, alice)
-
- // Intentionally make the channel announcements fail
- alice.fundingMgr.cfg.SendAnnouncement = func(msg lnwire.Message,
- _ ...discovery.OptionalMsgField) chan er.R {
-
- errChan := make(chan er.R, 1)
- errChan <- er.Errorf("intentional error in SendAnnouncement")
- return errChan
- }
-
- fundingLockedAlice := assertFundingMsgSent(
- t, alice.msgChan, "FundingLocked",
- ).(*lnwire.FundingLocked)
-
- // The state should now be fundingLockedSent
- assertDatabaseState(t, alice, fundingOutPoint, fundingLockedSent)
-
- // Check that the channel announcements were never sent
- select {
- case ann := <-alice.announceChan:
- t.Fatalf("unexpectedly got channel announcement message: %v",
- ann)
- default:
- // Expected
- }
-
- // Exchange the fundingLocked messages.
- alice.fundingMgr.ProcessFundingMsg(fundingLockedBob, bob)
- bob.fundingMgr.ProcessFundingMsg(fundingLockedAlice, alice)
-
- // Check that they notify the breach arbiter and peer about the new
- // channel.
- assertHandleFundingLocked(t, alice, bob)
-
- // Next up, we check that Alice rebroadcasts the announcement
- // messages on restart. Bob should as expected send announcements.
- recreateAliceFundingManager(t, alice)
- time.Sleep(300 * time.Millisecond)
-
- // Make sure both fundingManagers send the expected channel
- // announcements.
- assertChannelAnnouncements(t, alice, bob, capacity, nil, nil)
-
- // Check that the state machine is updated accordingly
- assertAddedToRouterGraph(t, alice, bob, fundingOutPoint)
-
- // Next, we check that Alice sends the announcement signatures
- // on restart after six confirmations. Bob should as expected send
- // them as well.
- recreateAliceFundingManager(t, alice)
- time.Sleep(300 * time.Millisecond)
-
- // Notify that six confirmations has been reached on funding transaction.
- alice.mockNotifier.sixConfChannel <- &chainntnfs.TxConfirmation{
- Tx: fundingTx,
- }
- bob.mockNotifier.sixConfChannel <- &chainntnfs.TxConfirmation{
- Tx: fundingTx,
- }
-
- // Make sure the fundingManagers exchange announcement signatures.
- assertAnnouncementSignatures(t, alice, bob)
-
- // The internal state-machine should now have deleted the channelStates
- // from the database, as the channel is announced.
- assertNoChannelState(t, alice, bob, fundingOutPoint)
-}
-
-// TestFundingManagerOfflinePeer checks that the fundingManager waits for the
-// server to notify when the peer comes online, in case sending the
-// fundingLocked message fails the first time.
-func TestFundingManagerOfflinePeer(t *testing.T) {
- t.Parallel()
-
- alice, bob := setupFundingManagers(t)
- defer tearDownFundingManagers(t, alice, bob)
-
- // Run through the process of opening the channel, up until the funding
- // transaction is broadcasted.
- localAmt := btcutil.Amount(500000)
- pushAmt := btcutil.Amount(0)
- capacity := localAmt + pushAmt
- updateChan := make(chan *lnrpc.OpenStatusUpdate)
- fundingOutPoint, fundingTx := openChannel(
- t, alice, bob, localAmt, pushAmt, 1, updateChan, true,
- )
-
- // After the funding transaction gets mined, both nodes will send the
- // fundingLocked message to the other peer. If the funding node fails
- // to send the fundingLocked message to the peer, it should wait for
- // the server to notify it that the peer is back online, and try again.
- // We'll save the current implementation of sendMessage to restore the
- // original behavior later on.
- workingSendMessage := bob.sendMessage
- bob.sendMessage = func(msg lnwire.Message) er.R {
- return er.Errorf("intentional error in SendToPeer")
- }
- peerChan := make(chan [33]byte, 1)
- conChan := make(chan chan<- lnpeer.Peer, 1)
- alice.fundingMgr.cfg.NotifyWhenOnline = func(peer [33]byte,
- connected chan<- lnpeer.Peer) {
-
- peerChan <- peer
- conChan <- connected
- }
-
- // Notify that transaction was mined
- alice.mockNotifier.oneConfChannel <- &chainntnfs.TxConfirmation{
- Tx: fundingTx,
- }
- bob.mockNotifier.oneConfChannel <- &chainntnfs.TxConfirmation{
- Tx: fundingTx,
- }
-
- // The funding transaction was mined, so assert that both funding
- // managers now have the state of this channel 'markedOpen' in their
- // internal state machine.
- assertMarkedOpen(t, alice, bob, fundingOutPoint)
-
- // After the funding transaction was mined, Bob should have successfully
- // sent the fundingLocked message, while Alice failed sending it. In
- // Alice's case this means that there should be no messages for Bob, and
- // the channel should still be in state 'markedOpen'
- select {
- case msg := <-alice.msgChan:
- t.Fatalf("did not expect any message from Alice: %v", msg)
- default:
- // Expected.
- }
-
- // Bob will send funding locked to Alice
- fundingLockedBob := assertFundingMsgSent(
- t, bob.msgChan, "FundingLocked",
- ).(*lnwire.FundingLocked)
-
- // Alice should still be markedOpen
- assertDatabaseState(t, alice, fundingOutPoint, markedOpen)
-
- // While Bob successfully sent fundingLocked.
- assertDatabaseState(t, bob, fundingOutPoint, fundingLockedSent)
-
- // Alice should be waiting for the server to notify when Bob comes back
- // online.
- var peer [33]byte
- var con chan<- lnpeer.Peer
- select {
- case peer = <-peerChan:
- // Expected
- case <-time.After(time.Second * 3):
- t.Fatalf("alice did not register peer with server")
- }
-
- select {
- case con = <-conChan:
- // Expected
- case <-time.After(time.Second * 3):
- t.Fatalf("alice did not register connectedChan with server")
- }
-
- if !bytes.Equal(peer[:], bobPubKey.SerializeCompressed()) {
- t.Fatalf("expected to receive Bob's pubkey (%v), instead got %v",
- bobPubKey, peer)
- }
-
- // Restore the correct sendMessage implementation, and notify that Bob
- // is back online.
- bob.sendMessage = workingSendMessage
- con <- bob
-
- // This should make Alice send the fundingLocked.
- fundingLockedAlice := assertFundingMsgSent(
- t, alice.msgChan, "FundingLocked",
- ).(*lnwire.FundingLocked)
-
- // The state should now be fundingLockedSent
- assertDatabaseState(t, alice, fundingOutPoint, fundingLockedSent)
-
- // Make sure both fundingManagers send the expected channel
- // announcements.
- assertChannelAnnouncements(t, alice, bob, capacity, nil, nil)
-
- // Check that the state machine is updated accordingly
- assertAddedToRouterGraph(t, alice, bob, fundingOutPoint)
-
- // The funding transaction is now confirmed, wait for the
- // OpenStatusUpdate_ChanOpen update
- waitForOpenUpdate(t, updateChan)
-
- // Exchange the fundingLocked messages.
- alice.fundingMgr.ProcessFundingMsg(fundingLockedBob, bob)
- bob.fundingMgr.ProcessFundingMsg(fundingLockedAlice, alice)
-
- // Check that they notify the breach arbiter and peer about the new
- // channel.
- assertHandleFundingLocked(t, alice, bob)
-
- // Notify that six confirmations has been reached on funding transaction.
- alice.mockNotifier.sixConfChannel <- &chainntnfs.TxConfirmation{
- Tx: fundingTx,
- }
- bob.mockNotifier.sixConfChannel <- &chainntnfs.TxConfirmation{
- Tx: fundingTx,
- }
-
- // Make sure both fundingManagers send the expected announcement
- // signatures.
- assertAnnouncementSignatures(t, alice, bob)
-
- // The internal state-machine should now have deleted the channelStates
- // from the database, as the channel is announced.
- assertNoChannelState(t, alice, bob, fundingOutPoint)
-}
-
-// TestFundingManagerPeerTimeoutAfterInitFunding checks that the zombie sweeper
-// will properly clean up a zombie reservation that times out after the
-// initFundingMsg has been handled.
-func TestFundingManagerPeerTimeoutAfterInitFunding(t *testing.T) {
- t.Parallel()
-
- alice, bob := setupFundingManagers(t)
- defer tearDownFundingManagers(t, alice, bob)
-
- // We will consume the channel updates as we go, so no buffering is needed.
- updateChan := make(chan *lnrpc.OpenStatusUpdate)
-
- // Create a funding request and start the workflow.
- errChan := make(chan er.R, 1)
- initReq := &openChanReq{
- targetPubkey: bob.privKey.PubKey(),
- chainHash: *fundingNetParams.GenesisHash,
- localFundingAmt: 500000,
- pushAmt: lnwire.NewMSatFromSatoshis(0),
- private: false,
- updates: updateChan,
- err: errChan,
- }
-
- alice.fundingMgr.initFundingWorkflow(bob, initReq)
-
- // Alice should have sent the OpenChannel message to Bob.
- var aliceMsg lnwire.Message
- select {
- case aliceMsg = <-alice.msgChan:
- case err := <-initReq.err:
- t.Fatalf("error init funding workflow: %v", err)
- case <-time.After(time.Second * 5):
- t.Fatalf("alice did not send OpenChannel message")
- }
-
- _, ok := aliceMsg.(*lnwire.OpenChannel)
- if !ok {
- errorMsg, gotError := aliceMsg.(*lnwire.Error)
- if gotError {
- t.Fatalf("expected OpenChannel to be sent "+
- "from bob, instead got error: %v",
- errorMsg.Error())
- }
- t.Fatalf("expected OpenChannel to be sent from "+
- "alice, instead got %T", aliceMsg)
- }
-
- // Alice should have a new pending reservation.
- assertNumPendingReservations(t, alice, bobPubKey, 1)
-
- // Make sure Alice's reservation times out and then run her zombie sweeper.
- time.Sleep(1 * time.Millisecond)
- go alice.fundingMgr.pruneZombieReservations()
-
- // Alice should have sent an Error message to Bob.
- assertErrorSent(t, alice.msgChan)
-
- // Alice's zombie reservation should have been pruned.
- assertNumPendingReservations(t, alice, bobPubKey, 0)
-}
-
-// TestFundingManagerPeerTimeoutAfterFundingOpen checks that the zombie sweeper
-// will properly clean up a zombie reservation that times out after the
-// fundingOpenMsg has been handled.
-func TestFundingManagerPeerTimeoutAfterFundingOpen(t *testing.T) {
- t.Parallel()
-
- alice, bob := setupFundingManagers(t)
- defer tearDownFundingManagers(t, alice, bob)
-
- // We will consume the channel updates as we go, so no buffering is needed.
- updateChan := make(chan *lnrpc.OpenStatusUpdate)
-
- // Create a funding request and start the workflow.
- errChan := make(chan er.R, 1)
- initReq := &openChanReq{
- targetPubkey: bob.privKey.PubKey(),
- chainHash: *fundingNetParams.GenesisHash,
- localFundingAmt: 500000,
- pushAmt: lnwire.NewMSatFromSatoshis(0),
- private: false,
- updates: updateChan,
- err: errChan,
- }
-
- alice.fundingMgr.initFundingWorkflow(bob, initReq)
-
- // Alice should have sent the OpenChannel message to Bob.
- var aliceMsg lnwire.Message
- select {
- case aliceMsg = <-alice.msgChan:
- case err := <-initReq.err:
- t.Fatalf("error init funding workflow: %v", err)
- case <-time.After(time.Second * 5):
- t.Fatalf("alice did not send OpenChannel message")
- }
-
- openChannelReq, ok := aliceMsg.(*lnwire.OpenChannel)
- if !ok {
- errorMsg, gotError := aliceMsg.(*lnwire.Error)
- if gotError {
- t.Fatalf("expected OpenChannel to be sent "+
- "from bob, instead got error: %v",
- errorMsg.Error())
- }
- t.Fatalf("expected OpenChannel to be sent from "+
- "alice, instead got %T", aliceMsg)
- }
-
- // Alice should have a new pending reservation.
- assertNumPendingReservations(t, alice, bobPubKey, 1)
-
- // Let Bob handle the init message.
- bob.fundingMgr.ProcessFundingMsg(openChannelReq, alice)
-
- // Bob should answer with an AcceptChannel.
- assertFundingMsgSent(t, bob.msgChan, "AcceptChannel")
-
- // Bob should have a new pending reservation.
- assertNumPendingReservations(t, bob, alicePubKey, 1)
-
- // Make sure Bob's reservation times out and then run his zombie sweeper.
- time.Sleep(1 * time.Millisecond)
- go bob.fundingMgr.pruneZombieReservations()
-
- // Bob should have sent an Error message to Alice.
- assertErrorSent(t, bob.msgChan)
-
- // Bob's zombie reservation should have been pruned.
- assertNumPendingReservations(t, bob, alicePubKey, 0)
-}
-
-// TestFundingManagerPeerTimeoutAfterFundingAccept checks that the zombie sweeper
-// will properly clean up a zombie reservation that times out after the
-// fundingAcceptMsg has been handled.
-func TestFundingManagerPeerTimeoutAfterFundingAccept(t *testing.T) {
- t.Parallel()
-
- alice, bob := setupFundingManagers(t)
- defer tearDownFundingManagers(t, alice, bob)
-
- // We will consume the channel updates as we go, so no buffering is needed.
- updateChan := make(chan *lnrpc.OpenStatusUpdate)
-
- // Create a funding request and start the workflow.
- errChan := make(chan er.R, 1)
- initReq := &openChanReq{
- targetPubkey: bob.privKey.PubKey(),
- chainHash: *fundingNetParams.GenesisHash,
- localFundingAmt: 500000,
- pushAmt: lnwire.NewMSatFromSatoshis(0),
- private: false,
- updates: updateChan,
- err: errChan,
- }
-
- alice.fundingMgr.initFundingWorkflow(bob, initReq)
-
- // Alice should have sent the OpenChannel message to Bob.
- var aliceMsg lnwire.Message
- select {
- case aliceMsg = <-alice.msgChan:
- case err := <-initReq.err:
- t.Fatalf("error init funding workflow: %v", err)
- case <-time.After(time.Second * 5):
- t.Fatalf("alice did not send OpenChannel message")
- }
-
- openChannelReq, ok := aliceMsg.(*lnwire.OpenChannel)
- if !ok {
- errorMsg, gotError := aliceMsg.(*lnwire.Error)
- if gotError {
- t.Fatalf("expected OpenChannel to be sent "+
- "from bob, instead got error: %v",
- errorMsg.Error())
- }
- t.Fatalf("expected OpenChannel to be sent from "+
- "alice, instead got %T", aliceMsg)
- }
-
- // Alice should have a new pending reservation.
- assertNumPendingReservations(t, alice, bobPubKey, 1)
-
- // Let Bob handle the init message.
- bob.fundingMgr.ProcessFundingMsg(openChannelReq, alice)
-
- // Bob should answer with an AcceptChannel.
- acceptChannelResponse := assertFundingMsgSent(
- t, bob.msgChan, "AcceptChannel",
- ).(*lnwire.AcceptChannel)
-
- // Bob should have a new pending reservation.
- assertNumPendingReservations(t, bob, alicePubKey, 1)
-
- // Forward the response to Alice.
- alice.fundingMgr.ProcessFundingMsg(acceptChannelResponse, bob)
-
- // Alice responds with a FundingCreated messages.
- assertFundingMsgSent(t, alice.msgChan, "FundingCreated")
-
- // Make sure Alice's reservation times out and then run her zombie sweeper.
- time.Sleep(1 * time.Millisecond)
- go alice.fundingMgr.pruneZombieReservations()
-
- // Alice should have sent an Error message to Bob.
- assertErrorSent(t, alice.msgChan)
-
- // Alice's zombie reservation should have been pruned.
- assertNumPendingReservations(t, alice, bobPubKey, 0)
-}
-
-func TestFundingManagerFundingTimeout(t *testing.T) {
- t.Parallel()
-
- alice, bob := setupFundingManagers(t)
- defer tearDownFundingManagers(t, alice, bob)
-
- // We will consume the channel updates as we go, so no buffering is needed.
- updateChan := make(chan *lnrpc.OpenStatusUpdate)
-
- // Run through the process of opening the channel, up until the funding
- // transaction is broadcasted.
- _, _ = openChannel(t, alice, bob, 500000, 0, 1, updateChan, true)
-
- // Bob will at this point be waiting for the funding transaction to be
- // confirmed, so the channel should be considered pending.
- pendingChannels, err := bob.fundingMgr.cfg.Wallet.Cfg.Database.FetchPendingChannels()
- if err != nil {
- t.Fatalf("unable to fetch pending channels: %v", err)
- }
- if len(pendingChannels) != 1 {
- t.Fatalf("Expected Bob to have 1 pending channel, had %v",
- len(pendingChannels))
- }
-
- // We expect Bob to forget the channel after 2016 blocks (2 weeks), so
- // mine 2016-1, and check that it is still pending.
- bob.mockNotifier.epochChan <- &chainntnfs.BlockEpoch{
- Height: fundingBroadcastHeight + maxWaitNumBlocksFundingConf - 1,
- }
-
- // Bob should still be waiting for the channel to open.
- assertNumPendingChannelsRemains(t, bob, 1)
-
- bob.mockNotifier.epochChan <- &chainntnfs.BlockEpoch{
- Height: fundingBroadcastHeight + maxWaitNumBlocksFundingConf,
- }
-
- // Bob should have sent an Error message to Alice.
- assertErrorSent(t, bob.msgChan)
-
- // Should not be pending anymore.
- assertNumPendingChannelsBecomes(t, bob, 0)
-}
-
-// TestFundingManagerFundingNotTimeoutInitiator checks that if the user was
-// the channel initiator, that it does not timeout when the lnd restarts.
-func TestFundingManagerFundingNotTimeoutInitiator(t *testing.T) {
- t.Parallel()
-
- alice, bob := setupFundingManagers(t)
- defer tearDownFundingManagers(t, alice, bob)
-
- // We will consume the channel updates as we go, so no buffering is needed.
- updateChan := make(chan *lnrpc.OpenStatusUpdate)
-
- // Run through the process of opening the channel, up until the funding
- // transaction is broadcasted.
- _, _ = openChannel(t, alice, bob, 500000, 0, 1, updateChan, true)
-
- // Alice will at this point be waiting for the funding transaction to be
- // confirmed, so the channel should be considered pending.
- pendingChannels, err := alice.fundingMgr.cfg.Wallet.Cfg.Database.FetchPendingChannels()
- if err != nil {
- t.Fatalf("unable to fetch pending channels: %v", err)
- }
- if len(pendingChannels) != 1 {
- t.Fatalf("Expected Alice to have 1 pending channel, had %v",
- len(pendingChannels))
- }
-
- recreateAliceFundingManager(t, alice)
-
- // We should receive the rebroadcasted funding txn.
- select {
- case <-alice.publTxChan:
- case <-time.After(time.Second * 5):
- t.Fatalf("alice did not publish funding tx")
- }
-
- // Increase the height to 1 minus the maxWaitNumBlocksFundingConf height.
- alice.mockNotifier.epochChan <- &chainntnfs.BlockEpoch{
- Height: fundingBroadcastHeight + maxWaitNumBlocksFundingConf - 1,
- }
-
- bob.mockNotifier.epochChan <- &chainntnfs.BlockEpoch{
- Height: fundingBroadcastHeight + maxWaitNumBlocksFundingConf - 1,
- }
-
- // Assert both and Alice and Bob still have 1 pending channels.
- assertNumPendingChannelsRemains(t, alice, 1)
-
- assertNumPendingChannelsRemains(t, bob, 1)
-
- // Increase both Alice and Bob to maxWaitNumBlocksFundingConf height.
- alice.mockNotifier.epochChan <- &chainntnfs.BlockEpoch{
- Height: fundingBroadcastHeight + maxWaitNumBlocksFundingConf,
- }
-
- bob.mockNotifier.epochChan <- &chainntnfs.BlockEpoch{
- Height: fundingBroadcastHeight + maxWaitNumBlocksFundingConf,
- }
-
- // Since Alice was the initiator, the channel should not have timed out.
- assertNumPendingChannelsRemains(t, alice, 1)
-
- // Bob should have sent an Error message to Alice.
- assertErrorSent(t, bob.msgChan)
-
- // Since Bob was not the initiator, the channel should timeout.
- assertNumPendingChannelsBecomes(t, bob, 0)
-}
-
-// TestFundingManagerReceiveFundingLockedTwice checks that the fundingManager
-// continues to operate as expected in case we receive a duplicate fundingLocked
-// message.
-func TestFundingManagerReceiveFundingLockedTwice(t *testing.T) {
- t.Parallel()
-
- alice, bob := setupFundingManagers(t)
- defer tearDownFundingManagers(t, alice, bob)
-
- // We will consume the channel updates as we go, so no buffering is needed.
- updateChan := make(chan *lnrpc.OpenStatusUpdate)
-
- // Run through the process of opening the channel, up until the funding
- // transaction is broadcasted.
- localAmt := btcutil.Amount(500000)
- pushAmt := btcutil.Amount(0)
- capacity := localAmt + pushAmt
- fundingOutPoint, fundingTx := openChannel(
- t, alice, bob, localAmt, pushAmt, 1, updateChan, true,
- )
-
- // Notify that transaction was mined
- alice.mockNotifier.oneConfChannel <- &chainntnfs.TxConfirmation{
- Tx: fundingTx,
- }
- bob.mockNotifier.oneConfChannel <- &chainntnfs.TxConfirmation{
- Tx: fundingTx,
- }
-
- // The funding transaction was mined, so assert that both funding
- // managers now have the state of this channel 'markedOpen' in their
- // internal state machine.
- assertMarkedOpen(t, alice, bob, fundingOutPoint)
-
- // After the funding transaction is mined, Alice will send
- // fundingLocked to Bob.
- fundingLockedAlice := assertFundingMsgSent(
- t, alice.msgChan, "FundingLocked",
- ).(*lnwire.FundingLocked)
-
- // And similarly Bob will send funding locked to Alice.
- fundingLockedBob := assertFundingMsgSent(
- t, bob.msgChan, "FundingLocked",
- ).(*lnwire.FundingLocked)
-
- // Check that the state machine is updated accordingly
- assertFundingLockedSent(t, alice, bob, fundingOutPoint)
-
- // Make sure both fundingManagers send the expected channel
- // announcements.
- assertChannelAnnouncements(t, alice, bob, capacity, nil, nil)
-
- // Check that the state machine is updated accordingly
- assertAddedToRouterGraph(t, alice, bob, fundingOutPoint)
-
- // The funding transaction is now confirmed, wait for the
- // OpenStatusUpdate_ChanOpen update
- waitForOpenUpdate(t, updateChan)
-
- // Send the fundingLocked message twice to Alice, and once to Bob.
- alice.fundingMgr.ProcessFundingMsg(fundingLockedBob, bob)
- alice.fundingMgr.ProcessFundingMsg(fundingLockedBob, bob)
- bob.fundingMgr.ProcessFundingMsg(fundingLockedAlice, alice)
-
- // Check that they notify the breach arbiter and peer about the new
- // channel.
- assertHandleFundingLocked(t, alice, bob)
-
- // Alice should not send the channel state the second time, as the
- // second funding locked should just be ignored.
- select {
- case <-alice.newChannels:
- t.Fatalf("alice sent new channel to peer a second time")
- case <-time.After(time.Millisecond * 300):
- // Expected
- }
-
- // Another fundingLocked should also be ignored, since Alice should
- // have updated her database at this point.
- alice.fundingMgr.ProcessFundingMsg(fundingLockedBob, bob)
- select {
- case <-alice.newChannels:
- t.Fatalf("alice sent new channel to peer a second time")
- case <-time.After(time.Millisecond * 300):
- // Expected
- }
-
- // Notify that six confirmations has been reached on funding transaction.
- alice.mockNotifier.sixConfChannel <- &chainntnfs.TxConfirmation{
- Tx: fundingTx,
- }
- bob.mockNotifier.sixConfChannel <- &chainntnfs.TxConfirmation{
- Tx: fundingTx,
- }
-
- // Make sure the fundingManagers exchange announcement signatures.
- assertAnnouncementSignatures(t, alice, bob)
-
- // The internal state-machine should now have deleted the channelStates
- // from the database, as the channel is announced.
- assertNoChannelState(t, alice, bob, fundingOutPoint)
-}
-
-// TestFundingManagerRestartAfterChanAnn checks that the fundingManager properly
-// handles receiving a fundingLocked after the its own fundingLocked and channel
-// announcement is sent and gets restarted.
-func TestFundingManagerRestartAfterChanAnn(t *testing.T) {
- t.Parallel()
-
- alice, bob := setupFundingManagers(t)
- defer tearDownFundingManagers(t, alice, bob)
-
- // We will consume the channel updates as we go, so no buffering is needed.
- updateChan := make(chan *lnrpc.OpenStatusUpdate)
-
- // Run through the process of opening the channel, up until the funding
- // transaction is broadcasted.
- localAmt := btcutil.Amount(500000)
- pushAmt := btcutil.Amount(0)
- capacity := localAmt + pushAmt
- fundingOutPoint, fundingTx := openChannel(
- t, alice, bob, localAmt, pushAmt, 1, updateChan, true,
- )
-
- // Notify that transaction was mined
- alice.mockNotifier.oneConfChannel <- &chainntnfs.TxConfirmation{
- Tx: fundingTx,
- }
- bob.mockNotifier.oneConfChannel <- &chainntnfs.TxConfirmation{
- Tx: fundingTx,
- }
-
- // The funding transaction was mined, so assert that both funding
- // managers now have the state of this channel 'markedOpen' in their
- // internal state machine.
- assertMarkedOpen(t, alice, bob, fundingOutPoint)
-
- // After the funding transaction is mined, Alice will send
- // fundingLocked to Bob.
- fundingLockedAlice := assertFundingMsgSent(
- t, alice.msgChan, "FundingLocked",
- ).(*lnwire.FundingLocked)
-
- // And similarly Bob will send funding locked to Alice.
- fundingLockedBob := assertFundingMsgSent(
- t, bob.msgChan, "FundingLocked",
- ).(*lnwire.FundingLocked)
-
- // Check that the state machine is updated accordingly
- assertFundingLockedSent(t, alice, bob, fundingOutPoint)
-
- // Make sure both fundingManagers send the expected channel
- // announcements.
- assertChannelAnnouncements(t, alice, bob, capacity, nil, nil)
-
- // Check that the state machine is updated accordingly
- assertAddedToRouterGraph(t, alice, bob, fundingOutPoint)
-
- // The funding transaction is now confirmed, wait for the
- // OpenStatusUpdate_ChanOpen update
- waitForOpenUpdate(t, updateChan)
-
- // At this point we restart Alice's fundingManager, before she receives
- // the fundingLocked message. After restart, she will receive it, and
- // we expect her to be able to handle it correctly.
- recreateAliceFundingManager(t, alice)
-
- // Exchange the fundingLocked messages.
- alice.fundingMgr.ProcessFundingMsg(fundingLockedBob, bob)
- bob.fundingMgr.ProcessFundingMsg(fundingLockedAlice, alice)
-
- // Check that they notify the breach arbiter and peer about the new
- // channel.
- assertHandleFundingLocked(t, alice, bob)
-
- // Notify that six confirmations has been reached on funding transaction.
- alice.mockNotifier.sixConfChannel <- &chainntnfs.TxConfirmation{
- Tx: fundingTx,
- }
- bob.mockNotifier.sixConfChannel <- &chainntnfs.TxConfirmation{
- Tx: fundingTx,
- }
-
- // Make sure both fundingManagers send the expected channel announcements.
- assertAnnouncementSignatures(t, alice, bob)
-
- // The internal state-machine should now have deleted the channelStates
- // from the database, as the channel is announced.
- assertNoChannelState(t, alice, bob, fundingOutPoint)
-}
-
-// TestFundingManagerRestartAfterReceivingFundingLocked checks that the
-// fundingManager continues to operate as expected after it has received
-// fundingLocked and then gets restarted.
-func TestFundingManagerRestartAfterReceivingFundingLocked(t *testing.T) {
- t.Parallel()
-
- alice, bob := setupFundingManagers(t)
- defer tearDownFundingManagers(t, alice, bob)
-
- // We will consume the channel updates as we go, so no buffering is needed.
- updateChan := make(chan *lnrpc.OpenStatusUpdate)
-
- // Run through the process of opening the channel, up until the funding
- // transaction is broadcasted.
- localAmt := btcutil.Amount(500000)
- pushAmt := btcutil.Amount(0)
- capacity := localAmt + pushAmt
- fundingOutPoint, fundingTx := openChannel(
- t, alice, bob, localAmt, pushAmt, 1, updateChan, true,
- )
-
- // Notify that transaction was mined
- alice.mockNotifier.oneConfChannel <- &chainntnfs.TxConfirmation{
- Tx: fundingTx,
- }
- bob.mockNotifier.oneConfChannel <- &chainntnfs.TxConfirmation{
- Tx: fundingTx,
- }
-
- // The funding transaction was mined, so assert that both funding
- // managers now have the state of this channel 'markedOpen' in their
- // internal state machine.
- assertMarkedOpen(t, alice, bob, fundingOutPoint)
-
- // After the funding transaction is mined, Alice will send
- // fundingLocked to Bob.
- fundingLockedAlice := assertFundingMsgSent(
- t, alice.msgChan, "FundingLocked",
- ).(*lnwire.FundingLocked)
-
- // And similarly Bob will send funding locked to Alice.
- fundingLockedBob := assertFundingMsgSent(
- t, bob.msgChan, "FundingLocked",
- ).(*lnwire.FundingLocked)
-
- // Check that the state machine is updated accordingly
- assertFundingLockedSent(t, alice, bob, fundingOutPoint)
-
- // Let Alice immediately get the fundingLocked message.
- alice.fundingMgr.ProcessFundingMsg(fundingLockedBob, bob)
-
- // Also let Bob get the fundingLocked message.
- bob.fundingMgr.ProcessFundingMsg(fundingLockedAlice, alice)
-
- // Check that they notify the breach arbiter and peer about the new
- // channel.
- assertHandleFundingLocked(t, alice, bob)
-
- // At this point we restart Alice's fundingManager.
- recreateAliceFundingManager(t, alice)
-
- // Make sure both fundingManagers send the expected channel
- // announcements.
- assertChannelAnnouncements(t, alice, bob, capacity, nil, nil)
-
- // Check that the state machine is updated accordingly
- assertAddedToRouterGraph(t, alice, bob, fundingOutPoint)
-
- // Notify that six confirmations has been reached on funding transaction.
- alice.mockNotifier.sixConfChannel <- &chainntnfs.TxConfirmation{
- Tx: fundingTx,
- }
- bob.mockNotifier.sixConfChannel <- &chainntnfs.TxConfirmation{
- Tx: fundingTx,
- }
-
- // Make sure both fundingManagers send the expected channel announcements.
- assertAnnouncementSignatures(t, alice, bob)
-
- // The internal state-machine should now have deleted the channelStates
- // from the database, as the channel is announced.
- assertNoChannelState(t, alice, bob, fundingOutPoint)
-}
-
-// TestFundingManagerPrivateChannel tests that if we open a private channel
-// (a channel not supposed to be announced to the rest of the network),
-// the announcementSignatures nor the nodeAnnouncement messages are sent.
-func TestFundingManagerPrivateChannel(t *testing.T) {
- t.Parallel()
-
- alice, bob := setupFundingManagers(t)
- defer tearDownFundingManagers(t, alice, bob)
-
- // We will consume the channel updates as we go, so no buffering is needed.
- updateChan := make(chan *lnrpc.OpenStatusUpdate)
-
- // Run through the process of opening the channel, up until the funding
- // transaction is broadcasted.
- localAmt := btcutil.Amount(500000)
- pushAmt := btcutil.Amount(0)
- capacity := localAmt + pushAmt
- fundingOutPoint, fundingTx := openChannel(
- t, alice, bob, localAmt, pushAmt, 1, updateChan, false,
- )
-
- // Notify that transaction was mined
- alice.mockNotifier.oneConfChannel <- &chainntnfs.TxConfirmation{
- Tx: fundingTx,
- }
- bob.mockNotifier.oneConfChannel <- &chainntnfs.TxConfirmation{
- Tx: fundingTx,
- }
-
- // The funding transaction was mined, so assert that both funding
- // managers now have the state of this channel 'markedOpen' in their
- // internal state machine.
- assertMarkedOpen(t, alice, bob, fundingOutPoint)
-
- // After the funding transaction is mined, Alice will send
- // fundingLocked to Bob.
- fundingLockedAlice := assertFundingMsgSent(
- t, alice.msgChan, "FundingLocked",
- ).(*lnwire.FundingLocked)
-
- // And similarly Bob will send funding locked to Alice.
- fundingLockedBob := assertFundingMsgSent(
- t, bob.msgChan, "FundingLocked",
- ).(*lnwire.FundingLocked)
-
- // Check that the state machine is updated accordingly
- assertFundingLockedSent(t, alice, bob, fundingOutPoint)
-
- // Make sure both fundingManagers send the expected channel
- // announcements.
- assertChannelAnnouncements(t, alice, bob, capacity, nil, nil)
-
- // The funding transaction is now confirmed, wait for the
- // OpenStatusUpdate_ChanOpen update
- waitForOpenUpdate(t, updateChan)
-
- // Exchange the fundingLocked messages.
- alice.fundingMgr.ProcessFundingMsg(fundingLockedBob, bob)
- bob.fundingMgr.ProcessFundingMsg(fundingLockedAlice, alice)
-
- // Check that they notify the breach arbiter and peer about the new
- // channel.
- assertHandleFundingLocked(t, alice, bob)
-
- // Notify that six confirmations has been reached on funding transaction.
- alice.mockNotifier.sixConfChannel <- &chainntnfs.TxConfirmation{
- Tx: fundingTx,
- }
- bob.mockNotifier.sixConfChannel <- &chainntnfs.TxConfirmation{
- Tx: fundingTx,
- }
-
- // Since this is a private channel, we shouldn't receive the
- // announcement signatures.
- select {
- case ann := <-alice.announceChan:
- t.Fatalf("unexpectedly got channel announcement message: %v", ann)
- case <-time.After(300 * time.Millisecond):
- // Expected
- }
-
- select {
- case ann := <-bob.announceChan:
- t.Fatalf("unexpectedly got channel announcement message: %v", ann)
- case <-time.After(300 * time.Millisecond):
- // Expected
- }
-
- // We should however receive each side's node announcement.
- select {
- case msg := <-alice.msgChan:
- if _, ok := msg.(*lnwire.NodeAnnouncement); !ok {
- t.Fatalf("expected to receive node announcement")
- }
- case <-time.After(time.Second):
- t.Fatalf("expected to receive node announcement")
- }
-
- select {
- case msg := <-bob.msgChan:
- if _, ok := msg.(*lnwire.NodeAnnouncement); !ok {
- t.Fatalf("expected to receive node announcement")
- }
- case <-time.After(time.Second):
- t.Fatalf("expected to receive node announcement")
- }
-
- // The internal state-machine should now have deleted the channelStates
- // from the database, as the channel is announced.
- assertNoChannelState(t, alice, bob, fundingOutPoint)
-}
-
-// TestFundingManagerPrivateRestart tests that the privacy guarantees granted
-// by the private channel persist even on restart. This means that the
-// announcement signatures nor the node announcement messages are sent upon
-// restart.
-func TestFundingManagerPrivateRestart(t *testing.T) {
- t.Parallel()
-
- alice, bob := setupFundingManagers(t)
- defer tearDownFundingManagers(t, alice, bob)
-
- // We will consume the channel updates as we go, so no buffering is needed.
- updateChan := make(chan *lnrpc.OpenStatusUpdate)
-
- // Run through the process of opening the channel, up until the funding
- // transaction is broadcasted.
- localAmt := btcutil.Amount(500000)
- pushAmt := btcutil.Amount(0)
- capacity := localAmt + pushAmt
- fundingOutPoint, fundingTx := openChannel(
- t, alice, bob, localAmt, pushAmt, 1, updateChan, false,
- )
-
- // Notify that transaction was mined
- alice.mockNotifier.oneConfChannel <- &chainntnfs.TxConfirmation{
- Tx: fundingTx,
- }
- bob.mockNotifier.oneConfChannel <- &chainntnfs.TxConfirmation{
- Tx: fundingTx,
- }
-
- // The funding transaction was mined, so assert that both funding
- // managers now have the state of this channel 'markedOpen' in their
- // internal state machine.
- assertMarkedOpen(t, alice, bob, fundingOutPoint)
-
- // After the funding transaction is mined, Alice will send
- // fundingLocked to Bob.
- fundingLockedAlice := assertFundingMsgSent(
- t, alice.msgChan, "FundingLocked",
- ).(*lnwire.FundingLocked)
-
- // And similarly Bob will send funding locked to Alice.
- fundingLockedBob := assertFundingMsgSent(
- t, bob.msgChan, "FundingLocked",
- ).(*lnwire.FundingLocked)
-
- // Check that the state machine is updated accordingly
- assertFundingLockedSent(t, alice, bob, fundingOutPoint)
-
- // Make sure both fundingManagers send the expected channel
- // announcements.
- assertChannelAnnouncements(t, alice, bob, capacity, nil, nil)
-
- // Note: We don't check for the addedToRouterGraph state because in
- // the private channel mode, the state is quickly changed from
- // addedToRouterGraph to deleted from the database since the public
- // announcement phase is skipped.
-
- // The funding transaction is now confirmed, wait for the
- // OpenStatusUpdate_ChanOpen update
- waitForOpenUpdate(t, updateChan)
-
- // Exchange the fundingLocked messages.
- alice.fundingMgr.ProcessFundingMsg(fundingLockedBob, bob)
- bob.fundingMgr.ProcessFundingMsg(fundingLockedAlice, alice)
-
- // Check that they notify the breach arbiter and peer about the new
- // channel.
- assertHandleFundingLocked(t, alice, bob)
-
- // Notify that six confirmations has been reached on funding transaction.
- alice.mockNotifier.sixConfChannel <- &chainntnfs.TxConfirmation{
- Tx: fundingTx,
- }
- bob.mockNotifier.sixConfChannel <- &chainntnfs.TxConfirmation{
- Tx: fundingTx,
- }
-
- // Since this is a private channel, we shouldn't receive the public
- // channel announcement messages.
- select {
- case ann := <-alice.announceChan:
- t.Fatalf("unexpectedly got channel announcement message: %v", ann)
- case <-time.After(300 * time.Millisecond):
- }
-
- select {
- case ann := <-bob.announceChan:
- t.Fatalf("unexpectedly got channel announcement message: %v", ann)
- case <-time.After(300 * time.Millisecond):
- }
-
- // We should however receive each side's node announcement.
- select {
- case msg := <-alice.msgChan:
- if _, ok := msg.(*lnwire.NodeAnnouncement); !ok {
- t.Fatalf("expected to receive node announcement")
- }
- case <-time.After(time.Second):
- t.Fatalf("expected to receive node announcement")
- }
-
- select {
- case msg := <-bob.msgChan:
- if _, ok := msg.(*lnwire.NodeAnnouncement); !ok {
- t.Fatalf("expected to receive node announcement")
- }
- case <-time.After(time.Second):
- t.Fatalf("expected to receive node announcement")
- }
-
- // Restart Alice's fundingManager so we can prove that the public
- // channel announcements are not sent upon restart and that the private
- // setting persists upon restart.
- recreateAliceFundingManager(t, alice)
-
- select {
- case ann := <-alice.announceChan:
- t.Fatalf("unexpectedly got channel announcement message: %v", ann)
- case <-time.After(300 * time.Millisecond):
- // Expected
- }
-
- select {
- case ann := <-bob.announceChan:
- t.Fatalf("unexpectedly got channel announcement message: %v", ann)
- case <-time.After(300 * time.Millisecond):
- // Expected
- }
-
- // The internal state-machine should now have deleted the channelStates
- // from the database, as the channel is announced.
- assertNoChannelState(t, alice, bob, fundingOutPoint)
-}
-
-// TestFundingManagerCustomChannelParameters checks that custom requirements we
-// specify during the channel funding flow is preserved correcly on both sides.
-func TestFundingManagerCustomChannelParameters(t *testing.T) {
- t.Parallel()
-
- alice, bob := setupFundingManagers(t)
- defer tearDownFundingManagers(t, alice, bob)
-
- // This is the custom parameters we'll use.
- const csvDelay = 67
- const minHtlcIn = 1234
- const maxValueInFlight = 50000
- const fundingAmt = 5000000
-
- // We will consume the channel updates as we go, so no buffering is
- // needed.
- updateChan := make(chan *lnrpc.OpenStatusUpdate)
-
- localAmt := btcutil.Amount(5000000)
- pushAmt := btcutil.Amount(0)
- capacity := localAmt + pushAmt
-
- // Create a funding request with the custom parameters and start the
- // workflow.
- errChan := make(chan er.R, 1)
- initReq := &openChanReq{
- targetPubkey: bob.privKey.PubKey(),
- chainHash: *fundingNetParams.GenesisHash,
- localFundingAmt: localAmt,
- pushAmt: lnwire.NewMSatFromSatoshis(pushAmt),
- private: false,
- maxValueInFlight: maxValueInFlight,
- minHtlcIn: minHtlcIn,
- remoteCsvDelay: csvDelay,
- updates: updateChan,
- err: errChan,
- }
-
- alice.fundingMgr.initFundingWorkflow(bob, initReq)
-
- // Alice should have sent the OpenChannel message to Bob.
- var aliceMsg lnwire.Message
- select {
- case aliceMsg = <-alice.msgChan:
- case err := <-initReq.err:
- t.Fatalf("error init funding workflow: %v", err)
- case <-time.After(time.Second * 5):
- t.Fatalf("alice did not send OpenChannel message")
- }
-
- openChannelReq, ok := aliceMsg.(*lnwire.OpenChannel)
- if !ok {
- errorMsg, gotError := aliceMsg.(*lnwire.Error)
- if gotError {
- t.Fatalf("expected OpenChannel to be sent "+
- "from bob, instead got error: %v",
- errorMsg.Error())
- }
- t.Fatalf("expected OpenChannel to be sent from "+
- "alice, instead got %T", aliceMsg)
- }
-
- // Check that the custom CSV delay is sent as part of OpenChannel.
- if openChannelReq.CsvDelay != csvDelay {
- t.Fatalf("expected OpenChannel to have CSV delay %v, got %v",
- csvDelay, openChannelReq.CsvDelay)
- }
-
- // Check that the custom minHTLC value is sent.
- if openChannelReq.HtlcMinimum != minHtlcIn {
- t.Fatalf("expected OpenChannel to have minHtlc %v, got %v",
- minHtlcIn, openChannelReq.HtlcMinimum)
- }
-
- // Check that the max value in flight is sent as part of OpenChannel.
- if openChannelReq.MaxValueInFlight != maxValueInFlight {
- t.Fatalf("expected OpenChannel to have MaxValueInFlight %v, got %v",
- maxValueInFlight, openChannelReq.MaxValueInFlight)
- }
-
- chanID := openChannelReq.PendingChannelID
-
- // Let Bob handle the init message.
- bob.fundingMgr.ProcessFundingMsg(openChannelReq, alice)
-
- // Bob should answer with an AcceptChannel message.
- acceptChannelResponse := assertFundingMsgSent(
- t, bob.msgChan, "AcceptChannel",
- ).(*lnwire.AcceptChannel)
-
- // Bob should require the default delay of 4.
- if acceptChannelResponse.CsvDelay != 4 {
- t.Fatalf("expected AcceptChannel to have CSV delay %v, got %v",
- 4, acceptChannelResponse.CsvDelay)
- }
-
- // And the default MinHTLC value of 5.
- if acceptChannelResponse.HtlcMinimum != 5 {
- t.Fatalf("expected AcceptChannel to have minHtlc %v, got %v",
- 5, acceptChannelResponse.HtlcMinimum)
- }
-
- reserve := lnwire.NewMSatFromSatoshis(fundingAmt / 100)
- maxValueAcceptChannel := lnwire.NewMSatFromSatoshis(fundingAmt) - reserve
-
- if acceptChannelResponse.MaxValueInFlight != maxValueAcceptChannel {
- t.Fatalf("expected AcceptChannel to have MaxValueInFlight %v, got %v",
- maxValueAcceptChannel, acceptChannelResponse.MaxValueInFlight)
- }
-
- // Forward the response to Alice.
- alice.fundingMgr.ProcessFundingMsg(acceptChannelResponse, bob)
-
- // Alice responds with a FundingCreated message.
- fundingCreated := assertFundingMsgSent(
- t, alice.msgChan, "FundingCreated",
- ).(*lnwire.FundingCreated)
-
- // Helper method for checking the CSV delay stored for a reservation.
- assertDelay := func(resCtx *reservationWithCtx,
- ourDelay, theirDelay uint16) er.R {
-
- ourCsvDelay := resCtx.reservation.OurContribution().CsvDelay
- if ourCsvDelay != ourDelay {
- return er.Errorf("expected our CSV delay to be %v, "+
- "was %v", ourDelay, ourCsvDelay)
- }
-
- theirCsvDelay := resCtx.reservation.TheirContribution().CsvDelay
- if theirCsvDelay != theirDelay {
- return er.Errorf("expected their CSV delay to be %v, "+
- "was %v", theirDelay, theirCsvDelay)
- }
- return nil
- }
-
- // Helper method for checking the MinHtlc value stored for a
- // reservation.
- assertMinHtlc := func(resCtx *reservationWithCtx,
- expOurMinHtlc, expTheirMinHtlc lnwire.MilliSatoshi) er.R {
-
- ourMinHtlc := resCtx.reservation.OurContribution().MinHTLC
- if ourMinHtlc != expOurMinHtlc {
- return er.Errorf("expected our minHtlc to be %v, "+
- "was %v", expOurMinHtlc, ourMinHtlc)
- }
-
- theirMinHtlc := resCtx.reservation.TheirContribution().MinHTLC
- if theirMinHtlc != expTheirMinHtlc {
- return er.Errorf("expected their minHtlc to be %v, "+
- "was %v", expTheirMinHtlc, theirMinHtlc)
- }
- return nil
- }
-
- // Helper method for checking the MaxValueInFlight stored for a
- // reservation.
- assertMaxHtlc := func(resCtx *reservationWithCtx,
- expOurMaxValue, expTheirMaxValue lnwire.MilliSatoshi) er.R {
-
- ourMaxValue :=
- resCtx.reservation.OurContribution().MaxPendingAmount
- if ourMaxValue != expOurMaxValue {
- return er.Errorf("expected our maxValue to be %v, "+
- "was %v", expOurMaxValue, ourMaxValue)
- }
-
- theirMaxValue :=
- resCtx.reservation.TheirContribution().MaxPendingAmount
- if theirMaxValue != expTheirMaxValue {
- return er.Errorf("expected their MaxPendingAmount to be %v, "+
- "was %v", expTheirMaxValue, theirMaxValue)
- }
- return nil
- }
-
- // Check that the custom channel parameters were properly set in the
- // channel reservation.
- resCtx, err := alice.fundingMgr.getReservationCtx(bobPubKey, chanID)
- if err != nil {
- t.Fatalf("unable to find ctx: %v", err)
- }
-
- // Alice's CSV delay should be 4 since Bob sent the default value, and
- // Bob's should be 67 since Alice sent the custom value.
- if err := assertDelay(resCtx, 4, csvDelay); err != nil {
- t.Fatal(err)
- }
-
- // The minimum HTLC value Alice can offer should be 5, and the minimum
- // Bob can offer should be 1234.
- if err := assertMinHtlc(resCtx, 5, minHtlcIn); err != nil {
- t.Fatal(err)
- }
-
- // The max value in flight Alice can have should be maxValueAcceptChannel,
- // which is the default value and the maxium Bob can offer should be
- // maxValueInFlight.
- if err := assertMaxHtlc(resCtx,
- maxValueAcceptChannel, maxValueInFlight); err != nil {
- t.Fatal(err)
- }
-
- // Also make sure the parameters are properly set on Bob's end.
- resCtx, err = bob.fundingMgr.getReservationCtx(alicePubKey, chanID)
- if err != nil {
- t.Fatalf("unable to find ctx: %v", err)
- }
-
- if err := assertDelay(resCtx, csvDelay, 4); err != nil {
- t.Fatal(err)
- }
-
- if err := assertMinHtlc(resCtx, minHtlcIn, 5); err != nil {
- t.Fatal(err)
- }
-
- if err := assertMaxHtlc(resCtx,
- maxValueInFlight, maxValueAcceptChannel); err != nil {
- t.Fatal(err)
- }
- // Give the message to Bob.
- bob.fundingMgr.ProcessFundingMsg(fundingCreated, alice)
-
- // Finally, Bob should send the FundingSigned message.
- fundingSigned := assertFundingMsgSent(
- t, bob.msgChan, "FundingSigned",
- ).(*lnwire.FundingSigned)
-
- // Forward the signature to Alice.
- alice.fundingMgr.ProcessFundingMsg(fundingSigned, bob)
-
- // After Alice processes the singleFundingSignComplete message, she will
- // broadcast the funding transaction to the network. We expect to get a
- // channel update saying the channel is pending.
- var pendingUpdate *lnrpc.OpenStatusUpdate
- select {
- case pendingUpdate = <-updateChan:
- case <-time.After(time.Second * 5):
- t.Fatalf("alice did not send OpenStatusUpdate_ChanPending")
- }
-
- _, ok = pendingUpdate.Update.(*lnrpc.OpenStatusUpdate_ChanPending)
- if !ok {
- t.Fatal("OpenStatusUpdate was not OpenStatusUpdate_ChanPending")
- }
-
- // Wait for Alice to published the funding tx to the network.
- var fundingTx *wire.MsgTx
- select {
- case fundingTx = <-alice.publTxChan:
- case <-time.After(time.Second * 5):
- t.Fatalf("alice did not publish funding tx")
- }
-
- // Notify that transaction was mined.
- alice.mockNotifier.oneConfChannel <- &chainntnfs.TxConfirmation{
- Tx: fundingTx,
- }
- bob.mockNotifier.oneConfChannel <- &chainntnfs.TxConfirmation{
- Tx: fundingTx,
- }
-
- // After the funding transaction is mined, Alice will send
- // fundingLocked to Bob.
- _ = assertFundingMsgSent(
- t, alice.msgChan, "FundingLocked",
- ).(*lnwire.FundingLocked)
-
- // And similarly Bob will send funding locked to Alice.
- _ = assertFundingMsgSent(
- t, bob.msgChan, "FundingLocked",
- ).(*lnwire.FundingLocked)
-
- // Make sure both fundingManagers send the expected channel
- // announcements.
- // Alice should advertise the default MinHTLC value of
- // 5, while bob should advertise the value minHtlc, since Alice
- // required him to use it.
- minHtlcArr := []lnwire.MilliSatoshi{5, minHtlcIn}
-
- // For maxHltc Alice should advertise the default MaxHtlc value of
- // maxValueAcceptChannel, while bob should advertise the value
- // maxValueInFlight since Alice required him to use it.
- maxHtlcArr := []lnwire.MilliSatoshi{maxValueAcceptChannel, maxValueInFlight}
-
- assertChannelAnnouncements(t, alice, bob, capacity, minHtlcArr, maxHtlcArr)
-
- // The funding transaction is now confirmed, wait for the
- // OpenStatusUpdate_ChanOpen update
- waitForOpenUpdate(t, updateChan)
-}
-
-// TestFundingManagerMaxPendingChannels checks that trying to open another
-// channel with the same peer when MaxPending channels are pending fails.
-func TestFundingManagerMaxPendingChannels(t *testing.T) {
- t.Parallel()
-
- alice, bob := setupFundingManagers(
- t, func(cfg *fundingConfig) {
- cfg.MaxPendingChannels = maxPending
- },
- )
- defer tearDownFundingManagers(t, alice, bob)
-
- // Create openChanReqs for maxPending+1 channels.
- var initReqs []*openChanReq
- for i := 0; i < maxPending+1; i++ {
- updateChan := make(chan *lnrpc.OpenStatusUpdate)
- errChan := make(chan er.R, 1)
- initReq := &openChanReq{
- targetPubkey: bob.privKey.PubKey(),
- chainHash: *fundingNetParams.GenesisHash,
- localFundingAmt: 5000000,
- pushAmt: lnwire.NewMSatFromSatoshis(0),
- private: false,
- updates: updateChan,
- err: errChan,
- }
- initReqs = append(initReqs, initReq)
- }
-
- // Kick of maxPending+1 funding workflows.
- var accepts []*lnwire.AcceptChannel
- var lastOpen *lnwire.OpenChannel
- for i, initReq := range initReqs {
- alice.fundingMgr.initFundingWorkflow(bob, initReq)
-
- // Alice should have sent the OpenChannel message to Bob.
- var aliceMsg lnwire.Message
- select {
- case aliceMsg = <-alice.msgChan:
- case err := <-initReq.err:
- t.Fatalf("error init funding workflow: %v", err)
- case <-time.After(time.Second * 5):
- t.Fatalf("alice did not send OpenChannel message")
- }
-
- openChannelReq, ok := aliceMsg.(*lnwire.OpenChannel)
- if !ok {
- errorMsg, gotError := aliceMsg.(*lnwire.Error)
- if gotError {
- t.Fatalf("expected OpenChannel to be sent "+
- "from bob, instead got error: %v",
- errorMsg.Error())
- }
- t.Fatalf("expected OpenChannel to be sent from "+
- "alice, instead got %T", aliceMsg)
- }
-
- // Let Bob handle the init message.
- bob.fundingMgr.ProcessFundingMsg(openChannelReq, alice)
-
- // Bob should answer with an AcceptChannel message for the
- // first maxPending channels.
- if i < maxPending {
- acceptChannelResponse := assertFundingMsgSent(
- t, bob.msgChan, "AcceptChannel",
- ).(*lnwire.AcceptChannel)
- accepts = append(accepts, acceptChannelResponse)
- continue
- }
-
- // For the last channel, Bob should answer with an error.
- lastOpen = openChannelReq
- _ = assertFundingMsgSent(
- t, bob.msgChan, "Error",
- ).(*lnwire.Error)
-
- }
-
- // Forward the responses to Alice.
- var signs []*lnwire.FundingSigned
- for _, accept := range accepts {
- alice.fundingMgr.ProcessFundingMsg(accept, bob)
-
- // Alice responds with a FundingCreated message.
- fundingCreated := assertFundingMsgSent(
- t, alice.msgChan, "FundingCreated",
- ).(*lnwire.FundingCreated)
-
- // Give the message to Bob.
- bob.fundingMgr.ProcessFundingMsg(fundingCreated, alice)
-
- // Finally, Bob should send the FundingSigned message.
- fundingSigned := assertFundingMsgSent(
- t, bob.msgChan, "FundingSigned",
- ).(*lnwire.FundingSigned)
-
- signs = append(signs, fundingSigned)
- }
-
- // Sending another init request from Alice should still make Bob
- // respond with an error.
- bob.fundingMgr.ProcessFundingMsg(lastOpen, alice)
- _ = assertFundingMsgSent(
- t, bob.msgChan, "Error",
- ).(*lnwire.Error)
-
- // Give the FundingSigned messages to Alice.
- var txs []*wire.MsgTx
- for i, sign := range signs {
- alice.fundingMgr.ProcessFundingMsg(sign, bob)
-
- // Alice should send a status update for each channel, and
- // publish a funding tx to the network.
- var pendingUpdate *lnrpc.OpenStatusUpdate
- select {
- case pendingUpdate = <-initReqs[i].updates:
- case <-time.After(time.Second * 5):
- t.Fatalf("alice did not send OpenStatusUpdate_ChanPending")
- }
-
- _, ok := pendingUpdate.Update.(*lnrpc.OpenStatusUpdate_ChanPending)
- if !ok {
- t.Fatal("OpenStatusUpdate was not OpenStatusUpdate_ChanPending")
- }
-
- select {
- case tx := <-alice.publTxChan:
- txs = append(txs, tx)
- case <-time.After(time.Second * 5):
- t.Fatalf("alice did not publish funding tx")
- }
-
- }
-
- // Sending another init request from Alice should still make Bob
- // respond with an error, since the funding transactions are not
- // confirmed yet,
- bob.fundingMgr.ProcessFundingMsg(lastOpen, alice)
- _ = assertFundingMsgSent(
- t, bob.msgChan, "Error",
- ).(*lnwire.Error)
-
- // Notify that the transactions were mined.
- for i := 0; i < maxPending; i++ {
- alice.mockNotifier.oneConfChannel <- &chainntnfs.TxConfirmation{
- Tx: txs[i],
- }
- bob.mockNotifier.oneConfChannel <- &chainntnfs.TxConfirmation{
- Tx: txs[i],
- }
-
- // Expect both to be sending FundingLocked.
- _ = assertFundingMsgSent(
- t, alice.msgChan, "FundingLocked",
- ).(*lnwire.FundingLocked)
-
- _ = assertFundingMsgSent(
- t, bob.msgChan, "FundingLocked",
- ).(*lnwire.FundingLocked)
-
- }
-
- // Now opening another channel should work.
- bob.fundingMgr.ProcessFundingMsg(lastOpen, alice)
-
- // Bob should answer with an AcceptChannel message.
- _ = assertFundingMsgSent(
- t, bob.msgChan, "AcceptChannel",
- ).(*lnwire.AcceptChannel)
-}
-
-// TestFundingManagerRejectPush checks behaviour of 'rejectpush'
-// option, namely that non-zero incoming push amounts are disabled.
-func TestFundingManagerRejectPush(t *testing.T) {
- t.Parallel()
-
- // Enable 'rejectpush' option and initialize funding managers.
- alice, bob := setupFundingManagers(
- t, func(cfg *fundingConfig) {
- cfg.RejectPush = true
- },
- )
- defer tearDownFundingManagers(t, alice, bob)
-
- // Create a funding request and start the workflow.
- updateChan := make(chan *lnrpc.OpenStatusUpdate)
- errChan := make(chan er.R, 1)
- initReq := &openChanReq{
- targetPubkey: bob.privKey.PubKey(),
- chainHash: *fundingNetParams.GenesisHash,
- localFundingAmt: 500000,
- pushAmt: lnwire.NewMSatFromSatoshis(10),
- private: true,
- updates: updateChan,
- err: errChan,
- }
-
- alice.fundingMgr.initFundingWorkflow(bob, initReq)
-
- // Alice should have sent the OpenChannel message to Bob.
- var aliceMsg lnwire.Message
- select {
- case aliceMsg = <-alice.msgChan:
- case err := <-initReq.err:
- t.Fatalf("error init funding workflow: %v", err)
- case <-time.After(time.Second * 5):
- t.Fatalf("alice did not send OpenChannel message")
- }
-
- openChannelReq, ok := aliceMsg.(*lnwire.OpenChannel)
- if !ok {
- errorMsg, gotError := aliceMsg.(*lnwire.Error)
- if gotError {
- t.Fatalf("expected OpenChannel to be sent "+
- "from bob, instead got error: %v",
- errorMsg.Error())
- }
- t.Fatalf("expected OpenChannel to be sent from "+
- "alice, instead got %T", aliceMsg)
- }
-
- // Let Bob handle the init message.
- bob.fundingMgr.ProcessFundingMsg(openChannelReq, alice)
-
- // Assert Bob responded with an ErrNonZeroPushAmount error.
- err := assertFundingMsgSent(t, bob.msgChan, "Error").(*lnwire.Error)
- if !strings.Contains(err.Error(), "non-zero push amounts are disabled") {
- t.Fatalf("expected ErrNonZeroPushAmount error, got \"%v\"",
- err.Error())
- }
-}
-
-// TestFundingManagerMaxConfs ensures that we don't accept a funding proposal
-// that proposes a MinAcceptDepth greater than the maximum number of
-// confirmations we're willing to accept.
-func TestFundingManagerMaxConfs(t *testing.T) {
- t.Parallel()
-
- alice, bob := setupFundingManagers(t)
- defer tearDownFundingManagers(t, alice, bob)
-
- // Create a funding request and start the workflow.
- updateChan := make(chan *lnrpc.OpenStatusUpdate)
- errChan := make(chan er.R, 1)
- initReq := &openChanReq{
- targetPubkey: bob.privKey.PubKey(),
- chainHash: *fundingNetParams.GenesisHash,
- localFundingAmt: 500000,
- pushAmt: lnwire.NewMSatFromSatoshis(10),
- private: false,
- updates: updateChan,
- err: errChan,
- }
-
- alice.fundingMgr.initFundingWorkflow(bob, initReq)
-
- // Alice should have sent the OpenChannel message to Bob.
- var aliceMsg lnwire.Message
- select {
- case aliceMsg = <-alice.msgChan:
- case err := <-initReq.err:
- t.Fatalf("error init funding workflow: %v", err)
- case <-time.After(time.Second * 5):
- t.Fatalf("alice did not send OpenChannel message")
- }
-
- openChannelReq, ok := aliceMsg.(*lnwire.OpenChannel)
- if !ok {
- errorMsg, gotError := aliceMsg.(*lnwire.Error)
- if gotError {
- t.Fatalf("expected OpenChannel to be sent "+
- "from bob, instead got error: %v",
- errorMsg.Error())
- }
- t.Fatalf("expected OpenChannel to be sent from "+
- "alice, instead got %T", aliceMsg)
- }
-
- // Let Bob handle the init message.
- bob.fundingMgr.ProcessFundingMsg(openChannelReq, alice)
-
- // Bob should answer with an AcceptChannel message.
- acceptChannelResponse := assertFundingMsgSent(
- t, bob.msgChan, "AcceptChannel",
- ).(*lnwire.AcceptChannel)
-
- // Modify the AcceptChannel message Bob is proposing to including a
- // MinAcceptDepth Alice won't be willing to accept.
- acceptChannelResponse.MinAcceptDepth = chainntnfs.MaxNumConfs + 1
-
- alice.fundingMgr.ProcessFundingMsg(acceptChannelResponse, bob)
-
- // Alice should respond back with an error indicating MinAcceptDepth is
- // too large.
- err := assertFundingMsgSent(t, alice.msgChan, "Error").(*lnwire.Error)
- if !strings.Contains(err.Error(), "minimum depth") {
- t.Fatalf("expected ErrNumConfsTooLarge, got \"%v\"",
- err.Error())
- }
-}
-
-// TestFundingManagerFundAll tests that we can initiate a funding request to
-// use the funds remaining in the wallet. This should produce a funding tx with
-// no change output.
-func TestFundingManagerFundAll(t *testing.T) {
- t.Parallel()
-
- // We set up our mock wallet to control a list of UTXOs that sum to
- // less than the max channel size.
- allCoins := []*lnwallet.Utxo{
- {
- AddressType: lnwallet.WitnessPubKey,
- Value: btcutil.Amount(
- 0.05 * btcutil.UnitsPerCoinF(),
- ),
- PkScript: mock.CoinPkScript,
- OutPoint: wire.OutPoint{
- Hash: chainhash.Hash{},
- Index: 0,
- },
- },
- {
- AddressType: lnwallet.WitnessPubKey,
- Value: btcutil.Amount(
- 0.06 * btcutil.UnitsPerCoinF(),
- ),
- PkScript: mock.CoinPkScript,
- OutPoint: wire.OutPoint{
- Hash: chainhash.Hash{},
- Index: 1,
- },
- },
- }
-
- tests := []struct {
- spendAmt btcutil.Amount
- change bool
- }{
- {
- // We will spend all the funds in the wallet, and
- // expects no change output.
- spendAmt: btcutil.Amount(
- 0.11 * btcutil.UnitsPerCoinF(),
- ),
- change: false,
- },
- {
- // We spend a little less than the funds in the wallet,
- // so a change output should be created.
- spendAmt: btcutil.Amount(
- 0.10 * btcutil.UnitsPerCoinF(),
- ),
- change: true,
- },
- }
-
- for _, test := range tests {
- alice, bob := setupFundingManagers(t)
- defer tearDownFundingManagers(t, alice, bob)
-
- alice.fundingMgr.cfg.Wallet.WalletController.(*mock.WalletController).Utxos = allCoins
-
- // We will consume the channel updates as we go, so no
- // buffering is needed.
- updateChan := make(chan *lnrpc.OpenStatusUpdate)
-
- // Initiate a fund channel, and inspect the funding tx.
- pushAmt := btcutil.Amount(0)
- fundingTx := fundChannel(
- t, alice, bob, test.spendAmt, pushAmt, true, 1,
- updateChan, true,
- )
-
- // Check whether the expected change output is present.
- if test.change && len(fundingTx.TxOut) != 2 {
- t.Fatalf("expected 2 outputs, had %v",
- len(fundingTx.TxOut))
- }
-
- if !test.change && len(fundingTx.TxOut) != 1 {
- t.Fatalf("expected 1 output, had %v",
- len(fundingTx.TxOut))
- }
-
- // Inputs should be all funds in the wallet.
- if len(fundingTx.TxIn) != len(allCoins) {
- t.Fatalf("Had %d inputs, expected %d",
- len(fundingTx.TxIn), len(allCoins))
- }
-
- for i, txIn := range fundingTx.TxIn {
- if txIn.PreviousOutPoint != allCoins[i].OutPoint {
- t.Fatalf("expected outpoint to be %v, was %v",
- allCoins[i].OutPoint,
- txIn.PreviousOutPoint)
- }
- }
- }
-}
-
-// TestGetUpfrontShutdown tests different combinations of inputs for getting a
-// shutdown script. It varies whether the peer has the feature set, whether
-// the user has provided a script and our local configuration to test that
-// GetUpfrontShutdownScript returns the expected outcome.
-func TestGetUpfrontShutdownScript(t *testing.T) {
- upfrontScript := []byte("upfront script")
- generatedScript := []byte("generated script")
-
- getScript := func() (lnwire.DeliveryAddress, er.R) {
- return generatedScript, nil
- }
-
- tests := []struct {
- name string
- getScript func() (lnwire.DeliveryAddress, er.R)
- upfrontScript lnwire.DeliveryAddress
- peerEnabled bool
- localEnabled bool
- expectedScript lnwire.DeliveryAddress
- expectedErr *er.ErrorCode
- }{
- {
- name: "peer disabled, no shutdown",
- getScript: getScript,
- },
- {
- name: "peer disabled, upfront provided",
- upfrontScript: upfrontScript,
- expectedErr: errUpfrontShutdownScriptNotSupported,
- },
- {
- name: "peer enabled, upfront provided",
- upfrontScript: upfrontScript,
- peerEnabled: true,
- expectedScript: upfrontScript,
- },
- {
- name: "peer enabled, local disabled",
- peerEnabled: true,
- },
- {
- name: "local enabled, no upfront script",
- getScript: getScript,
- peerEnabled: true,
- localEnabled: true,
- expectedScript: generatedScript,
- },
- {
- name: "local enabled, upfront script",
- peerEnabled: true,
- upfrontScript: upfrontScript,
- localEnabled: true,
- expectedScript: upfrontScript,
- },
- }
-
- for _, test := range tests {
- test := test
-
- t.Run(test.name, func(t *testing.T) {
- var mockPeer testNode
-
- // If the remote peer in the test should support upfront shutdown,
- // add the feature bit.
- if test.peerEnabled {
- mockPeer.remoteFeatures = []lnwire.FeatureBit{
- lnwire.UpfrontShutdownScriptOptional,
- }
- }
-
- addr, err := getUpfrontShutdownScript(
- test.localEnabled, &mockPeer, test.upfrontScript,
- test.getScript,
- )
- if !er.Cis(test.expectedErr, err) {
- t.Fatalf("got: %v, expected error: %v", err, test.expectedErr)
- }
-
- if !bytes.Equal(addr, test.expectedScript) {
- t.Fatalf("expected address: %x, got: %x",
- test.expectedScript, addr)
- }
-
- })
- }
-}
-
-func expectOpenChannelMsg(t *testing.T, msgChan chan lnwire.Message) *lnwire.OpenChannel {
- var msg lnwire.Message
- select {
- case msg = <-msgChan:
- case <-time.After(time.Second * 5):
- t.Fatalf("node did not send OpenChannel message")
- }
-
- openChannelReq, ok := msg.(*lnwire.OpenChannel)
- if !ok {
- errorMsg, gotError := msg.(*lnwire.Error)
- if gotError {
- t.Fatalf("expected OpenChannel to be sent "+
- "from bob, instead got error: %v",
- errorMsg.Error())
- }
- t.Fatalf("expected OpenChannel to be sent, instead got %T",
- msg)
- }
-
- return openChannelReq
-}
-
-func TestMaxChannelSizeConfig(t *testing.T) {
- t.Parallel()
-
- // Create a set of funding managers that will reject wumbo
- // channels but set --maxchansize explicitly lower than soft-limit.
- // Verify that wumbo rejecting funding managers will respect --maxchansize
- // below 16777215 satoshi (MaxFundingAmount) limit.
- alice, bob := setupFundingManagers(t, func(cfg *fundingConfig) {
- cfg.NoWumboChans = true
- cfg.MaxChanSize = MaxFundingAmount - 1
- })
-
- // Attempt to create a channel above the limit
- // imposed by --maxchansize, which should be rejected.
- updateChan := make(chan *lnrpc.OpenStatusUpdate)
- errChan := make(chan er.R, 1)
- initReq := &openChanReq{
- targetPubkey: bob.privKey.PubKey(),
- chainHash: *fundingNetParams.GenesisHash,
- localFundingAmt: MaxFundingAmount,
- pushAmt: lnwire.NewMSatFromSatoshis(0),
- private: false,
- updates: updateChan,
- err: errChan,
- }
-
- // After processing the funding open message, bob should respond with
- // an error rejecting the channel that exceeds size limit.
- alice.fundingMgr.initFundingWorkflow(bob, initReq)
- openChanMsg := expectOpenChannelMsg(t, alice.msgChan)
- bob.fundingMgr.ProcessFundingMsg(openChanMsg, alice)
- assertErrorSent(t, bob.msgChan)
-
- // Create a set of funding managers that will reject wumbo
- // channels but set --maxchansize explicitly higher than soft-limit
- // A --maxchansize greater than this limit should have no effect.
- tearDownFundingManagers(t, alice, bob)
- alice, bob = setupFundingManagers(t, func(cfg *fundingConfig) {
- cfg.NoWumboChans = true
- cfg.MaxChanSize = MaxFundingAmount + 1
- })
-
- // We expect Bob to respond with an Accept channel message.
- alice.fundingMgr.initFundingWorkflow(bob, initReq)
- openChanMsg = expectOpenChannelMsg(t, alice.msgChan)
- bob.fundingMgr.ProcessFundingMsg(openChanMsg, alice)
- assertFundingMsgSent(t, bob.msgChan, "AcceptChannel")
-
- // Verify that wumbo accepting funding managers will respect --maxchansize
- // Create the funding managers, this time allowing
- // wumbo channels but setting --maxchansize explicitly.
- tearDownFundingManagers(t, alice, bob)
- alice, bob = setupFundingManagers(t, func(cfg *fundingConfig) {
- cfg.NoWumboChans = false
- cfg.MaxChanSize = btcutil.Amount(100000000)
- })
-
- // Attempt to create a channel above the limit
- // imposed by --maxchansize, which should be rejected.
- initReq.localFundingAmt = btcutil.UnitsPerCoin() + 1
-
- // After processing the funding open message, bob should respond with
- // an error rejecting the channel that exceeds size limit.
- alice.fundingMgr.initFundingWorkflow(bob, initReq)
- openChanMsg = expectOpenChannelMsg(t, alice.msgChan)
- bob.fundingMgr.ProcessFundingMsg(openChanMsg, alice)
- assertErrorSent(t, bob.msgChan)
-}
-
-// TestWumboChannelConfig tests that the funding manager will respect the wumbo
-// channel config param when creating or accepting new channels.
-func TestWumboChannelConfig(t *testing.T) {
- t.Parallel()
-
- // First we'll create a set of funding managers that will reject wumbo
- // channels.
- alice, bob := setupFundingManagers(t, func(cfg *fundingConfig) {
- cfg.NoWumboChans = true
- })
-
- // If we attempt to initiate a new funding open request to Alice,
- // that's below the wumbo channel mark, we should be able to start the
- // funding process w/o issue.
- updateChan := make(chan *lnrpc.OpenStatusUpdate)
- errChan := make(chan er.R, 1)
- initReq := &openChanReq{
- targetPubkey: bob.privKey.PubKey(),
- chainHash: *fundingNetParams.GenesisHash,
- localFundingAmt: MaxFundingAmount,
- pushAmt: lnwire.NewMSatFromSatoshis(0),
- private: false,
- updates: updateChan,
- err: errChan,
- }
-
- // We expect Bob to respond with an Accept channel message.
- alice.fundingMgr.initFundingWorkflow(bob, initReq)
- openChanMsg := expectOpenChannelMsg(t, alice.msgChan)
- bob.fundingMgr.ProcessFundingMsg(openChanMsg, alice)
- assertFundingMsgSent(t, bob.msgChan, "AcceptChannel")
-
- // We'll now attempt to create a channel above the wumbo mark, which
- // should be rejected.
- initReq.localFundingAmt = btcutil.UnitsPerCoin()
-
- // After processing the funding open message, bob should respond with
- // an error rejecting the channel.
- alice.fundingMgr.initFundingWorkflow(bob, initReq)
- openChanMsg = expectOpenChannelMsg(t, alice.msgChan)
- bob.fundingMgr.ProcessFundingMsg(openChanMsg, alice)
- assertErrorSent(t, bob.msgChan)
-
- // Next, we'll re-create the funding managers, but this time allowing
- // wumbo channels explicitly.
- tearDownFundingManagers(t, alice, bob)
- alice, bob = setupFundingManagers(t, func(cfg *fundingConfig) {
- cfg.NoWumboChans = false
- cfg.MaxChanSize = MaxBtcFundingAmountWumbo
- })
-
- // We should now be able to initiate a wumbo channel funding w/o any
- // issues.
- alice.fundingMgr.initFundingWorkflow(bob, initReq)
- openChanMsg = expectOpenChannelMsg(t, alice.msgChan)
- bob.fundingMgr.ProcessFundingMsg(openChanMsg, alice)
- assertFundingMsgSent(t, bob.msgChan, "AcceptChannel")
-}
diff --git a/lnd/fuzz/brontide/fuzz_utils.go b/lnd/fuzz/brontide/fuzz_utils.go
deleted file mode 100644
index 870a0a4c..00000000
--- a/lnd/fuzz/brontide/fuzz_utils.go
+++ /dev/null
@@ -1,144 +0,0 @@
-// +build gofuzz
-
-package brontidefuzz
-
-import (
- "encoding/hex"
- "fmt"
-
- "github.com/pkt-cash/pktd/btcec"
- "github.com/davecgh/go-spew/spew"
- "github.com/pkt-cash/pktd/lnd/brontide"
- "github.com/pkt-cash/pktd/lnd/keychain"
-)
-
-var (
- initBytes = []byte{
- 0x81, 0xb6, 0x37, 0xd8, 0xfc, 0xd2, 0xc6, 0xda,
- 0x63, 0x59, 0xe6, 0x96, 0x31, 0x13, 0xa1, 0x17,
- 0xd, 0xe7, 0x95, 0xe4, 0xb7, 0x25, 0xb8, 0x4d,
- 0x1e, 0xb, 0x4c, 0xfd, 0x9e, 0xc5, 0x8c, 0xe9,
- }
-
- respBytes = []byte{
- 0xaa, 0xb6, 0x37, 0xd9, 0xfc, 0xd2, 0xc6, 0xda,
- 0x63, 0x59, 0xe6, 0x99, 0x31, 0x13, 0xa1, 0x17,
- 0xd, 0xe7, 0x95, 0xe9, 0xb7, 0x25, 0xb8, 0x4d,
- 0x1e, 0xb, 0x4c, 0xf9, 0x9e, 0xc5, 0x8c, 0xe9,
- }
-
- // Returns the initiator's ephemeral private key.
- initEphemeral = brontide.EphemeralGenerator(func() (*btcec.PrivateKey, er.R) {
- e := "121212121212121212121212121212121212121212121212121212" +
- "1212121212"
- eBytes, err := util.DecodeHex(e)
- if err != nil {
- return nil, err
- }
-
- priv, _ := btcec.PrivKeyFromBytes(btcec.S256(), eBytes)
- return priv, nil
- })
-
- // Returns the responder's ephemeral private key.
- respEphemeral = brontide.EphemeralGenerator(func() (*btcec.PrivateKey, er.R) {
- e := "222222222222222222222222222222222222222222222222222" +
- "2222222222222"
- eBytes, err := util.DecodeHex(e)
- if err != nil {
- return nil, err
- }
-
- priv, _ := btcec.PrivKeyFromBytes(btcec.S256(), eBytes)
- return priv, nil
- })
-)
-
-// completeHandshake takes two brontide machines (initiator, responder)
-// and completes the brontide handshake between them. If any part of the
-// handshake fails, this function will panic.
-func completeHandshake(initiator, responder *brontide.Machine) {
- if err := handshake(initiator, responder); err != nil {
- nilAndPanic(initiator, responder, err)
- }
-}
-
-// handshake actually completes the brontide handshake and bubbles up
-// an error to the calling function.
-func handshake(initiator, responder *brontide.Machine) er.R {
- // Generate ActOne and send to the responder.
- actOne, err := initiator.GenActOne()
- if err != nil {
- return err
- }
-
- if err := responder.RecvActOne(actOne); err != nil {
- return err
- }
-
- // Generate ActTwo and send to initiator.
- actTwo, err := responder.GenActTwo()
- if err != nil {
- return err
- }
-
- if err := initiator.RecvActTwo(actTwo); err != nil {
- return err
- }
-
- // Generate ActThree and send to responder.
- actThree, err := initiator.GenActThree()
- if err != nil {
- return err
- }
-
- return responder.RecvActThree(actThree)
-}
-
-// nilAndPanic first nils the initiator and responder's Curve fields and then
-// panics.
-func nilAndPanic(initiator, responder *brontide.Machine, err error) {
- if initiator != nil {
- initiator.SetCurveToNil()
- }
- if responder != nil {
- responder.SetCurveToNil()
- }
- panic(er.Errorf("error: %v, initiator: %v, responder: %v", err,
- spew.Sdump(initiator), spew.Sdump(responder)))
-}
-
-// getBrontideMachines returns two brontide machines that use random keys
-// everywhere.
-func getBrontideMachines() (*brontide.Machine, *brontide.Machine) {
- initPriv, _ := btcec.NewPrivateKey(btcec.S256())
- respPriv, _ := btcec.NewPrivateKey(btcec.S256())
- respPub := (*btcec.PublicKey)(&respPriv.PublicKey)
-
- initPrivECDH := &keychain.PrivKeyECDH{PrivKey: initPriv}
- respPrivECDH := &keychain.PrivKeyECDH{PrivKey: respPriv}
-
- initiator := brontide.NewBrontideMachine(true, initPrivECDH, respPub)
- responder := brontide.NewBrontideMachine(false, respPrivECDH, nil)
-
- return initiator, responder
-}
-
-// getStaticBrontideMachines returns two brontide machines that use static keys
-// everywhere.
-func getStaticBrontideMachines() (*brontide.Machine, *brontide.Machine) {
- initPriv, _ := btcec.PrivKeyFromBytes(btcec.S256(), initBytes)
- respPriv, respPub := btcec.PrivKeyFromBytes(btcec.S256(), respBytes)
-
- initPrivECDH := &keychain.PrivKeyECDH{PrivKey: initPriv}
- respPrivECDH := &keychain.PrivKeyECDH{PrivKey: respPriv}
-
- initiator := brontide.NewBrontideMachine(
- true, initPrivECDH, respPub, initEphemeral,
- )
- responder := brontide.NewBrontideMachine(
- false, respPrivECDH, nil, respEphemeral,
- )
-
- return initiator, responder
-}
diff --git a/lnd/fuzz/brontide/random_actone.go b/lnd/fuzz/brontide/random_actone.go
deleted file mode 100644
index 9a84367d..00000000
--- a/lnd/fuzz/brontide/random_actone.go
+++ /dev/null
@@ -1,30 +0,0 @@
-// +build gofuzz
-
-package brontidefuzz
-
-import (
- "github.com/pkt-cash/pktd/lnd/brontide"
-)
-
-// Fuzz_random_actone is a go-fuzz harness for ActOne in the brontide
-// handshake.
-func Fuzz_random_actone(data []byte) int {
- // Check if data is large enough.
- if len(data) < brontide.ActOneSize {
- return 1
- }
-
- // This will return brontide machines with random keys.
- _, responder := getBrontideMachines()
-
- // Copy data into [ActOneSize]byte.
- var actOne [brontide.ActOneSize]byte
- copy(actOne[:], data)
-
- // Responder receives ActOne, should fail on the MAC check.
- if err := responder.RecvActOne(actOne); err == nil {
- nilAndPanic(nil, responder, nil)
- }
-
- return 1
-}
diff --git a/lnd/fuzz/brontide/random_actthree.go b/lnd/fuzz/brontide/random_actthree.go
deleted file mode 100644
index 11d1597b..00000000
--- a/lnd/fuzz/brontide/random_actthree.go
+++ /dev/null
@@ -1,50 +0,0 @@
-// +build gofuzz
-
-package brontidefuzz
-
-import (
- "github.com/pkt-cash/pktd/lnd/brontide"
-)
-
-// Fuzz_random_actthree is a go-fuzz harness for ActThree in the brontide
-// handshake.
-func Fuzz_random_actthree(data []byte) int {
- // Check if data is large enough.
- if len(data) < brontide.ActThreeSize {
- return 1
- }
-
- // This will return brontide machines with random keys.
- initiator, responder := getBrontideMachines()
-
- // Generate ActOne and send to the responder.
- actOne, err := initiator.GenActOne()
- if err != nil {
- nilAndPanic(initiator, responder, err)
- }
-
- // Receiving ActOne should succeed, so we panic on error.
- if err := responder.RecvActOne(actOne); err != nil {
- nilAndPanic(initiator, responder, err)
- }
-
- // Generate ActTwo - this is not sent to the initiator because nothing is
- // done with the initiator after this point and it would slow down fuzzing.
- // GenActTwo needs to be called to set the appropriate state in the
- // responder machine.
- _, err = responder.GenActTwo()
- if err != nil {
- nilAndPanic(initiator, responder, err)
- }
-
- // Copy data into [ActThreeSize]byte.
- var actThree [brontide.ActThreeSize]byte
- copy(actThree[:], data)
-
- // Responder receives ActThree, should fail on the MAC check.
- if err := responder.RecvActThree(actThree); err == nil {
- nilAndPanic(initiator, responder, nil)
- }
-
- return 1
-}
diff --git a/lnd/fuzz/brontide/random_acttwo.go b/lnd/fuzz/brontide/random_acttwo.go
deleted file mode 100644
index 664aa5f2..00000000
--- a/lnd/fuzz/brontide/random_acttwo.go
+++ /dev/null
@@ -1,39 +0,0 @@
-// +build gofuzz
-
-package brontidefuzz
-
-import (
- "github.com/pkt-cash/pktd/lnd/brontide"
-)
-
-// Fuzz_random_acttwo is a go-fuzz harness for ActTwo in the brontide
-// handshake.
-func Fuzz_random_acttwo(data []byte) int {
- // Check if data is large enough.
- if len(data) < brontide.ActTwoSize {
- return 1
- }
-
- // This will return brontide machines with random keys.
- initiator, _ := getBrontideMachines()
-
- // Generate ActOne - this isn't sent to the responder because nothing is
- // done with the responder machine and this would slow down fuzzing.
- // GenActOne needs to be called to set the appropriate state in the
- // initiator machine.
- _, err := initiator.GenActOne()
- if err != nil {
- nilAndPanic(initiator, nil, err)
- }
-
- // Copy data into [ActTwoSize]byte.
- var actTwo [brontide.ActTwoSize]byte
- copy(actTwo[:], data)
-
- // Initiator receives ActTwo, should fail.
- if err := initiator.RecvActTwo(actTwo); err == nil {
- nilAndPanic(initiator, nil, nil)
- }
-
- return 1
-}
diff --git a/lnd/fuzz/brontide/random_init_decrypt.go b/lnd/fuzz/brontide/random_init_decrypt.go
deleted file mode 100644
index 3328a2b6..00000000
--- a/lnd/fuzz/brontide/random_init_decrypt.go
+++ /dev/null
@@ -1,27 +0,0 @@
-// +build gofuzz
-
-package brontidefuzz
-
-import (
- "bytes"
-)
-
-// Fuzz_random_init_decrypt is a go-fuzz harness that decrypts arbitrary data
-// with the initiator.
-func Fuzz_random_init_decrypt(data []byte) int {
- // This will return brontide machines with random keys.
- initiator, responder := getBrontideMachines()
-
- // Complete the brontide handshake.
- completeHandshake(initiator, responder)
-
- // Create a reader with the byte array.
- r := bytes.NewReader(data)
-
- // Decrypt the encrypted message using ReadMessage w/ initiator machine.
- if _, err := initiator.ReadMessage(r); err == nil {
- nilAndPanic(initiator, responder, nil)
- }
-
- return 1
-}
diff --git a/lnd/fuzz/brontide/random_init_enc_dec.go b/lnd/fuzz/brontide/random_init_enc_dec.go
deleted file mode 100644
index 6f1a7312..00000000
--- a/lnd/fuzz/brontide/random_init_enc_dec.go
+++ /dev/null
@@ -1,48 +0,0 @@
-// +build gofuzz
-
-package brontidefuzz
-
-import (
- "bytes"
- "math"
-)
-
-// Fuzz_random_init_enc_dec is a go-fuzz harness that tests round-trip
-// encryption and decryption between the initiator and the responder.
-func Fuzz_random_init_enc_dec(data []byte) int {
- // Ensure that length of message is not greater than max allowed size.
- if len(data) > math.MaxUint16 {
- return 1
- }
-
- // This will return brontide machines with random keys.
- initiator, responder := getBrontideMachines()
-
- // Complete the brontide handshake.
- completeHandshake(initiator, responder)
-
- var b bytes.Buffer
-
- // Encrypt the message using WriteMessage w/ initiator machine.
- if err := initiator.WriteMessage(data); err != nil {
- nilAndPanic(initiator, responder, err)
- }
-
- // Flush the encrypted message w/ initiator machine.
- if _, err := initiator.Flush(&b); err != nil {
- nilAndPanic(initiator, responder, err)
- }
-
- // Decrypt the ciphertext using ReadMessage w/ responder machine.
- plaintext, err := responder.ReadMessage(&b)
- if err != nil {
- nilAndPanic(initiator, responder, err)
- }
-
- // Check that the decrypted message and the original message are equal.
- if !bytes.Equal(data, plaintext) {
- nilAndPanic(initiator, responder, nil)
- }
-
- return 1
-}
diff --git a/lnd/fuzz/brontide/random_init_encrypt.go b/lnd/fuzz/brontide/random_init_encrypt.go
deleted file mode 100644
index 76f5dacd..00000000
--- a/lnd/fuzz/brontide/random_init_encrypt.go
+++ /dev/null
@@ -1,37 +0,0 @@
-// +build gofuzz
-
-package brontidefuzz
-
-import (
- "bytes"
- "math"
-)
-
-// Fuzz_random_init_encrypt is a go-fuzz harness that encrypts arbitrary data
-// with the initiator.
-func Fuzz_random_init_encrypt(data []byte) int {
- // Ensure that length of message is not greater than max allowed size.
- if len(data) > math.MaxUint16 {
- return 1
- }
-
- // This will return brontide machines with random keys.
- initiator, responder := getBrontideMachines()
-
- // Complete the brontide handshake.
- completeHandshake(initiator, responder)
-
- var b bytes.Buffer
-
- // Encrypt the message using WriteMessage w/ initiator machine.
- if err := initiator.WriteMessage(data); err != nil {
- nilAndPanic(initiator, responder, err)
- }
-
- // Flush the encrypted message w/ initiator machine.
- if _, err := initiator.Flush(&b); err != nil {
- nilAndPanic(initiator, responder, err)
- }
-
- return 1
-}
diff --git a/lnd/fuzz/brontide/random_resp_decrypt.go b/lnd/fuzz/brontide/random_resp_decrypt.go
deleted file mode 100644
index 1ae40bd2..00000000
--- a/lnd/fuzz/brontide/random_resp_decrypt.go
+++ /dev/null
@@ -1,27 +0,0 @@
-// +build gofuzz
-
-package brontidefuzz
-
-import (
- "bytes"
-)
-
-// Fuzz_random_resp_decrypt is a go-fuzz harness that decrypts arbitrary data
-// with the responder.
-func Fuzz_random_resp_decrypt(data []byte) int {
- // This will return brontide machines with random keys.
- initiator, responder := getBrontideMachines()
-
- // Complete the brontide handshake.
- completeHandshake(initiator, responder)
-
- // Create a reader with the byte array.
- r := bytes.NewReader(data)
-
- // Decrypt the encrypted message using ReadMessage w/ responder machine.
- if _, err := responder.ReadMessage(r); err == nil {
- nilAndPanic(initiator, responder, nil)
- }
-
- return 1
-}
diff --git a/lnd/fuzz/brontide/random_resp_enc_dec.go b/lnd/fuzz/brontide/random_resp_enc_dec.go
deleted file mode 100644
index f84e7c47..00000000
--- a/lnd/fuzz/brontide/random_resp_enc_dec.go
+++ /dev/null
@@ -1,48 +0,0 @@
-// +build gofuzz
-
-package brontidefuzz
-
-import (
- "bytes"
- "math"
-)
-
-// Fuzz_random_resp_enc_dec is a go-fuzz harness that tests round-trip
-// encryption and decryption between the responder and the initiator.
-func Fuzz_random_resp_enc_dec(data []byte) int {
- // Ensure that length of message is not greater than max allowed size.
- if len(data) > math.MaxUint16 {
- return 1
- }
-
- // This will return brontide machines with random keys.
- initiator, responder := getBrontideMachines()
-
- // Complete the brontide handshake.
- completeHandshake(initiator, responder)
-
- var b bytes.Buffer
-
- // Encrypt the message using WriteMessage w/ responder machine.
- if err := responder.WriteMessage(data); err != nil {
- nilAndPanic(initiator, responder, err)
- }
-
- // Flush the encrypted message w/ responder machine.
- if _, err := responder.Flush(&b); err != nil {
- nilAndPanic(initiator, responder, err)
- }
-
- // Decrypt the ciphertext using ReadMessage w/ initiator machine.
- plaintext, err := initiator.ReadMessage(&b)
- if err != nil {
- nilAndPanic(initiator, responder, err)
- }
-
- // Check that the decrypted message and the original message are equal.
- if !bytes.Equal(data, plaintext) {
- nilAndPanic(initiator, responder, nil)
- }
-
- return 1
-}
diff --git a/lnd/fuzz/brontide/random_resp_encrypt.go b/lnd/fuzz/brontide/random_resp_encrypt.go
deleted file mode 100644
index 5ac9abad..00000000
--- a/lnd/fuzz/brontide/random_resp_encrypt.go
+++ /dev/null
@@ -1,37 +0,0 @@
-// +build gofuzz
-
-package brontidefuzz
-
-import (
- "bytes"
- "math"
-)
-
-// Fuzz_random_resp_encrypt is a go-fuzz harness that encrypts arbitrary data
-// with the responder.
-func Fuzz_random_resp_encrypt(data []byte) int {
- // Ensure that length of message is not greater than max allowed size.
- if len(data) > math.MaxUint16 {
- return 1
- }
-
- // This will return brontide machines with random keys.
- initiator, responder := getBrontideMachines()
-
- // Complete the brontide handshake.
- completeHandshake(initiator, responder)
-
- var b bytes.Buffer
-
- // Encrypt the message using WriteMessage w/ responder machine.
- if err := responder.WriteMessage(data); err != nil {
- nilAndPanic(initiator, responder, err)
- }
-
- // Flush the encrypted message w/ responder machine.
- if _, err := responder.Flush(&b); err != nil {
- nilAndPanic(initiator, responder, err)
- }
-
- return 1
-}
diff --git a/lnd/fuzz/brontide/static_actone.go b/lnd/fuzz/brontide/static_actone.go
deleted file mode 100644
index 89eb2434..00000000
--- a/lnd/fuzz/brontide/static_actone.go
+++ /dev/null
@@ -1,30 +0,0 @@
-// +build gofuzz
-
-package brontidefuzz
-
-import (
- "github.com/pkt-cash/pktd/lnd/brontide"
-)
-
-// Fuzz_static_actone is a go-fuzz harness for ActOne in the brontide
-// handshake.
-func Fuzz_static_actone(data []byte) int {
- // Check if data is large enough.
- if len(data) < brontide.ActOneSize {
- return 1
- }
-
- // This will return brontide machines with static keys.
- _, responder := getStaticBrontideMachines()
-
- // Copy data into [ActOneSize]byte.
- var actOne [brontide.ActOneSize]byte
- copy(actOne[:], data)
-
- // Responder receives ActOne, should fail.
- if err := responder.RecvActOne(actOne); err == nil {
- nilAndPanic(nil, responder, nil)
- }
-
- return 1
-}
diff --git a/lnd/fuzz/brontide/static_actthree.go b/lnd/fuzz/brontide/static_actthree.go
deleted file mode 100644
index e4e34de1..00000000
--- a/lnd/fuzz/brontide/static_actthree.go
+++ /dev/null
@@ -1,50 +0,0 @@
-// +build gofuzz
-
-package brontidefuzz
-
-import (
- "github.com/pkt-cash/pktd/lnd/brontide"
-)
-
-// Fuzz_static_actthree is a go-fuzz harness for ActThree in the brontide
-// handshake.
-func Fuzz_static_actthree(data []byte) int {
- // Check if data is large enough.
- if len(data) < brontide.ActThreeSize {
- return 1
- }
-
- // This will return brontide machines with static keys.
- initiator, responder := getStaticBrontideMachines()
-
- // Generate ActOne and send to the responder.
- actOne, err := initiator.GenActOne()
- if err != nil {
- nilAndPanic(initiator, responder, err)
- }
-
- // Receiving ActOne should succeed, so we panic on error.
- if err := responder.RecvActOne(actOne); err != nil {
- nilAndPanic(initiator, responder, err)
- }
-
- // Generate ActTwo - this is not sent to the initiator because nothing is
- // done with the initiator after this point and it would slow down fuzzing.
- // GenActTwo needs to be called to set the appropriate state in the responder
- // machine.
- _, err = responder.GenActTwo()
- if err != nil {
- nilAndPanic(initiator, responder, err)
- }
-
- // Copy data into [ActThreeSize]byte.
- var actThree [brontide.ActThreeSize]byte
- copy(actThree[:], data)
-
- // Responder receives ActThree, should fail.
- if err := responder.RecvActThree(actThree); err == nil {
- nilAndPanic(initiator, responder, nil)
- }
-
- return 1
-}
diff --git a/lnd/fuzz/brontide/static_acttwo.go b/lnd/fuzz/brontide/static_acttwo.go
deleted file mode 100644
index 51da4fc2..00000000
--- a/lnd/fuzz/brontide/static_acttwo.go
+++ /dev/null
@@ -1,39 +0,0 @@
-// +build gofuzz
-
-package brontidefuzz
-
-import (
- "github.com/pkt-cash/pktd/lnd/brontide"
-)
-
-// Fuzz_static_acttwo is a go-fuzz harness for ActTwo in the brontide
-// handshake.
-func Fuzz_static_acttwo(data []byte) int {
- // Check if data is large enough.
- if len(data) < brontide.ActTwoSize {
- return 1
- }
-
- // This will return brontide machines with static keys.
- initiator, _ := getStaticBrontideMachines()
-
- // Generate ActOne - this isn't sent to the responder because nothing is
- // done with the responder machine and this would slow down fuzzing.
- // GenActOne needs to be called to set the appropriate state in the initiator
- // machine.
- _, err := initiator.GenActOne()
- if err != nil {
- nilAndPanic(initiator, nil, err)
- }
-
- // Copy data into [ActTwoSize]byte.
- var actTwo [brontide.ActTwoSize]byte
- copy(actTwo[:], data)
-
- // Initiator receives ActTwo, should fail.
- if err := initiator.RecvActTwo(actTwo); err == nil {
- nilAndPanic(initiator, nil, nil)
- }
-
- return 1
-}
diff --git a/lnd/fuzz/brontide/static_init_decrypt.go b/lnd/fuzz/brontide/static_init_decrypt.go
deleted file mode 100644
index 35525d20..00000000
--- a/lnd/fuzz/brontide/static_init_decrypt.go
+++ /dev/null
@@ -1,27 +0,0 @@
-// +build gofuzz
-
-package brontidefuzz
-
-import (
- "bytes"
-)
-
-// Fuzz_static_init_decrypt is a go-fuzz harness that decrypts arbitrary data
-// with the initiator.
-func Fuzz_static_init_decrypt(data []byte) int {
- // This will return brontide machines with static keys.
- initiator, responder := getStaticBrontideMachines()
-
- // Complete the brontide handshake.
- completeHandshake(initiator, responder)
-
- // Create a reader with the byte array.
- r := bytes.NewReader(data)
-
- // Decrypt the encrypted message using ReadMessage w/ initiator machine.
- if _, err := initiator.ReadMessage(r); err == nil {
- nilAndPanic(initiator, responder, nil)
- }
-
- return 1
-}
diff --git a/lnd/fuzz/brontide/static_init_enc_dec.go b/lnd/fuzz/brontide/static_init_enc_dec.go
deleted file mode 100644
index 81669db2..00000000
--- a/lnd/fuzz/brontide/static_init_enc_dec.go
+++ /dev/null
@@ -1,49 +0,0 @@
-// +build gofuzz
-
-package brontidefuzz
-
-import (
- "bytes"
- "math"
-)
-
-// Fuzz_static_init_enc_dec is a go-fuzz harness that tests round-trip
-// encryption and decryption
-// between the initiator and the responder.
-func Fuzz_static_init_enc_dec(data []byte) int {
- // Ensure that length of message is not greater than max allowed size.
- if len(data) > math.MaxUint16 {
- return 1
- }
-
- // This will return brontide machines with static keys.
- initiator, responder := getStaticBrontideMachines()
-
- // Complete the brontide handshake.
- completeHandshake(initiator, responder)
-
- var b bytes.Buffer
-
- // Encrypt the message using WriteMessage w/ initiator machine.
- if err := initiator.WriteMessage(data); err != nil {
- nilAndPanic(initiator, responder, err)
- }
-
- // Flush the encrypted message w/ initiator machine.
- if _, err := initiator.Flush(&b); err != nil {
- nilAndPanic(initiator, responder, err)
- }
-
- // Decrypt the ciphertext using ReadMessage w/ responder machine.
- plaintext, err := responder.ReadMessage(&b)
- if err != nil {
- nilAndPanic(initiator, responder, err)
- }
-
- // Check that the decrypted message and the original message are equal.
- if !bytes.Equal(data, plaintext) {
- nilAndPanic(initiator, responder, nil)
- }
-
- return 1
-}
diff --git a/lnd/fuzz/brontide/static_init_encrypt.go b/lnd/fuzz/brontide/static_init_encrypt.go
deleted file mode 100644
index 6c45a0b2..00000000
--- a/lnd/fuzz/brontide/static_init_encrypt.go
+++ /dev/null
@@ -1,37 +0,0 @@
-// +build gofuzz
-
-package brontidefuzz
-
-import (
- "bytes"
- "math"
-)
-
-// Fuzz_static_init_encrypt is a go-fuzz harness that encrypts arbitrary data
-// with the initiator.
-func Fuzz_static_init_encrypt(data []byte) int {
- // Ensure that length of message is not greater than max allowed size.
- if len(data) > math.MaxUint16 {
- return 1
- }
-
- // This will return brontide machines with static keys.
- initiator, responder := getStaticBrontideMachines()
-
- // Complete the brontide handshake.
- completeHandshake(initiator, responder)
-
- var b bytes.Buffer
-
- // Encrypt the message using WriteMessage w/ initiator machine.
- if err := initiator.WriteMessage(data); err != nil {
- nilAndPanic(initiator, responder, err)
- }
-
- // Flush the encrypted message w/ initiator machine.
- if _, err := initiator.Flush(&b); err != nil {
- nilAndPanic(initiator, responder, err)
- }
-
- return 1
-}
diff --git a/lnd/fuzz/brontide/static_resp_decrypt.go b/lnd/fuzz/brontide/static_resp_decrypt.go
deleted file mode 100644
index fee4500b..00000000
--- a/lnd/fuzz/brontide/static_resp_decrypt.go
+++ /dev/null
@@ -1,27 +0,0 @@
-// +build gofuzz
-
-package brontidefuzz
-
-import (
- "bytes"
-)
-
-// Fuzz_static_resp_decrypt is a go-fuzz harness that decrypts arbitrary data
-// with the responder.
-func Fuzz_static_resp_decrypt(data []byte) int {
- // This will return brontide machines with static keys.
- initiator, responder := getStaticBrontideMachines()
-
- // Complete the brontide handshake.
- completeHandshake(initiator, responder)
-
- // Create a reader with the byte array.
- r := bytes.NewReader(data)
-
- // Decrypt the encrypted message using ReadMessage w/ responder machine.
- if _, err := responder.ReadMessage(r); err == nil {
- nilAndPanic(initiator, responder, nil)
- }
-
- return 1
-}
diff --git a/lnd/fuzz/brontide/static_resp_enc_dec.go b/lnd/fuzz/brontide/static_resp_enc_dec.go
deleted file mode 100644
index aaa7c3e8..00000000
--- a/lnd/fuzz/brontide/static_resp_enc_dec.go
+++ /dev/null
@@ -1,48 +0,0 @@
-// +build gofuzz
-
-package brontidefuzz
-
-import (
- "bytes"
- "math"
-)
-
-// Fuzz_static_resp_enc_dec is a go-fuzz harness that tests round-trip
-// encryption and decryption between the responder and the initiator.
-func Fuzz_static_resp_enc_dec(data []byte) int {
- // Ensure that length of message is not greater than max allowed size.
- if len(data) > math.MaxUint16 {
- return 1
- }
-
- // This will return brontide machines with static keys.
- initiator, responder := getStaticBrontideMachines()
-
- // Complete the brontide handshake.
- completeHandshake(initiator, responder)
-
- var b bytes.Buffer
-
- // Encrypt the message using WriteMessage w/ responder machine.
- if err := responder.WriteMessage(data); err != nil {
- nilAndPanic(initiator, responder, err)
- }
-
- // Flush the encrypted message w/ responder machine.
- if _, err := responder.Flush(&b); err != nil {
- nilAndPanic(initiator, responder, err)
- }
-
- // Decrypt the ciphertext using ReadMessage w/ initiator machine.
- plaintext, err := initiator.ReadMessage(&b)
- if err != nil {
- nilAndPanic(initiator, responder, err)
- }
-
- // Check that the decrypted message and the original message are equal.
- if !bytes.Equal(data, plaintext) {
- nilAndPanic(initiator, responder, nil)
- }
-
- return 1
-}
diff --git a/lnd/fuzz/brontide/static_resp_encrypt.go b/lnd/fuzz/brontide/static_resp_encrypt.go
deleted file mode 100644
index 5fdc9036..00000000
--- a/lnd/fuzz/brontide/static_resp_encrypt.go
+++ /dev/null
@@ -1,37 +0,0 @@
-// +build gofuzz
-
-package brontidefuzz
-
-import (
- "bytes"
- "math"
-)
-
-// Fuzz_static_resp_encrypt is a go-fuzz harness that encrypts arbitrary data
-// with the responder.
-func Fuzz_static_resp_encrypt(data []byte) int {
- // Ensure that length of message is not greater than max allowed size.
- if len(data) > math.MaxUint16 {
- return 1
- }
-
- // This will return brontide machines with static keys.
- initiator, responder := getStaticBrontideMachines()
-
- // Complete the brontide handshake.
- completeHandshake(initiator, responder)
-
- var b bytes.Buffer
-
- // Encrypt the message using WriteMessage w/ responder machine.
- if err := responder.WriteMessage(data); err != nil {
- nilAndPanic(initiator, responder, err)
- }
-
- // Flush the encrypted message w/ responder machine.
- if _, err := responder.Flush(&b); err != nil {
- nilAndPanic(initiator, responder, err)
- }
-
- return 1
-}
diff --git a/lnd/fuzz/lnwire/accept_channel.go b/lnd/fuzz/lnwire/accept_channel.go
deleted file mode 100644
index 248b6b4b..00000000
--- a/lnd/fuzz/lnwire/accept_channel.go
+++ /dev/null
@@ -1,135 +0,0 @@
-// +build gofuzz
-
-package lnwirefuzz
-
-import (
- "bytes"
-
- "github.com/pkt-cash/pktd/lnd/lnwire"
-)
-
-// Fuzz_accept_channel is used by go-fuzz.
-func Fuzz_accept_channel(data []byte) int {
- // Prefix with MsgAcceptChannel.
- data = prefixWithMsgType(data, lnwire.MsgAcceptChannel)
-
- // Create an empty message so that the FuzzHarness func can check
- // if the max payload constraint is violated.
- emptyMsg := lnwire.AcceptChannel{}
-
- // We have to do this here instead of in fuzz.Harness so that
- // reflect.DeepEqual isn't called. Because of the UpfrontShutdownScript
- // encoding, the first message and second message aren't deeply equal since
- // the first has a nil slice and the other has an empty slice.
-
- // Create a reader with the byte array.
- r := bytes.NewReader(data)
-
- // Make sure byte array length (excluding 2 bytes for message type) is
- // less than max payload size for the wire message. We check this because
- // otherwise `go-fuzz` will keep creating inputs that crash on ReadMessage
- // due to a large message size.
- payloadLen := uint32(len(data)) - 2
- if payloadLen > emptyMsg.MaxPayloadLength(0) {
- // Ignore this input - max payload constraint violated.
- return 1
- }
-
- msg, err := lnwire.ReadMessage(r, 0)
- if err != nil {
- // go-fuzz generated []byte that cannot be represented as a
- // wire message but we will return 0 so go-fuzz can modify the
- // input.
- return 1
- }
-
- // We will serialize the message into a new bytes buffer.
- var b bytes.Buffer
- if _, err := lnwire.WriteMessage(&b, msg, 0); err != nil {
- // Could not serialize message into bytes buffer, panic
- panic(err)
- }
-
- // Deserialize the message from the serialized bytes buffer, and then
- // assert that the original message is equal to the newly deserialized
- // message.
- newMsg, err := lnwire.ReadMessage(&b, 0)
- if err != nil {
- // Could not deserialize message from bytes buffer, panic
- panic(err)
- }
-
- // Now compare every field instead of using reflect.DeepEqual.
- // For UpfrontShutdownScript, we only compare bytes. This probably takes
- // up more branches than necessary, but that's fine for now.
- var shouldPanic bool
- first := msg.(*lnwire.AcceptChannel)
- second := newMsg.(*lnwire.AcceptChannel)
-
- if !bytes.Equal(first.PendingChannelID[:], second.PendingChannelID[:]) {
- shouldPanic = true
- }
-
- if first.DustLimit != second.DustLimit {
- shouldPanic = true
- }
-
- if first.MaxValueInFlight != second.MaxValueInFlight {
- shouldPanic = true
- }
-
- if first.ChannelReserve != second.ChannelReserve {
- shouldPanic = true
- }
-
- if first.HtlcMinimum != second.HtlcMinimum {
- shouldPanic = true
- }
-
- if first.MinAcceptDepth != second.MinAcceptDepth {
- shouldPanic = true
- }
-
- if first.CsvDelay != second.CsvDelay {
- shouldPanic = true
- }
-
- if first.MaxAcceptedHTLCs != second.MaxAcceptedHTLCs {
- shouldPanic = true
- }
-
- if !first.FundingKey.IsEqual(second.FundingKey) {
- shouldPanic = true
- }
-
- if !first.RevocationPoint.IsEqual(second.RevocationPoint) {
- shouldPanic = true
- }
-
- if !first.PaymentPoint.IsEqual(second.PaymentPoint) {
- shouldPanic = true
- }
-
- if !first.DelayedPaymentPoint.IsEqual(second.DelayedPaymentPoint) {
- shouldPanic = true
- }
-
- if !first.HtlcPoint.IsEqual(second.HtlcPoint) {
- shouldPanic = true
- }
-
- if !first.FirstCommitmentPoint.IsEqual(second.FirstCommitmentPoint) {
- shouldPanic = true
- }
-
- if !bytes.Equal(first.UpfrontShutdownScript, second.UpfrontShutdownScript) {
- shouldPanic = true
- }
-
- if shouldPanic {
- panic("original message and deserialized message are not equal")
- }
-
- // Add this input to the corpus.
- return 1
-}
diff --git a/lnd/fuzz/lnwire/announce_signatures.go b/lnd/fuzz/lnwire/announce_signatures.go
deleted file mode 100644
index bb72dccc..00000000
--- a/lnd/fuzz/lnwire/announce_signatures.go
+++ /dev/null
@@ -1,20 +0,0 @@
-// +build gofuzz
-
-package lnwirefuzz
-
-import (
- "github.com/pkt-cash/pktd/lnd/lnwire"
-)
-
-// Fuzz_announce_signatures is used by go-fuzz.
-func Fuzz_announce_signatures(data []byte) int {
- // Prefix with MsgAnnounceSignatures.
- data = prefixWithMsgType(data, lnwire.MsgAnnounceSignatures)
-
- // Create an empty message so that the FuzzHarness func can check
- // if the max payload constraint is violated.
- emptyMsg := lnwire.AnnounceSignatures{}
-
- // Pass the message into our general fuzz harness for wire messages!
- return harness(data, &emptyMsg)
-}
diff --git a/lnd/fuzz/lnwire/channel_announcement.go b/lnd/fuzz/lnwire/channel_announcement.go
deleted file mode 100644
index 6d30d8bb..00000000
--- a/lnd/fuzz/lnwire/channel_announcement.go
+++ /dev/null
@@ -1,20 +0,0 @@
-// +build gofuzz
-
-package lnwirefuzz
-
-import (
- "github.com/pkt-cash/pktd/lnd/lnwire"
-)
-
-// Fuzz_channel_announcement is used by go-fuzz.
-func Fuzz_channel_announcement(data []byte) int {
- // Prefix with MsgChannelAnnouncement.
- data = prefixWithMsgType(data, lnwire.MsgChannelAnnouncement)
-
- // Create an empty message so that the FuzzHarness func can check
- // if the max payload constraint is violated.
- emptyMsg := lnwire.ChannelAnnouncement{}
-
- // Pass the message into our general fuzz harness for wire messages!
- return harness(data, &emptyMsg)
-}
diff --git a/lnd/fuzz/lnwire/channel_reestablish.go b/lnd/fuzz/lnwire/channel_reestablish.go
deleted file mode 100644
index 54d9104c..00000000
--- a/lnd/fuzz/lnwire/channel_reestablish.go
+++ /dev/null
@@ -1,20 +0,0 @@
-// +build gofuzz
-
-package lnwirefuzz
-
-import (
- "github.com/pkt-cash/pktd/lnd/lnwire"
-)
-
-// Fuzz_channel_reestablish is used by go-fuzz.
-func Fuzz_channel_reestablish(data []byte) int {
- // Prefix with MsgChannelReestablish.
- data = prefixWithMsgType(data, lnwire.MsgChannelReestablish)
-
- // Create an empty message so that the FuzzHarness func can check
- // if the max payload constraint is violated.
- emptyMsg := lnwire.ChannelReestablish{}
-
- // Pass the message into our general fuzz harness for wire messages!
- return harness(data, &emptyMsg)
-}
diff --git a/lnd/fuzz/lnwire/channel_update.go b/lnd/fuzz/lnwire/channel_update.go
deleted file mode 100644
index 97a54f4b..00000000
--- a/lnd/fuzz/lnwire/channel_update.go
+++ /dev/null
@@ -1,20 +0,0 @@
-// +build gofuzz
-
-package lnwirefuzz
-
-import (
- "github.com/pkt-cash/pktd/lnd/lnwire"
-)
-
-// Fuzz_channel_update is used by go-fuzz.
-func Fuzz_channel_update(data []byte) int {
- // Prefix with MsgChannelUpdate.
- data = prefixWithMsgType(data, lnwire.MsgChannelUpdate)
-
- // Create an empty message so that the FuzzHarness func can check
- // if the max payload constraint is violated.
- emptyMsg := lnwire.ChannelUpdate{}
-
- // Pass the message into our general fuzz harness for wire messages!
- return harness(data, &emptyMsg)
-}
diff --git a/lnd/fuzz/lnwire/closing_signed.go b/lnd/fuzz/lnwire/closing_signed.go
deleted file mode 100644
index 920f2242..00000000
--- a/lnd/fuzz/lnwire/closing_signed.go
+++ /dev/null
@@ -1,20 +0,0 @@
-// +build gofuzz
-
-package lnwirefuzz
-
-import (
- "github.com/pkt-cash/pktd/lnd/lnwire"
-)
-
-// Fuzz_closing_signed is used by go-fuzz.
-func Fuzz_closing_signed(data []byte) int {
- // Prefix with MsgClosingSigned.
- data = prefixWithMsgType(data, lnwire.MsgClosingSigned)
-
- // Create an empty message so that the FuzzHarness func can check
- // if the max payload constraint is violated.
- emptyMsg := lnwire.ClosingSigned{}
-
- // Pass the message into our general fuzz harness for wire messages!
- return harness(data, &emptyMsg)
-}
diff --git a/lnd/fuzz/lnwire/commit_sig.go b/lnd/fuzz/lnwire/commit_sig.go
deleted file mode 100644
index d157de9e..00000000
--- a/lnd/fuzz/lnwire/commit_sig.go
+++ /dev/null
@@ -1,20 +0,0 @@
-// +build gofuzz
-
-package lnwirefuzz
-
-import (
- "github.com/pkt-cash/pktd/lnd/lnwire"
-)
-
-// Fuzz_commit_sig is used by go-fuzz.
-func Fuzz_commit_sig(data []byte) int {
- // Prefix with MsgCommitSig.
- data = prefixWithMsgType(data, lnwire.MsgCommitSig)
-
- // Create an empty message so that the FuzzHarness func can check
- // if the max payload constraint is violated.
- emptyMsg := lnwire.CommitSig{}
-
- // Pass the message into our general fuzz harness for wire messages!
- return harness(data, &emptyMsg)
-}
diff --git a/lnd/fuzz/lnwire/error.go b/lnd/fuzz/lnwire/error.go
deleted file mode 100644
index 1293cb0d..00000000
--- a/lnd/fuzz/lnwire/error.go
+++ /dev/null
@@ -1,20 +0,0 @@
-// +build gofuzz
-
-package lnwirefuzz
-
-import (
- "github.com/pkt-cash/pktd/lnd/lnwire"
-)
-
-// Fuzz_error is used by go-fuzz.
-func Fuzz_error(data []byte) int {
- // Prefix with MsgError.
- data = prefixWithMsgType(data, lnwire.MsgError)
-
- // Create an empty message so that the FuzzHarness func can check
- // if the max payload constraint is violated.
- emptyMsg := lnwire.Error{}
-
- // Pass the message into our general fuzz harness for wire messages!
- return harness(data, &emptyMsg)
-}
diff --git a/lnd/fuzz/lnwire/funding_created.go b/lnd/fuzz/lnwire/funding_created.go
deleted file mode 100644
index e170a1b7..00000000
--- a/lnd/fuzz/lnwire/funding_created.go
+++ /dev/null
@@ -1,20 +0,0 @@
-// +build gofuzz
-
-package lnwirefuzz
-
-import (
- "github.com/pkt-cash/pktd/lnd/lnwire"
-)
-
-// Fuzz_funding_created is used by go-fuzz.
-func Fuzz_funding_created(data []byte) int {
- // Prefix with MsgFundingCreated.
- data = prefixWithMsgType(data, lnwire.MsgFundingCreated)
-
- // Create an empty message so that the FuzzHarness func can check
- // if the max payload constraint is violated.
- emptyMsg := lnwire.FundingCreated{}
-
- // Pass the message into our general fuzz harness for wire messages!
- return harness(data, &emptyMsg)
-}
diff --git a/lnd/fuzz/lnwire/funding_locked.go b/lnd/fuzz/lnwire/funding_locked.go
deleted file mode 100644
index b3f2b2dd..00000000
--- a/lnd/fuzz/lnwire/funding_locked.go
+++ /dev/null
@@ -1,20 +0,0 @@
-// +build gofuzz
-
-package lnwirefuzz
-
-import (
- "github.com/pkt-cash/pktd/lnd/lnwire"
-)
-
-// Fuzz_funding_locked is used by go-fuzz.
-func Fuzz_funding_locked(data []byte) int {
- // Prefix with MsgFundingLocked.
- data = prefixWithMsgType(data, lnwire.MsgFundingLocked)
-
- // Create an empty message so that the FuzzHarness func can check
- // if the max payload constraint is violated.
- emptyMsg := lnwire.FundingLocked{}
-
- // Pass the message into our general fuzz harness for wire messages!
- return harness(data, &emptyMsg)
-}
diff --git a/lnd/fuzz/lnwire/funding_signed.go b/lnd/fuzz/lnwire/funding_signed.go
deleted file mode 100644
index 9cd19cb8..00000000
--- a/lnd/fuzz/lnwire/funding_signed.go
+++ /dev/null
@@ -1,20 +0,0 @@
-// +build gofuzz
-
-package lnwirefuzz
-
-import (
- "github.com/pkt-cash/pktd/lnd/lnwire"
-)
-
-// Fuzz_funding_signed is used by go-fuzz.
-func Fuzz_funding_signed(data []byte) int {
- // Prefix with MsgFundingSigned.
- prefixWithMsgType(data, lnwire.MsgFundingSigned)
-
- // Create an empty message so that the FuzzHarness func can check
- // if the max payload constraint is violated.
- emptyMsg := lnwire.FundingSigned{}
-
- // Pass the message into our general fuzz harness for wire messages!
- return harness(data, &emptyMsg)
-}
diff --git a/lnd/fuzz/lnwire/fuzz_utils.go b/lnd/fuzz/lnwire/fuzz_utils.go
deleted file mode 100644
index 9cd69a04..00000000
--- a/lnd/fuzz/lnwire/fuzz_utils.go
+++ /dev/null
@@ -1,72 +0,0 @@
-// +build gofuzz
-
-package lnwirefuzz
-
-import (
- "bytes"
- "encoding/binary"
- "reflect"
-
- "github.com/pkt-cash/pktd/lnd/lnwire"
-)
-
-// prefixWithMsgType takes []byte and adds a wire protocol prefix
-// to make the []byte into an actual message to be used in fuzzing.
-func prefixWithMsgType(data []byte, prefix lnwire.MessageType) []byte {
- var prefixBytes [2]byte
- binary.BigEndian.PutUint16(prefixBytes[:], uint16(prefix))
- data = append(prefixBytes[:], data...)
- return data
-}
-
-// harness performs the actual fuzz testing of the appropriate wire message.
-// This function will check that the passed-in message passes wire length checks,
-// is a valid message once deserialized, and passes a sequence of serialization
-// and deserialization checks. Returns an int that determines whether the input
-// is unique or not.
-func harness(data []byte, emptyMsg lnwire.Message) int {
- // Create a reader with the byte array.
- r := bytes.NewReader(data)
-
- // Make sure byte array length (excluding 2 bytes for message type) is
- // less than max payload size for the wire message. We check this because
- // otherwise `go-fuzz` will keep creating inputs that crash on ReadMessage
- // due to a large message size.
- payloadLen := uint32(len(data)) - 2
- if payloadLen > emptyMsg.MaxPayloadLength(0) {
- // Ignore this input - max payload constraint violated.
- return 1
- }
-
- msg, err := lnwire.ReadMessage(r, 0)
- if err != nil {
- // go-fuzz generated []byte that cannot be represented as a
- // wire message but we will return 0 so go-fuzz can modify the
- // input.
- return 1
- }
-
- // We will serialize the message into a new bytes buffer.
- var b bytes.Buffer
- if _, err := lnwire.WriteMessage(&b, msg, 0); err != nil {
- // Could not serialize message into bytes buffer, panic
- panic(err)
- }
-
- // Deserialize the message from the serialized bytes buffer, and then
- // assert that the original message is equal to the newly deserialized
- // message.
- newMsg, err := lnwire.ReadMessage(&b, 0)
- if err != nil {
- // Could not deserialize message from bytes buffer, panic
- panic(err)
- }
-
- if !reflect.DeepEqual(msg, newMsg) {
- // Deserialized message and original message are not deeply equal.
- panic("original message and deserialized message are not deeply equal")
- }
-
- // Add this input to the corpus.
- return 1
-}
diff --git a/lnd/fuzz/lnwire/gossip_timestamp_range.go b/lnd/fuzz/lnwire/gossip_timestamp_range.go
deleted file mode 100644
index 36ba6dce..00000000
--- a/lnd/fuzz/lnwire/gossip_timestamp_range.go
+++ /dev/null
@@ -1,20 +0,0 @@
-// +build gofuzz
-
-package lnwirefuzz
-
-import (
- "github.com/pkt-cash/pktd/lnd/lnwire"
-)
-
-// Fuzz_gossip_timestamp_range is used by go-fuzz.
-func Fuzz_gossip_timestamp_range(data []byte) int {
- // Prefix with MsgGossipTimestampRange.
- data = prefixWithMsgType(data, lnwire.MsgGossipTimestampRange)
-
- // Create an empty message so that the FuzzHarness func can check
- // if the max payload constraint is violated.
- emptyMsg := lnwire.GossipTimestampRange{}
-
- // Pass the message into our general fuzz harness for wire messages!
- return harness(data, &emptyMsg)
-}
diff --git a/lnd/fuzz/lnwire/init.go b/lnd/fuzz/lnwire/init.go
deleted file mode 100644
index 0362cb42..00000000
--- a/lnd/fuzz/lnwire/init.go
+++ /dev/null
@@ -1,20 +0,0 @@
-// +build gofuzz
-
-package lnwirefuzz
-
-import (
- "github.com/pkt-cash/pktd/lnd/lnwire"
-)
-
-// Fuzz_init is used by go-fuzz.
-func Fuzz_init(data []byte) int {
- // Prefix with MsgInit.
- data = prefixWithMsgType(data, lnwire.MsgInit)
-
- // Create an empty message so that the FuzzHarness func can check
- // if the max payload constraint is violated.
- emptyMsg := lnwire.Init{}
-
- // Pass the message into our general fuzz harness for wire messages!
- return harness(data, &emptyMsg)
-}
diff --git a/lnd/fuzz/lnwire/node_announcement.go b/lnd/fuzz/lnwire/node_announcement.go
deleted file mode 100644
index 4c15e627..00000000
--- a/lnd/fuzz/lnwire/node_announcement.go
+++ /dev/null
@@ -1,112 +0,0 @@
-// +build gofuzz
-
-package lnwirefuzz
-
-import (
- "bytes"
- "reflect"
-
- "github.com/pkt-cash/pktd/lnd/lnwire"
-)
-
-// Fuzz_node_announcement is used by go-fuzz.
-func Fuzz_node_announcement(data []byte) int {
- // Prefix with MsgNodeAnnouncement.
- data = prefixWithMsgType(data, lnwire.MsgNodeAnnouncement)
-
- // Create an empty message so that the FuzzHarness func can check
- // if the max payload constraint is violated.
- emptyMsg := lnwire.NodeAnnouncement{}
-
- // We have to do this here instead of in fuzz.Harness so that
- // reflect.DeepEqual isn't called. Address (de)serialization messes up
- // the fuzzing assertions.
-
- // Create a reader with the byte array.
- r := bytes.NewReader(data)
-
- // Make sure byte array length (excluding 2 bytes for message type) is
- // less than max payload size for the wire message. We check this because
- // otherwise `go-fuzz` will keep creating inputs that crash on ReadMessage
- // due to a large message size.
- payloadLen := uint32(len(data)) - 2
- if payloadLen > emptyMsg.MaxPayloadLength(0) {
- // Ignore this input - max payload constraint violated.
- return 1
- }
-
- msg, err := lnwire.ReadMessage(r, 0)
- if err != nil {
- // go-fuzz generated []byte that cannot be represented as a
- // wire message but we will return 0 so go-fuzz can modify the
- // input.
- return 1
- }
-
- // We will serialize the message into a new bytes buffer.
- var b bytes.Buffer
- if _, err := lnwire.WriteMessage(&b, msg, 0); err != nil {
- // Could not serialize message into bytes buffer, panic
- panic(err)
- }
-
- // Deserialize the message from the serialized bytes buffer, and then
- // assert that the original message is equal to the newly deserialized
- // message.
- newMsg, err := lnwire.ReadMessage(&b, 0)
- if err != nil {
- // Could not deserialize message from bytes buffer, panic
- panic(err)
- }
-
- // Now compare every field instead of using reflect.DeepEqual for the
- // Addresses field.
- var shouldPanic bool
- first := msg.(*lnwire.NodeAnnouncement)
- second := newMsg.(*lnwire.NodeAnnouncement)
- if !bytes.Equal(first.Signature[:], second.Signature[:]) {
- shouldPanic = true
- }
-
- if !reflect.DeepEqual(first.Features, second.Features) {
- shouldPanic = true
- }
-
- if first.Timestamp != second.Timestamp {
- shouldPanic = true
- }
-
- if !bytes.Equal(first.NodeID[:], second.NodeID[:]) {
- shouldPanic = true
- }
-
- if !reflect.DeepEqual(first.RGBColor, second.RGBColor) {
- shouldPanic = true
- }
-
- if !bytes.Equal(first.Alias[:], second.Alias[:]) {
- shouldPanic = true
- }
-
- if len(first.Addresses) != len(second.Addresses) {
- shouldPanic = true
- }
-
- for i := range first.Addresses {
- if first.Addresses[i].String() != second.Addresses[i].String() {
- shouldPanic = true
- break
- }
- }
-
- if !reflect.DeepEqual(first.ExtraOpaqueData, second.ExtraOpaqueData) {
- shouldPanic = true
- }
-
- if shouldPanic {
- panic("original message and deserialized message are not equal")
- }
-
- // Add this input to the corpus.
- return 1
-}
diff --git a/lnd/fuzz/lnwire/open_channel.go b/lnd/fuzz/lnwire/open_channel.go
deleted file mode 100644
index d3f2d0e6..00000000
--- a/lnd/fuzz/lnwire/open_channel.go
+++ /dev/null
@@ -1,151 +0,0 @@
-// +build gofuzz
-
-package lnwirefuzz
-
-import (
- "bytes"
-
- "github.com/pkt-cash/pktd/lnd/lnwire"
-)
-
-// Fuzz_open_channel is used by go-fuzz.
-func Fuzz_open_channel(data []byte) int {
- // Prefix with MsgOpenChannel.
- data = prefixWithMsgType(data, lnwire.MsgOpenChannel)
-
- // Create an empty message so that the FuzzHarness func can check
- // if the max payload constraint is violated.
- emptyMsg := lnwire.OpenChannel{}
-
- // We have to do this here instead of in fuzz.Harness so that
- // reflect.DeepEqual isn't called. Because of the UpfrontShutdownScript
- // encoding, the first message and second message aren't deeply equal since
- // the first has a nil slice and the other has an empty slice.
-
- // Create a reader with the byte array.
- r := bytes.NewReader(data)
-
- // Make sure byte array length (excluding 2 bytes for message type) is
- // less than max payload size for the wire message. We check this because
- // otherwise `go-fuzz` will keep creating inputs that crash on ReadMessage
- // due to a large message size.
- payloadLen := uint32(len(data)) - 2
- if payloadLen > emptyMsg.MaxPayloadLength(0) {
- // Ignore this input - max payload constraint violated.
- return 1
- }
-
- msg, err := lnwire.ReadMessage(r, 0)
- if err != nil {
- // go-fuzz generated []byte that cannot be represented as a
- // wire message but we will return 0 so go-fuzz can modify the
- // input.
- return 1
- }
-
- // We will serialize the message into a new bytes buffer.
- var b bytes.Buffer
- if _, err := lnwire.WriteMessage(&b, msg, 0); err != nil {
- // Could not serialize message into bytes buffer, panic
- panic(err)
- }
-
- // Deserialize the message from the serialized bytes buffer, and then
- // assert that the original message is equal to the newly deserialized
- // message.
- newMsg, err := lnwire.ReadMessage(&b, 0)
- if err != nil {
- // Could not deserialize message from bytes buffer, panic
- panic(err)
- }
-
- // Now compare every field instead of using reflect.DeepEqual.
- // For UpfrontShutdownScript, we only compare bytes. This probably takes
- // up more branches than necessary, but that's fine for now.
- var shouldPanic bool
- first := msg.(*lnwire.OpenChannel)
- second := newMsg.(*lnwire.OpenChannel)
-
- if !first.ChainHash.IsEqual(&second.ChainHash) {
- shouldPanic = true
- }
-
- if !bytes.Equal(first.PendingChannelID[:], second.PendingChannelID[:]) {
- shouldPanic = true
- }
-
- if first.FundingAmount != second.FundingAmount {
- shouldPanic = true
- }
-
- if first.PushAmount != second.PushAmount {
- shouldPanic = true
- }
-
- if first.DustLimit != second.DustLimit {
- shouldPanic = true
- }
-
- if first.MaxValueInFlight != second.MaxValueInFlight {
- shouldPanic = true
- }
-
- if first.ChannelReserve != second.ChannelReserve {
- shouldPanic = true
- }
-
- if first.HtlcMinimum != second.HtlcMinimum {
- shouldPanic = true
- }
-
- if first.FeePerKiloWeight != second.FeePerKiloWeight {
- shouldPanic = true
- }
-
- if first.CsvDelay != second.CsvDelay {
- shouldPanic = true
- }
-
- if first.MaxAcceptedHTLCs != second.MaxAcceptedHTLCs {
- shouldPanic = true
- }
-
- if !first.FundingKey.IsEqual(second.FundingKey) {
- shouldPanic = true
- }
-
- if !first.RevocationPoint.IsEqual(second.RevocationPoint) {
- shouldPanic = true
- }
-
- if !first.PaymentPoint.IsEqual(second.PaymentPoint) {
- shouldPanic = true
- }
-
- if !first.DelayedPaymentPoint.IsEqual(second.DelayedPaymentPoint) {
- shouldPanic = true
- }
-
- if !first.HtlcPoint.IsEqual(second.HtlcPoint) {
- shouldPanic = true
- }
-
- if !first.FirstCommitmentPoint.IsEqual(second.FirstCommitmentPoint) {
- shouldPanic = true
- }
-
- if first.ChannelFlags != second.ChannelFlags {
- shouldPanic = true
- }
-
- if !bytes.Equal(first.UpfrontShutdownScript, second.UpfrontShutdownScript) {
- shouldPanic = true
- }
-
- if shouldPanic {
- panic("original message and deserialized message are not equal")
- }
-
- // Add this input to the corpus.
- return 1
-}
diff --git a/lnd/fuzz/lnwire/ping.go b/lnd/fuzz/lnwire/ping.go
deleted file mode 100644
index 7936d8b8..00000000
--- a/lnd/fuzz/lnwire/ping.go
+++ /dev/null
@@ -1,20 +0,0 @@
-// +build gofuzz
-
-package lnwirefuzz
-
-import (
- "github.com/pkt-cash/pktd/lnd/lnwire"
-)
-
-// Fuzz_ping is used by go-fuzz.
-func Fuzz_ping(data []byte) int {
- // Prefix with MsgPing.
- data = prefixWithMsgType(data, lnwire.MsgPing)
-
- // Create an empty message so that the FuzzHarness func can check
- // if the max payload constraint is violated.
- emptyMsg := lnwire.Ping{}
-
- // Pass the message into our general fuzz harness for wire messages!
- return harness(data, &emptyMsg)
-}
diff --git a/lnd/fuzz/lnwire/pong.go b/lnd/fuzz/lnwire/pong.go
deleted file mode 100644
index 93e0b7f2..00000000
--- a/lnd/fuzz/lnwire/pong.go
+++ /dev/null
@@ -1,20 +0,0 @@
-// +build gofuzz
-
-package lnwirefuzz
-
-import (
- "github.com/pkt-cash/pktd/lnd/lnwire"
-)
-
-// Fuzz_pong is used by go-fuzz.
-func Fuzz_pong(data []byte) int {
- // Prefix with MsgPong.
- data = prefixWithMsgType(data, lnwire.MsgPong)
-
- // Create an empty message so that the FuzzHarness func can check
- // if the max payload constraint is violated.
- emptyMsg := lnwire.Pong{}
-
- // Pass the message into our general fuzz harness for wire messages!
- return harness(data, &emptyMsg)
-}
diff --git a/lnd/fuzz/lnwire/query_channel_range.go b/lnd/fuzz/lnwire/query_channel_range.go
deleted file mode 100644
index 8c24fe99..00000000
--- a/lnd/fuzz/lnwire/query_channel_range.go
+++ /dev/null
@@ -1,20 +0,0 @@
-// +build gofuzz
-
-package lnwirefuzz
-
-import (
- "github.com/pkt-cash/pktd/lnd/lnwire"
-)
-
-// Fuzz_query_channel_range is used by go-fuzz.
-func Fuzz_query_channel_range(data []byte) int {
- // Prefix with MsgQueryChannelRange.
- data = prefixWithMsgType(data, lnwire.MsgQueryChannelRange)
-
- // Create an empty message so that the FuzzHarness func can check
- // if the max payload constraint is violated.
- emptyMsg := lnwire.QueryChannelRange{}
-
- // Pass the message into our general fuzz harness for wire messages!
- return harness(data, &emptyMsg)
-}
diff --git a/lnd/fuzz/lnwire/query_short_chan_ids.go b/lnd/fuzz/lnwire/query_short_chan_ids.go
deleted file mode 100644
index 4e10cf84..00000000
--- a/lnd/fuzz/lnwire/query_short_chan_ids.go
+++ /dev/null
@@ -1,20 +0,0 @@
-// +build gofuzz
-
-package lnwirefuzz
-
-import (
- "github.com/pkt-cash/pktd/lnd/lnwire"
-)
-
-// Fuzz_query_short_chan_ids is used by go-fuzz.
-func Fuzz_query_short_chan_ids(data []byte) int {
- // Prefix with MsgQueryShortChanIDs.
- data = prefixWithMsgType(data, lnwire.MsgQueryShortChanIDs)
-
- // Create an empty message so that the FuzzHarness func can check
- // if the max payload constraint is violated.
- emptyMsg := lnwire.QueryShortChanIDs{}
-
- // Pass the message into our general fuzz harness for wire messages!
- return harness(data, &emptyMsg)
-}
diff --git a/lnd/fuzz/lnwire/query_short_chan_ids_zlib.go b/lnd/fuzz/lnwire/query_short_chan_ids_zlib.go
deleted file mode 100644
index 7304ec9d..00000000
--- a/lnd/fuzz/lnwire/query_short_chan_ids_zlib.go
+++ /dev/null
@@ -1,51 +0,0 @@
-// +build gofuzz
-
-package lnwirefuzz
-
-import (
- "bytes"
- "compress/zlib"
- "encoding/binary"
-
- "github.com/pkt-cash/pktd/lnd/lnwire"
-)
-
-// Fuzz_query_short_chan_ids_zlib is used by go-fuzz.
-func Fuzz_query_short_chan_ids_zlib(data []byte) int {
-
- var buf bytes.Buffer
- zlibWriter := zlib.NewWriter(&buf)
- _, err := zlibWriter.Write(data)
- if err != nil {
- // Zlib bug?
- panic(err)
- }
-
- if err := zlibWriter.Close(); err != nil {
- // Zlib bug?
- panic(err)
- }
-
- compressedPayload := buf.Bytes()
-
- chainhash := []byte("00000000000000000000000000000000")
- numBytesInBody := len(compressedPayload) + 1
- zlibByte := []byte("\x01")
-
- bodyBytes := make([]byte, 2)
- binary.BigEndian.PutUint16(bodyBytes, uint16(numBytesInBody))
-
- payload := append(chainhash, bodyBytes...)
- payload = append(payload, zlibByte...)
- payload = append(payload, compressedPayload...)
-
- // Prefix with MsgQueryShortChanIDs.
- payload = prefixWithMsgType(payload, lnwire.MsgQueryShortChanIDs)
-
- // Create an empty message so that the FuzzHarness func can check
- // if the max payload constraint is violated.
- emptyMsg := lnwire.QueryShortChanIDs{}
-
- // Pass the message into our general fuzz harness for wire messages!
- return harness(payload, &emptyMsg)
-}
diff --git a/lnd/fuzz/lnwire/reply_channel_range.go b/lnd/fuzz/lnwire/reply_channel_range.go
deleted file mode 100644
index 8e2165f8..00000000
--- a/lnd/fuzz/lnwire/reply_channel_range.go
+++ /dev/null
@@ -1,20 +0,0 @@
-// +build gofuzz
-
-package lnwirefuzz
-
-import (
- "github.com/pkt-cash/pktd/lnd/lnwire"
-)
-
-// Fuzz_reply_channel_range is used by go-fuzz.
-func Fuzz_reply_channel_range(data []byte) int {
- // Prefix with MsgReplyChannelRange.
- data = prefixWithMsgType(data, lnwire.MsgReplyChannelRange)
-
- // Create an empty message so that the FuzzHarness func can check
- // if the max payload constraint is violated.
- emptyMsg := lnwire.ReplyChannelRange{}
-
- // Pass the message into our general fuzz harness for wire messages!
- return harness(data, &emptyMsg)
-}
diff --git a/lnd/fuzz/lnwire/reply_channel_range_zlib.go b/lnd/fuzz/lnwire/reply_channel_range_zlib.go
deleted file mode 100644
index 59bb3bea..00000000
--- a/lnd/fuzz/lnwire/reply_channel_range_zlib.go
+++ /dev/null
@@ -1,59 +0,0 @@
-// +build gofuzz
-
-package lnwirefuzz
-
-import (
- "bytes"
- "compress/zlib"
- "encoding/binary"
-
- "github.com/pkt-cash/pktd/lnd/lnwire"
-)
-
-// Fuzz_reply_channel_range_zlib is used by go-fuzz.
-func Fuzz_reply_channel_range_zlib(data []byte) int {
-
- var buf bytes.Buffer
- zlibWriter := zlib.NewWriter(&buf)
- _, err := zlibWriter.Write(data)
- if err != nil {
- // Zlib bug?
- panic(err)
- }
-
- if err := zlibWriter.Close(); err != nil {
- // Zlib bug?
- panic(err)
- }
-
- compressedPayload := buf.Bytes()
-
- // Initialize some []byte vars which will prefix our payload
- chainhash := []byte("00000000000000000000000000000000")
- firstBlockHeight := []byte("\x00\x00\x00\x00")
- numBlocks := []byte("\x00\x00\x00\x00")
- completeByte := []byte("\x00")
-
- numBytesInBody := len(compressedPayload) + 1
- zlibByte := []byte("\x01")
-
- bodyBytes := make([]byte, 2)
- binary.BigEndian.PutUint16(bodyBytes, uint16(numBytesInBody))
-
- payload := append(chainhash, firstBlockHeight...)
- payload = append(payload, numBlocks...)
- payload = append(payload, completeByte...)
- payload = append(payload, bodyBytes...)
- payload = append(payload, zlibByte...)
- payload = append(payload, compressedPayload...)
-
- // Prefix with MsgReplyChannelRange.
- payload = prefixWithMsgType(payload, lnwire.MsgReplyChannelRange)
-
- // Create an empty message so that the FuzzHarness func can check
- // if the max payload constraint is violated.
- emptyMsg := lnwire.ReplyChannelRange{}
-
- // Pass the message into our general fuzz harness for wire messages!
- return harness(payload, &emptyMsg)
-}
diff --git a/lnd/fuzz/lnwire/reply_short_chan_ids_end.go b/lnd/fuzz/lnwire/reply_short_chan_ids_end.go
deleted file mode 100644
index 130f1303..00000000
--- a/lnd/fuzz/lnwire/reply_short_chan_ids_end.go
+++ /dev/null
@@ -1,20 +0,0 @@
-// +build gofuzz
-
-package lnwirefuzz
-
-import (
- "github.com/pkt-cash/pktd/lnd/lnwire"
-)
-
-// Fuzz_reply_short_chan_ids_end is used by go-fuzz.
-func Fuzz_reply_short_chan_ids_end(data []byte) int {
- // Prefix with MsgReplyShortChanIDsEnd.
- data = prefixWithMsgType(data, lnwire.MsgReplyShortChanIDsEnd)
-
- // Create an empty message so that the FuzzHarness func can check
- // if the max payload constraint is violated.
- emptyMsg := lnwire.ReplyShortChanIDsEnd{}
-
- // Pass the message into our general fuzz harness for wire messages!
- return harness(data, &emptyMsg)
-}
diff --git a/lnd/fuzz/lnwire/revoke_and_ack.go b/lnd/fuzz/lnwire/revoke_and_ack.go
deleted file mode 100644
index 60668d89..00000000
--- a/lnd/fuzz/lnwire/revoke_and_ack.go
+++ /dev/null
@@ -1,20 +0,0 @@
-// +build gofuzz
-
-package lnwirefuzz
-
-import (
- "github.com/pkt-cash/pktd/lnd/lnwire"
-)
-
-// Fuzz_revoke_and_ack is used by go-fuzz.
-func Fuzz_revoke_and_ack(data []byte) int {
- // Prefix with MsgRevokeAndAck.
- data = prefixWithMsgType(data, lnwire.MsgRevokeAndAck)
-
- // Create an empty message so that the FuzzHarness func can check
- // if the max payload constraint is violated.
- emptyMsg := lnwire.RevokeAndAck{}
-
- // Pass the message into our general fuzz harness for wire messages!
- return harness(data, &emptyMsg)
-}
diff --git a/lnd/fuzz/lnwire/shutdown.go b/lnd/fuzz/lnwire/shutdown.go
deleted file mode 100644
index a51ee8db..00000000
--- a/lnd/fuzz/lnwire/shutdown.go
+++ /dev/null
@@ -1,20 +0,0 @@
-// +build gofuzz
-
-package lnwirefuzz
-
-import (
- "github.com/pkt-cash/pktd/lnd/lnwire"
-)
-
-// Fuzz_shutdown is used by go-fuzz.
-func Fuzz_shutdown(data []byte) int {
- // Prefix with MsgShutdown.
- data = prefixWithMsgType(data, lnwire.MsgShutdown)
-
- // Create an empty message so that the FuzzHarness func can check
- // if the max payload constraint is violated.
- emptyMsg := lnwire.Shutdown{}
-
- // Pass the message into our general fuzz harness for wire messages!
- return harness(data, &emptyMsg)
-}
diff --git a/lnd/fuzz/lnwire/update_add_htlc.go b/lnd/fuzz/lnwire/update_add_htlc.go
deleted file mode 100644
index deabe79a..00000000
--- a/lnd/fuzz/lnwire/update_add_htlc.go
+++ /dev/null
@@ -1,20 +0,0 @@
-// +build gofuzz
-
-package lnwirefuzz
-
-import (
- "github.com/pkt-cash/pktd/lnd/lnwire"
-)
-
-// Fuzz_update_add_htlc is used by go-fuzz.
-func Fuzz_update_add_htlc(data []byte) int {
- // Prefix with MsgUpdateAddHTLC.
- data = prefixWithMsgType(data, lnwire.MsgUpdateAddHTLC)
-
- // Create an empty message so that the FuzzHarness func can check
- // if the max payload constraint is violated.
- emptyMsg := lnwire.UpdateAddHTLC{}
-
- // Pass the message into our general fuzz harness for wire messages!
- return harness(data, &emptyMsg)
-}
diff --git a/lnd/fuzz/lnwire/update_fail_htlc.go b/lnd/fuzz/lnwire/update_fail_htlc.go
deleted file mode 100644
index 256b6345..00000000
--- a/lnd/fuzz/lnwire/update_fail_htlc.go
+++ /dev/null
@@ -1,20 +0,0 @@
-// +build gofuzz
-
-package lnwirefuzz
-
-import (
- "github.com/pkt-cash/pktd/lnd/lnwire"
-)
-
-// Fuzz_update_fail_htlc is used by go-fuzz.
-func Fuzz_update_fail_htlc(data []byte) int {
- // Prefix with MsgUpdateFailHTLC.
- data = prefixWithMsgType(data, lnwire.MsgUpdateFailHTLC)
-
- // Create an empty message so that the FuzzHarness func can check
- // if the max payload constraint is violated.
- emptyMsg := lnwire.UpdateFailHTLC{}
-
- // Pass the message into our general fuzz harness for wire messages!
- return harness(data, &emptyMsg)
-}
diff --git a/lnd/fuzz/lnwire/update_fail_malformed_htlc.go b/lnd/fuzz/lnwire/update_fail_malformed_htlc.go
deleted file mode 100644
index 99bc15a0..00000000
--- a/lnd/fuzz/lnwire/update_fail_malformed_htlc.go
+++ /dev/null
@@ -1,20 +0,0 @@
-// +build gofuzz
-
-package lnwirefuzz
-
-import (
- "github.com/pkt-cash/pktd/lnd/lnwire"
-)
-
-// Fuzz_update_fail_malformed_htlc is used by go-fuzz.
-func Fuzz_update_fail_malformed_htlc(data []byte) int {
- // Prefix with MsgUpdateFailMalformedHTLC.
- data = prefixWithMsgType(data, lnwire.MsgUpdateFailMalformedHTLC)
-
- // Create an empty message so that the FuzzHarness func can check
- // if the max payload constraint is violated.
- emptyMsg := lnwire.UpdateFailMalformedHTLC{}
-
- // Pass the message into our general fuzz harness for wire messages!
- return harness(data, &emptyMsg)
-}
diff --git a/lnd/fuzz/lnwire/update_fee.go b/lnd/fuzz/lnwire/update_fee.go
deleted file mode 100644
index dbfbcc64..00000000
--- a/lnd/fuzz/lnwire/update_fee.go
+++ /dev/null
@@ -1,20 +0,0 @@
-// +build gofuzz
-
-package lnwirefuzz
-
-import (
- "github.com/pkt-cash/pktd/lnd/lnwire"
-)
-
-// Fuzz_update_fee is used by go-fuzz.
-func Fuzz_update_fee(data []byte) int {
- // Prefix with MsgUpdateFee.
- data = prefixWithMsgType(data, lnwire.MsgUpdateFee)
-
- // Create an empty message so that the FuzzHarness func can check
- // if the max payload constraint is violated.
- emptyMsg := lnwire.UpdateFee{}
-
- // Pass the message into our general fuzz harness for wire messages!
- return harness(data, &emptyMsg)
-}
diff --git a/lnd/fuzz/lnwire/update_fulfill_htlc.go b/lnd/fuzz/lnwire/update_fulfill_htlc.go
deleted file mode 100644
index db166d2f..00000000
--- a/lnd/fuzz/lnwire/update_fulfill_htlc.go
+++ /dev/null
@@ -1,20 +0,0 @@
-// +build gofuzz
-
-package lnwirefuzz
-
-import (
- "github.com/pkt-cash/pktd/lnd/lnwire"
-)
-
-// Fuzz_update_fulfill_htlc is used by go-fuzz.
-func Fuzz_update_fulfill_htlc(data []byte) int {
- // Prefix with MsgUpdateFulfillHTLC.
- data = prefixWithMsgType(data, lnwire.MsgUpdateFulfillHTLC)
-
- // Create an empty message so that the FuzzHarness func can check
- // if the max payload constraint is violated.
- emptyMsg := lnwire.UpdateFulfillHTLC{}
-
- // Pass the message into our general fuzz harness for wire messages!
- return harness(data, &emptyMsg)
-}
diff --git a/lnd/fuzz/wtwire/create_session.go b/lnd/fuzz/wtwire/create_session.go
deleted file mode 100644
index 99ea907f..00000000
--- a/lnd/fuzz/wtwire/create_session.go
+++ /dev/null
@@ -1,20 +0,0 @@
-// +build gofuzz
-
-package wtwirefuzz
-
-import (
- "github.com/pkt-cash/pktd/lnd/watchtower/wtwire"
-)
-
-// Fuzz_create_session is used by go-fuzz.
-func Fuzz_create_session(data []byte) int {
- // Prefix with MsgCreateSession.
- data = prefixWithMsgType(data, wtwire.MsgCreateSession)
-
- // Create an empty message so that the FuzzHarness func can check if the
- // max payload constraint is violated.
- emptyMsg := wtwire.CreateSession{}
-
- // Pass the message into our general fuzz harness for wire messages!
- return harness(data, &emptyMsg)
-}
diff --git a/lnd/fuzz/wtwire/create_session_reply.go b/lnd/fuzz/wtwire/create_session_reply.go
deleted file mode 100644
index 673fa91b..00000000
--- a/lnd/fuzz/wtwire/create_session_reply.go
+++ /dev/null
@@ -1,20 +0,0 @@
-// +build gofuzz
-
-package wtwirefuzz
-
-import (
- "github.com/pkt-cash/pktd/lnd/watchtower/wtwire"
-)
-
-// Fuzz_create_session_reply is used by go-fuzz.
-func Fuzz_create_session_reply(data []byte) int {
- // Prefix with MsgCreateSessionReply.
- data = prefixWithMsgType(data, wtwire.MsgCreateSessionReply)
-
- // Create an empty message so that the FuzzHarness func can check if the
- // max payload constraint is violated.
- emptyMsg := wtwire.CreateSessionReply{}
-
- // Pass the message into our general fuzz harness for wire messages!
- return harness(data, &emptyMsg)
-}
diff --git a/lnd/fuzz/wtwire/delete_session.go b/lnd/fuzz/wtwire/delete_session.go
deleted file mode 100644
index a47b437c..00000000
--- a/lnd/fuzz/wtwire/delete_session.go
+++ /dev/null
@@ -1,20 +0,0 @@
-// +build gofuzz
-
-package wtwirefuzz
-
-import (
- "github.com/pkt-cash/pktd/lnd/watchtower/wtwire"
-)
-
-// Fuzz_delete_session is used by go-fuzz.
-func Fuzz_delete_session(data []byte) int {
- // Prefix with MsgDeleteSession.
- data = prefixWithMsgType(data, wtwire.MsgDeleteSession)
-
- // Create an empty message so that the FuzzHarness func can check if the
- // max payload constraint is violated.
- emptyMsg := wtwire.DeleteSession{}
-
- // Pass the message into our general fuzz harness for wire messages!
- return harness(data, &emptyMsg)
-}
diff --git a/lnd/fuzz/wtwire/delete_session_reply.go b/lnd/fuzz/wtwire/delete_session_reply.go
deleted file mode 100644
index 1ecb10f0..00000000
--- a/lnd/fuzz/wtwire/delete_session_reply.go
+++ /dev/null
@@ -1,20 +0,0 @@
-// +build gofuzz
-
-package wtwirefuzz
-
-import (
- "github.com/pkt-cash/pktd/lnd/watchtower/wtwire"
-)
-
-// Fuzz_delete_session_reply is used by go-fuzz.
-func Fuzz_delete_session_reply(data []byte) int {
- // Prefix with MsgDeleteSessionReply.
- data = prefixWithMsgType(data, wtwire.MsgDeleteSessionReply)
-
- // Create an empty message so that the FuzzHarness func can check if the
- // max payload constraint is violated.
- emptyMsg := wtwire.DeleteSessionReply{}
-
- // Pass the message into our general fuzz harness for wire messages!
- return harness(data, &emptyMsg)
-}
diff --git a/lnd/fuzz/wtwire/error.go b/lnd/fuzz/wtwire/error.go
deleted file mode 100644
index ffb95723..00000000
--- a/lnd/fuzz/wtwire/error.go
+++ /dev/null
@@ -1,20 +0,0 @@
-// +build gofuzz
-
-package wtwirefuzz
-
-import (
- "github.com/pkt-cash/pktd/lnd/watchtower/wtwire"
-)
-
-// Fuzz_error is used by go-fuzz.
-func Fuzz_error(data []byte) int {
- // Prefix with MsgError.
- data = prefixWithMsgType(data, wtwire.MsgError)
-
- // Create an empty message so that the FuzzHarness func can check if the
- // max payload constraint is violated.
- emptyMsg := wtwire.Error{}
-
- // Pass the message into our general fuzz harness for wire messages!
- return harness(data, &emptyMsg)
-}
diff --git a/lnd/fuzz/wtwire/fuzz_utils.go b/lnd/fuzz/wtwire/fuzz_utils.go
deleted file mode 100644
index 36f31a34..00000000
--- a/lnd/fuzz/wtwire/fuzz_utils.go
+++ /dev/null
@@ -1,75 +0,0 @@
-// +build gofuzz
-
-package wtwirefuzz
-
-import (
- "bytes"
- "encoding/binary"
- "fmt"
- "reflect"
-
- "github.com/pkt-cash/pktd/lnd/watchtower/wtwire"
-)
-
-// prefixWithMsgType takes []byte and adds a wire protocol prefix
-// to make the []byte into an actual message to be used in fuzzing.
-func prefixWithMsgType(data []byte, prefix wtwire.MessageType) []byte {
- var prefixBytes [2]byte
- binary.BigEndian.PutUint16(prefixBytes[:], uint16(prefix))
- data = append(prefixBytes[:], data...)
- return data
-}
-
-// harness performs the actual fuzz testing of the appropriate wire message.
-// This function will check that the passed-in message passes wire length checks,
-// is a valid message once deserialized, and passes a sequence of serialization
-// and deserialization checks. Returns an int that determines whether the input
-// is unique or not.
-func harness(data []byte, emptyMsg wtwire.Message) int {
- // Create a reader with the byte array.
- r := bytes.NewReader(data)
-
- // Make sure byte array length (excluding 2 bytes for message type) is
- // less than max payload size for the wire message. We check this because
- // otherwise `go-fuzz` will keep creating inputs that crash on ReadMessage
- // due to a large message size.
- payloadLen := uint32(len(data)) - 2
- if payloadLen > emptyMsg.MaxPayloadLength(0) {
- // Ignore this input - max payload constraint violated.
- return 1
- }
-
- msg, err := wtwire.ReadMessage(r, 0)
- if err != nil {
- // go-fuzz generated []byte that cannot be represented as a
- // wire message but we will return 0 so go-fuzz can modify the
- // input.
- return 1
- }
-
- // We will serialize the message into a new bytes buffer.
- var b bytes.Buffer
- if _, err := wtwire.WriteMessage(&b, msg, 0); err != nil {
- // Could not serialize message into bytes buffer, panic.
- panic(err)
- }
-
- // Deserialize the message from the serialized bytes buffer, and then
- // assert that the original message is equal to the newly deserialized
- // message.
- newMsg, err := wtwire.ReadMessage(&b, 0)
- if err != nil {
- // Could not deserialize message from bytes buffer, panic.
- panic(err)
- }
-
- if !reflect.DeepEqual(msg, newMsg) {
- // Deserialized message and original message are not
- // deeply equal.
- panic(er.Errorf("deserialized message and original message " +
- "are not deeply equal."))
- }
-
- // Add this input to the corpus.
- return 1
-}
diff --git a/lnd/fuzz/wtwire/init.go b/lnd/fuzz/wtwire/init.go
deleted file mode 100644
index ca302e8f..00000000
--- a/lnd/fuzz/wtwire/init.go
+++ /dev/null
@@ -1,20 +0,0 @@
-// +build gofuzz
-
-package wtwirefuzz
-
-import (
- "github.com/pkt-cash/pktd/lnd/watchtower/wtwire"
-)
-
-// Fuzz_init is used by go-fuzz.
-func Fuzz_init(data []byte) int {
- // Prefix with MsgInit.
- data = prefixWithMsgType(data, wtwire.MsgInit)
-
- // Create an empty message so that the FuzzHarness func can check if the
- // max payload constraint is violated.
- emptyMsg := wtwire.Init{}
-
- // Pass the message into our general fuzz harness for wire messages!
- return harness(data, &emptyMsg)
-}
diff --git a/lnd/fuzz/wtwire/state_update.go b/lnd/fuzz/wtwire/state_update.go
deleted file mode 100644
index c1d3bd9b..00000000
--- a/lnd/fuzz/wtwire/state_update.go
+++ /dev/null
@@ -1,20 +0,0 @@
-// +build gofuzz
-
-package wtwirefuzz
-
-import (
- "github.com/pkt-cash/pktd/lnd/watchtower/wtwire"
-)
-
-// Fuzz_state_update is used by go-fuzz.
-func Fuzz_state_update(data []byte) int {
- // Prefix with MsgStateUpdate.
- data = prefixWithMsgType(data, wtwire.MsgStateUpdate)
-
- // Create an empty message so that the FuzzHarness func can check if the
- // max payload constraint is violated.
- emptyMsg := wtwire.StateUpdate{}
-
- // Pass the message into our general fuzz harness for wire messages!
- return harness(data, &emptyMsg)
-}
diff --git a/lnd/fuzz/wtwire/state_update_reply.go b/lnd/fuzz/wtwire/state_update_reply.go
deleted file mode 100644
index 1621325a..00000000
--- a/lnd/fuzz/wtwire/state_update_reply.go
+++ /dev/null
@@ -1,20 +0,0 @@
-// +build gofuzz
-
-package wtwirefuzz
-
-import (
- "github.com/pkt-cash/pktd/lnd/watchtower/wtwire"
-)
-
-// Fuzz_state_update_reply is used by go-fuzz.
-func Fuzz_state_update_reply(data []byte) int {
- // Prefix with MsgStateUpdateReply.
- data = prefixWithMsgType(data, wtwire.MsgStateUpdateReply)
-
- // Create an empty message so that the FuzzHarness func can check if the
- // max payload constraint is violated.
- emptyMsg := wtwire.StateUpdateReply{}
-
- // Pass the message into our general fuzz harness for wire messages!
- return harness(data, &emptyMsg)
-}
diff --git a/lnd/fuzz/zpay32/decode.go b/lnd/fuzz/zpay32/decode.go
deleted file mode 100644
index 553acd4c..00000000
--- a/lnd/fuzz/zpay32/decode.go
+++ /dev/null
@@ -1,22 +0,0 @@
-// +build gofuzz
-
-package zpay32fuzz
-
-import (
- "github.com/pkt-cash/pktd/chaincfg"
- "github.com/pkt-cash/pktd/lnd/zpay32"
-)
-
-// Fuzz_decode is used by go-fuzz.
-func Fuzz_decode(data []byte) int {
- inv, err := zpay32.Decode(string(data), &chaincfg.TestNet3Params)
- if err != nil {
- return 1
- }
-
- // Call these functions as a sanity check to make sure the invoice
- // is well-formed.
- _ = inv.MinFinalCLTVExpiry()
- _ = inv.Expiry()
- return 1
-}
diff --git a/lnd/fuzz/zpay32/encode.go b/lnd/fuzz/zpay32/encode.go
deleted file mode 100644
index 944c6de8..00000000
--- a/lnd/fuzz/zpay32/encode.go
+++ /dev/null
@@ -1,49 +0,0 @@
-// +build gofuzz
-
-package zpay32fuzz
-
-import (
- "encoding/hex"
- "fmt"
-
- "github.com/pkt-cash/pktd/btcec"
- "github.com/pkt-cash/pktd/chaincfg"
- "github.com/pkt-cash/pktd/lnd/zpay32"
-)
-
-// Fuzz_encode is used by go-fuzz.
-func Fuzz_encode(data []byte) int {
- inv, err := zpay32.Decode(string(data), &chaincfg.TestNet3Params)
- if err != nil {
- return 1
- }
-
- // Call these functions as a sanity check to make sure the invoice
- // is well-formed.
- _ = inv.MinFinalCLTVExpiry()
- _ = inv.Expiry()
-
- // Initialize the static key we will be using for this fuzz test.
- testPrivKeyBytes, _ := util.DecodeHex("e126f68f7eafcc8b74f54d269fe206be715000f94dac067d1c04a8ca3b2db734")
- testPrivKey, _ := btcec.PrivKeyFromBytes(btcec.S256(), testPrivKeyBytes)
-
- // Then, initialize the testMessageSigner so we can encode out
- // invoices with this private key.
- testMessageSigner := zpay32.MessageSigner{
- SignCompact: func(hash []byte) ([]byte, er.R) {
- sig, err := btcec.SignCompact(btcec.S256(),
- testPrivKey, hash, true)
- if err != nil {
- return nil, er.Errorf("can't sign the "+
- "message: %v", err)
- }
- return sig, nil
- },
- }
- _, err = inv.Encode(testMessageSigner)
- if err != nil {
- return 1
- }
-
- return 1
-}
diff --git a/lnd/healthcheck/diskcheck.go b/lnd/healthcheck/diskcheck.go
deleted file mode 100644
index b57ceccb..00000000
--- a/lnd/healthcheck/diskcheck.go
+++ /dev/null
@@ -1,37 +0,0 @@
-// +build !windows,!solaris,!netbsd,!openbsd
-
-package healthcheck
-
-import (
- "syscall"
-
- "github.com/pkt-cash/pktd/btcutil/er"
-)
-
-// AvailableDiskSpaceRatio returns ratio of available disk space to total
-// capacity.
-func AvailableDiskSpaceRatio(path string) (float64, er.R) {
- s := syscall.Statfs_t{}
- err := syscall.Statfs(path, &s)
- if err != nil {
- return 0, er.E(err)
- }
-
- // Calculate our free blocks/total blocks to get our total ratio of
- // free blocks.
- return float64(s.Bfree) / float64(s.Blocks), nil
-}
-
-// AvailableDiskSpace returns the available disk space in bytes of the given
-// file system.
-func AvailableDiskSpace(path string) (uint64, er.R) {
- s := syscall.Statfs_t{}
- err := syscall.Statfs(path, &s)
- if err != nil {
- return 0, er.E(err)
- }
-
- // Some OSes have s.Bavail defined as int64, others as uint64, so we
- // need the explicit type conversion here.
- return uint64(s.Bavail) * uint64(s.Bsize), nil // nolint:unconvert
-}
diff --git a/lnd/healthcheck/diskcheck_netbsd.go b/lnd/healthcheck/diskcheck_netbsd.go
deleted file mode 100644
index ef4dab2d..00000000
--- a/lnd/healthcheck/diskcheck_netbsd.go
+++ /dev/null
@@ -1,29 +0,0 @@
-package healthcheck
-
-import "golang.org/x/sys/unix"
-
-// AvailableDiskSpaceRatio returns ratio of available disk space to total
-// capacity for netbsd.
-func AvailableDiskSpaceRatio(path string) (float64, er.R) {
- s := unix.Statvfs_t{}
- err := unix.Statvfs(path, &s)
- if err != nil {
- return 0, err
- }
-
- // Calculate our free blocks/total blocks to get our total ratio of
- // free blocks.
- return float64(s.Bfree) / float64(s.Blocks), nil
-}
-
-// AvailableDiskSpace returns the available disk space in bytes of the given
-// file system for netbsd.
-func AvailableDiskSpace(path string) (uint64, er.R) {
- s := unix.Statvfs_t{}
- err := unix.Statvfs(path, &s)
- if err != nil {
- return 0, err
- }
-
- return s.Bavail * uint64(s.Bsize), nil
-}
diff --git a/lnd/healthcheck/diskcheck_openbsd.go b/lnd/healthcheck/diskcheck_openbsd.go
deleted file mode 100644
index ee5c7636..00000000
--- a/lnd/healthcheck/diskcheck_openbsd.go
+++ /dev/null
@@ -1,29 +0,0 @@
-package healthcheck
-
-import "golang.org/x/sys/unix"
-
-// AvailableDiskSpaceRatio returns ratio of available disk space to total
-// capacity for openbsd.
-func AvailableDiskSpaceRatio(path string) (float64, er.R) {
- s := unix.Statfs_t{}
- err := unix.Statfs(path, &s)
- if err != nil {
- return 0, err
- }
-
- // Calculate our free blocks/total blocks to get our total ratio of
- // free blocks.
- return float64(s.F_bfree) / float64(s.F_blocks), nil
-}
-
-// AvailableDiskSpace returns the available disk space in bytes of the given
-// file system for openbsd.
-func AvailableDiskSpace(path string) (uint64, er.R) {
- s := unix.Statfs_t{}
- err := unix.Statfs(path, &s)
- if err != nil {
- return 0, err
- }
-
- return uint64(s.F_bavail) * uint64(s.F_bsize), nil
-}
diff --git a/lnd/healthcheck/diskcheck_solaris.go b/lnd/healthcheck/diskcheck_solaris.go
deleted file mode 100644
index 34fd2251..00000000
--- a/lnd/healthcheck/diskcheck_solaris.go
+++ /dev/null
@@ -1,29 +0,0 @@
-package healthcheck
-
-import "golang.org/x/sys/unix"
-
-// AvailableDiskSpaceRatio returns ratio of available disk space to total
-// capacity for solaris.
-func AvailableDiskSpaceRatio(path string) (float64, er.R) {
- s := unix.Statvfs_t{}
- err := unix.Statvfs(path, &s)
- if err != nil {
- return 0, err
- }
-
- // Calculate our free blocks/total blocks to get our total ratio of
- // free blocks.
- return float64(s.Bfree) / float64(s.Blocks), nil
-}
-
-// AvailableDiskSpace returns the available disk space in bytes of the given
-// file system for solaris.
-func AvailableDiskSpace(path string) (uint64, er.R) {
- s := unix.Statvfs_t{}
- err := unix.Statvfs(path, &s)
- if err != nil {
- return 0, err
- }
-
- return s.Bavail * uint64(s.Bsize), nil
-}
diff --git a/lnd/healthcheck/diskcheck_windows.go b/lnd/healthcheck/diskcheck_windows.go
deleted file mode 100644
index c4fbe4ed..00000000
--- a/lnd/healthcheck/diskcheck_windows.go
+++ /dev/null
@@ -1,34 +0,0 @@
-package healthcheck
-
-import (
- "github.com/pkt-cash/pktd/btcutil/er"
- "golang.org/x/sys/windows"
-)
-
-// AvailableDiskSpaceRatio returns ratio of available disk space to total
-// capacity for windows.
-func AvailableDiskSpaceRatio(path string) (float64, er.R) {
- var free, total, avail uint64
-
- pathPtr, err := windows.UTF16PtrFromString(path)
- if err != nil {
- return 0, er.E(err)
- }
- err = windows.GetDiskFreeSpaceEx(pathPtr, &free, &total, &avail)
-
- return float64(avail) / float64(total), nil
-}
-
-// AvailableDiskSpace returns the available disk space in bytes of the given
-// file system for windows.
-func AvailableDiskSpace(path string) (uint64, er.R) {
- var free, total, avail uint64
-
- pathPtr, err := windows.UTF16PtrFromString(path)
- if err != nil {
- return 0, er.E(err)
- }
- err = windows.GetDiskFreeSpaceEx(pathPtr, &free, &total, &avail)
-
- return avail, nil
-}
diff --git a/lnd/healthcheck/healthcheck.go b/lnd/healthcheck/healthcheck.go
deleted file mode 100644
index bc437e85..00000000
--- a/lnd/healthcheck/healthcheck.go
+++ /dev/null
@@ -1,231 +0,0 @@
-// Package healthcheck contains a monitor which takes a set of liveliness checks
-// which it periodically checks. If a check fails after its configured number
-// of allowed call attempts, the monitor will send a request to shutdown using
-// the function is is provided in its config. Checks are dispatched in their own
-// goroutines so that they do not block each other.
-package healthcheck
-
-import (
- "sync"
- "sync/atomic"
- "time"
-
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/lnd/ticker"
- "github.com/pkt-cash/pktd/pktlog/log"
-)
-
-// Config contains configuration settings for our monitor.
-type Config struct {
- // Checks is a set of health checks that assert that lnd has access to
- // critical resources.
- Checks []*Observation
-
- // Shutdown should be called to request safe shutdown on failure of a
- // health check.
- Shutdown shutdownFunc
-}
-
-// shutdownFunc is the signature we use for a shutdown function which allows us
-// to print our reason for shutdown.
-type shutdownFunc func(format string, params ...interface{})
-
-// Monitor periodically checks a series of configured liveliness checks to
-// ensure that lnd has access to all critical resources.
-type Monitor struct {
- started int32 // To be used atomically.
- stopped int32 // To be used atomically.
-
- cfg *Config
-
- quit chan struct{}
- wg sync.WaitGroup
-}
-
-// NewMonitor returns a monitor with the provided config.
-func NewMonitor(cfg *Config) *Monitor {
- return &Monitor{
- cfg: cfg,
- quit: make(chan struct{}),
- }
-}
-
-// Start launches the goroutines required to run our monitor.
-func (m *Monitor) Start() er.R {
- if !atomic.CompareAndSwapInt32(&m.started, 0, 1) {
- return er.New("monitor already started")
- }
-
- // Run through all of the health checks that we have configured and
- // start a goroutine for each check.
- for _, check := range m.cfg.Checks {
- check := check
-
- // Skip over health checks that are disabled by setting zero
- // attempts.
- if check.Attempts == 0 {
- log.Warnf("check: %v configured with 0 attempts, "+
- "skipping it", check.Name)
-
- continue
- }
-
- m.wg.Add(1)
- go func() {
- defer m.wg.Done()
- check.monitor(m.cfg.Shutdown, m.quit)
- }()
- }
-
- return nil
-}
-
-// Stop sends all goroutines the signal to exit and waits for them to exit.
-func (m *Monitor) Stop() er.R {
- if !atomic.CompareAndSwapInt32(&m.stopped, 0, 1) {
- return er.Errorf("monitor already stopped")
- }
-
- close(m.quit)
- m.wg.Wait()
-
- return nil
-}
-
-// CreateCheck is a helper function that takes a function that produces an error
-// and wraps it in a function that returns its result on an error channel.
-// We do not wait group the goroutine running our checkFunc because we expect
-// to be dealing with health checks that may block; if we wait group them, we
-// may wait forever. Ideally future health checks will allow callers to cancel
-// them early, and we can wait group this.
-func CreateCheck(checkFunc func() er.R) func() chan er.R {
- return func() chan er.R {
- errChan := make(chan er.R, 1)
- go func() {
- errChan <- checkFunc()
- }()
-
- return errChan
- }
-}
-
-// Observation represents a liveliness check that we periodically check.
-type Observation struct {
- // Name describes the health check.
- Name string
-
- // Check runs the health check itself, returning an error channel that
- // is expected to receive nil or an error.
- Check func() chan er.R
-
- // Interval is a ticker which triggers running our check function. This
- // ticker must be started and stopped by the observation.
- Interval ticker.Ticker
-
- // Attempts is the number of calls we make for a single check before
- // failing.
- Attempts int
-
- // Timeout is the amount of time we allow our check function to take
- // before we time it out.
- Timeout time.Duration
-
- // Backoff is the amount of time we back off between retries for failed
- // checks.
- Backoff time.Duration
-}
-
-// NewObservation creates an observation.
-func NewObservation(name string, check func() er.R, interval,
- timeout, backoff time.Duration, attempts int) *Observation {
-
- return &Observation{
- Name: name,
- Check: CreateCheck(check),
- Interval: ticker.New(interval),
- Attempts: attempts,
- Timeout: timeout,
- Backoff: backoff,
- }
-}
-
-// String returns a string representation of an observation.
-func (o *Observation) String() string {
- return o.Name
-}
-
-// monitor executes a health check every time its interval ticks until the quit
-// channel signals that we should shutdown. This function is also responsible
-// for starting and stopping our ticker.
-func (o *Observation) monitor(shutdown shutdownFunc, quit chan struct{}) {
- log.Debugf("Monitoring: %v", o)
-
- o.Interval.Resume()
- defer o.Interval.Stop()
-
- for {
- select {
- case <-o.Interval.Ticks():
- o.retryCheck(quit, shutdown)
-
- // Exit if we receive the instruction to shutdown.
- case <-quit:
- return
- }
- }
-}
-
-// retryCheck calls a check function until it succeeds, or we reach our
-// configured number of attempts, waiting for our back off period between failed
-// calls. If we fail to obtain a passing health check after the allowed number
-// of calls, we will request shutdown.
-func (o *Observation) retryCheck(quit chan struct{}, shutdown shutdownFunc) {
- var count int
-
- for count < o.Attempts {
- // Increment our call count and call the health check endpoint.
- count++
-
- // Wait for our check to return, timeout to elapse, or quit
- // signal to be received.
- var err er.R
- select {
- case err = <-o.Check():
-
- case <-time.After(o.Timeout):
- err = er.Errorf("health check: %v timed out after: "+
- "%v", o, o.Timeout)
-
- case <-quit:
- return
- }
-
- // If our error is nil, we have passed our health check, so we
- // can exit.
- if err == nil {
- return
- }
-
- // If we have reached our allowed number of attempts, this
- // check has failed so we request shutdown.
- if count == o.Attempts {
- shutdown("Health check: %v failed after %v "+
- "calls", o, o.Attempts)
-
- return
- }
-
- log.Infof("Health check: %v, call: %v failed with: %v, "+
- "backing off for: %v", o, count, err, o.Backoff)
-
- // If we are still within the number of calls allowed for this
- // check, we wait for our back off period to elapse, or exit if
- // we get the signal to shutdown.
- select {
- case <-time.After(o.Backoff):
-
- case <-quit:
- return
- }
- }
-}
diff --git a/lnd/healthcheck/healthcheck_test.go b/lnd/healthcheck/healthcheck_test.go
deleted file mode 100644
index 48514e88..00000000
--- a/lnd/healthcheck/healthcheck_test.go
+++ /dev/null
@@ -1,226 +0,0 @@
-package healthcheck
-
-import (
- "testing"
- "time"
-
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/btcutil/util"
- "github.com/pkt-cash/pktd/lnd/ticker"
- "github.com/stretchr/testify/require"
-)
-
-var (
- errNonNil = er.GenericErrorType.CodeWithDetail("errNonNil", "non-nil test error")
- timeout = time.Second
- testTime = time.Unix(1, 2)
-)
-
-type mockedCheck struct {
- t *testing.T
- errChan chan er.R
-}
-
-// newMockCheck creates a new mock.
-func newMockCheck(t *testing.T) *mockedCheck {
- return &mockedCheck{
- t: t,
- errChan: make(chan er.R),
- }
-}
-
-// call returns our mock's error channel, which we can send responses on.
-func (m *mockedCheck) call() chan er.R {
- return m.errChan
-}
-
-// sendError sends an error into our mock's error channel, mocking the sending
-// of a response from our check function.
-func (m *mockedCheck) sendError(err er.R) {
- select {
- case m.errChan <- err:
- case <-time.After(timeout):
- m.t.Fatalf("could not send error: %v", err)
- }
-}
-
-// TestMonitor tests creation and triggering of a monitor with a health check.
-func TestMonitor(t *testing.T) {
- intervalTicker := ticker.NewForce(time.Hour)
-
- mock := newMockCheck(t)
- shutdown := make(chan struct{})
-
- // Create our config for monitoring. We will use a 0 back off so that
- // out test does not need to wait.
- cfg := &Config{
- Checks: []*Observation{
- {
- Check: mock.call,
- Interval: intervalTicker,
- Attempts: 2,
- Backoff: 0,
- Timeout: time.Hour,
- },
- },
- Shutdown: func(string, ...interface{}) {
- shutdown <- struct{}{}
- },
- }
- monitor := NewMonitor(cfg)
-
- util.RequireNoErr(t, monitor.Start(), "could not start monitor")
-
- // Tick is a helper we will use to tick our interval.
- tick := func() {
- select {
- case intervalTicker.Force <- testTime:
- case <-time.After(timeout):
- t.Fatal("could not tick timer")
- }
- }
-
- // Tick our timer and provide our error channel with a nil error. This
- // mocks our check function succeeding on the first call.
- tick()
- mock.sendError(nil)
-
- // Now we tick our timer again. This time send a non-nil error, followed
- // by a nil error. This tests our retry logic, because we allow 2
- // retries, so should recover without needing to shutdown.
- tick()
- mock.sendError(errNonNil.Default())
- mock.sendError(nil)
-
- // Finally, we tick our timer once more, and send two non-nil errors
- // into our error channel. This mocks our check function failing twice.
- tick()
- mock.sendError(errNonNil.Default())
- mock.sendError(errNonNil.Default())
-
- // Since we have failed within our allowed number of retries, we now
- // expect a call to our shutdown function.
- select {
- case <-shutdown:
- case <-time.After(timeout):
- t.Fatal("expected shutdown")
- }
-
- util.RequireNoErr(t, monitor.Stop(), "could not stop monitor")
-}
-
-// TestRetryCheck tests our retry logic. It does not include a test for exiting
-// during the back off period.
-func TestRetryCheck(t *testing.T) {
- tests := []struct {
- name string
-
- // errors provides an in-order list of errors that we expect our
- // health check to respond with. The number of errors in this
- // list indicates the number of times we expect our check to
- // be called, because our test will fail if we do not consume
- // every error.
- errors []er.R
-
- // attempts is the number of times we call a check before
- // failing.
- attempts int
-
- // timeout is the time we allow our check to take before we
- // fail them.
- timeout time.Duration
-
- // expectedShutdown is true if we expect a shutdown to be
- // triggered because all of our calls failed.
- expectedShutdown bool
- }{
- {
- name: "first call succeeds",
- errors: []er.R{nil},
- attempts: 2,
- timeout: time.Hour,
- expectedShutdown: false,
- },
- {
- name: "first call fails",
- errors: []er.R{errNonNil.Default()},
- attempts: 1,
- timeout: time.Hour,
- expectedShutdown: true,
- },
- {
- name: "fail then recover",
- errors: []er.R{errNonNil.Default(), nil},
- attempts: 2,
- timeout: time.Hour,
- expectedShutdown: false,
- },
- {
- name: "always fail",
- errors: []er.R{errNonNil.Default(), errNonNil.Default()},
- attempts: 2,
- timeout: time.Hour,
- expectedShutdown: true,
- },
- {
- name: "no calls",
- errors: nil,
- attempts: 0,
- timeout: time.Hour,
- expectedShutdown: false,
- },
- {
- name: "call times out",
- errors: nil,
- attempts: 1,
- timeout: 1,
- expectedShutdown: true,
- },
- }
-
- for _, test := range tests {
- test := test
-
- t.Run(test.name, func(t *testing.T) {
- var shutdown bool
- shutdownFunc := func(string, ...interface{}) {
- shutdown = true
- }
-
- mock := newMockCheck(t)
-
- // Create an observation that calls our call counting
- // function. We set a zero back off so that the test
- // will not wait.
- observation := &Observation{
- Check: mock.call,
- Attempts: test.attempts,
- Timeout: test.timeout,
- Backoff: 0,
- }
- quit := make(chan struct{})
-
- // Run our retry check in a goroutine because it blocks
- // on us sending errors into the mocked caller's error
- // channel.
- done := make(chan struct{})
- go func() {
- observation.retryCheck(quit, shutdownFunc)
- close(done)
- }()
-
- // Prompt our mock caller to send responses for calls
- // to our call function.
- for _, err := range test.errors {
- mock.sendError(err)
- }
-
- // Make sure that we have finished running our retry
- // check function before we start checking results.
- <-done
-
- require.Equal(t, test.expectedShutdown, shutdown,
- "unexpected shutdown state")
- })
- }
-}
diff --git a/lnd/htlcswitch/circuit.go b/lnd/htlcswitch/circuit.go
deleted file mode 100644
index c48a11f6..00000000
--- a/lnd/htlcswitch/circuit.go
+++ /dev/null
@@ -1,232 +0,0 @@
-package htlcswitch
-
-import (
- "encoding/binary"
- "io"
-
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/btcutil/util"
- "github.com/pkt-cash/pktd/lnd/channeldb"
- "github.com/pkt-cash/pktd/lnd/htlcswitch/hop"
- "github.com/pkt-cash/pktd/lnd/lnwire"
-)
-
-// EmptyCircuitKey is a default value for an outgoing circuit key returned when
-// a circuit's keystone has not been set. Note that this value is invalid for
-// use as a keystone, since the outgoing channel id can never be equal to
-// sourceHop.
-var EmptyCircuitKey CircuitKey
-
-// CircuitKey is a tuple of channel ID and HTLC ID, used to uniquely identify
-// HTLCs in a circuit. Circuits are identified primarily by the circuit key of
-// the incoming HTLC. However, a circuit may also be referenced by its outgoing
-// circuit key after the HTLC has been forwarded via the outgoing link.
-type CircuitKey = channeldb.CircuitKey
-
-// PaymentCircuit is used by the switch as placeholder between when the
-// switch makes a forwarding decision and the outgoing link determines the
-// proper HTLC ID for the local log. After the outgoing HTLC ID has been
-// determined, the half circuit will be converted into a full PaymentCircuit.
-type PaymentCircuit struct {
- // AddRef is the forward reference of the Add update in the incoming
- // link's forwarding package. This value is set on the htlcPacket of the
- // returned settle/fail so that it can be removed from disk.
- AddRef channeldb.AddRef
-
- // Incoming is the circuit key identifying the incoming channel and htlc
- // index from which this ADD originates.
- Incoming CircuitKey
-
- // Outgoing is the circuit key identifying the outgoing channel, and the
- // HTLC index that was used to forward the ADD. It will be nil if this
- // circuit's keystone has not been set.
- Outgoing *CircuitKey
-
- // PaymentHash used as unique identifier of payment.
- PaymentHash [32]byte
-
- // IncomingAmount is the value of the HTLC from the incoming link.
- IncomingAmount lnwire.MilliSatoshi
-
- // OutgoingAmount specifies the value of the HTLC leaving the switch,
- // either as a payment or forwarded amount.
- OutgoingAmount lnwire.MilliSatoshi
-
- // ErrorEncrypter is used to re-encrypt the onion failure before
- // sending it back to the originator of the payment.
- ErrorEncrypter hop.ErrorEncrypter
-
- // LoadedFromDisk is set true for any circuits loaded after the circuit
- // map is reloaded from disk.
- //
- // NOTE: This value is determined implicitly during a restart. It is not
- // persisted, and should never be set outside the circuit map.
- LoadedFromDisk bool
-}
-
-// HasKeystone returns true if an outgoing link has assigned this circuit's
-// outgoing circuit key.
-func (c *PaymentCircuit) HasKeystone() bool {
- return c.Outgoing != nil
-}
-
-// newPaymentCircuit initializes a payment circuit on the heap using the payment
-// hash and an in-memory htlc packet.
-func newPaymentCircuit(hash *[32]byte, pkt *htlcPacket) *PaymentCircuit {
- var addRef channeldb.AddRef
- if pkt.sourceRef != nil {
- addRef = *pkt.sourceRef
- }
-
- return &PaymentCircuit{
- AddRef: addRef,
- Incoming: CircuitKey{
- ChanID: pkt.incomingChanID,
- HtlcID: pkt.incomingHTLCID,
- },
- PaymentHash: *hash,
- IncomingAmount: pkt.incomingAmount,
- OutgoingAmount: pkt.amount,
- ErrorEncrypter: pkt.obfuscator,
- }
-}
-
-// makePaymentCircuit initializes a payment circuit on the stack using the
-// payment hash and an in-memory htlc packet.
-func makePaymentCircuit(hash *[32]byte, pkt *htlcPacket) PaymentCircuit {
- var addRef channeldb.AddRef
- if pkt.sourceRef != nil {
- addRef = *pkt.sourceRef
- }
-
- return PaymentCircuit{
- AddRef: addRef,
- Incoming: CircuitKey{
- ChanID: pkt.incomingChanID,
- HtlcID: pkt.incomingHTLCID,
- },
- PaymentHash: *hash,
- IncomingAmount: pkt.incomingAmount,
- OutgoingAmount: pkt.amount,
- ErrorEncrypter: pkt.obfuscator,
- }
-}
-
-// Encode writes a PaymentCircuit to the provided io.Writer.
-func (c *PaymentCircuit) Encode(w io.Writer) er.R {
- if err := c.AddRef.Encode(w); err != nil {
- return err
- }
-
- if err := c.Incoming.Encode(w); err != nil {
- return err
- }
-
- if _, err := util.Write(w, c.PaymentHash[:]); err != nil {
- return err
- }
-
- var scratch [8]byte
-
- binary.BigEndian.PutUint64(scratch[:], uint64(c.IncomingAmount))
- if _, err := util.Write(w, scratch[:]); err != nil {
- return err
- }
-
- binary.BigEndian.PutUint64(scratch[:], uint64(c.OutgoingAmount))
- if _, err := util.Write(w, scratch[:]); err != nil {
- return err
- }
-
- // Defaults to EncrypterTypeNone.
- var encrypterType hop.EncrypterType
- if c.ErrorEncrypter != nil {
- encrypterType = c.ErrorEncrypter.Type()
- }
-
- err := util.WriteBin(w, binary.BigEndian, encrypterType)
- if err != nil {
- return err
- }
-
- // Skip encoding of error encrypter if this half add does not have one.
- if encrypterType == hop.EncrypterTypeNone {
- return nil
- }
-
- return c.ErrorEncrypter.Encode(w)
-}
-
-// Decode reads a PaymentCircuit from the provided io.Reader.
-func (c *PaymentCircuit) Decode(r io.Reader) er.R {
- if err := c.AddRef.Decode(r); err != nil {
- return err
- }
-
- if err := c.Incoming.Decode(r); err != nil {
- return err
- }
-
- if _, err := util.ReadFull(r, c.PaymentHash[:]); err != nil {
- return err
- }
-
- var scratch [8]byte
-
- if _, err := util.ReadFull(r, scratch[:]); err != nil {
- return err
- }
- c.IncomingAmount = lnwire.MilliSatoshi(
- binary.BigEndian.Uint64(scratch[:]))
-
- if _, err := util.ReadFull(r, scratch[:]); err != nil {
- return err
- }
- c.OutgoingAmount = lnwire.MilliSatoshi(
- binary.BigEndian.Uint64(scratch[:]))
-
- // Read the encrypter type used for this circuit.
- var encrypterType hop.EncrypterType
- err := util.ReadBin(r, binary.BigEndian, &encrypterType)
- if err != nil {
- return err
- }
-
- switch encrypterType {
- case hop.EncrypterTypeNone:
- // No encrypter was provided, such as when the payment is
- // locally initiated.
- return nil
-
- case hop.EncrypterTypeSphinx:
- // Sphinx encrypter was used as this is a forwarded HTLC.
- c.ErrorEncrypter = hop.NewSphinxErrorEncrypter()
-
- case hop.EncrypterTypeMock:
- // Test encrypter.
- c.ErrorEncrypter = NewMockObfuscator()
-
- default:
- return ErrUnknownEncrypterType.Default()
- }
-
- return c.ErrorEncrypter.Decode(r)
-}
-
-// InKey returns the primary identifier for the circuit corresponding to the
-// incoming HTLC.
-func (c *PaymentCircuit) InKey() CircuitKey {
- return c.Incoming
-}
-
-// OutKey returns the keystone identifying the outgoing link and HTLC ID. If the
-// circuit hasn't been completed, this method returns an EmptyKeystone, which is
-// an invalid outgoing circuit key. Only call this method if HasKeystone returns
-// true.
-func (c *PaymentCircuit) OutKey() CircuitKey {
- if c.Outgoing != nil {
- return *c.Outgoing
- }
-
- return EmptyCircuitKey
-}
diff --git a/lnd/htlcswitch/circuit_map.go b/lnd/htlcswitch/circuit_map.go
deleted file mode 100644
index 8cdb4f9c..00000000
--- a/lnd/htlcswitch/circuit_map.go
+++ /dev/null
@@ -1,956 +0,0 @@
-package htlcswitch
-
-import (
- "bytes"
- "fmt"
- "sync"
-
- "github.com/davecgh/go-spew/spew"
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/lnd/channeldb"
- "github.com/pkt-cash/pktd/lnd/channeldb/kvdb"
- "github.com/pkt-cash/pktd/lnd/htlcswitch/hop"
- "github.com/pkt-cash/pktd/lnd/lnwire"
- "github.com/pkt-cash/pktd/pktlog/log"
-)
-
-var (
- // ErrCorruptedCircuitMap indicates that the on-disk bucketing structure
- // has altered since the circuit map instance was initialized.
- ErrCorruptedCircuitMap = Err.CodeWithDetail("ErrCorruptedCircuitMap", "circuit map has been corrupted")
-
- // ErrCircuitNotInHashIndex indicates that a particular circuit did not
- // appear in the in-memory hash index.
- ErrCircuitNotInHashIndex = Err.CodeWithDetail("ErrCircuitNotInHashIndex", "payment circuit not found in "+
- "hash index")
-
- // ErrUnknownCircuit signals that circuit could not be removed from the
- // map because it was not found.
- ErrUnknownCircuit = Err.CodeWithDetail("ErrUnknownCircuit", "unknown payment circuit")
-
- // ErrCircuitClosing signals that an htlc has already closed this
- // circuit in-memory.
- ErrCircuitClosing = Err.CodeWithDetail("ErrCircuitClosing", "circuit has already been closed")
-
- // ErrDuplicateCircuit signals that this circuit was previously
- // added.
- ErrDuplicateCircuit = Err.CodeWithDetail("ErrDuplicateCircuit", "duplicate circuit add")
-
- // ErrUnknownKeystone signals that no circuit was found using the
- // outgoing circuit key.
- ErrUnknownKeystone = Err.CodeWithDetail("ErrUnknownKeystone", "unknown circuit keystone")
-
- // ErrDuplicateKeystone signals that this circuit was previously
- // assigned a keystone.
- ErrDuplicateKeystone = Err.CodeWithDetail("ErrDuplicateKeystone", "cannot add duplicate keystone")
-)
-
-// CircuitModifier is a common interface used by channel links to modify the
-// contents of the circuit map maintained by the switch.
-type CircuitModifier interface {
- // OpenCircuits preemptively records a batch keystones that will mark
- // currently pending circuits as open. These changes can be rolled back
- // on restart if the outgoing Adds do not make it into a commitment
- // txn.
- OpenCircuits(...Keystone) er.R
-
- // TrimOpenCircuits removes a channel's open channels with htlc indexes
- // above `start`.
- TrimOpenCircuits(chanID lnwire.ShortChannelID, start uint64) er.R
-
- // DeleteCircuits removes the incoming circuit key to remove all
- // persistent references to a circuit. Returns a ErrUnknownCircuit if
- // any of the incoming keys are not known.
- DeleteCircuits(inKeys ...CircuitKey) er.R
-}
-
-// CircuitLookup is a common interface used to lookup information that is stored
-// in the circuit map.
-type CircuitLookup interface {
- // LookupCircuit queries the circuit map for the circuit identified by
- // inKey.
- LookupCircuit(inKey CircuitKey) *PaymentCircuit
-
- // LookupOpenCircuit queries the circuit map for a circuit identified
- // by its outgoing circuit key.
- LookupOpenCircuit(outKey CircuitKey) *PaymentCircuit
-}
-
-// CircuitFwdActions represents the forwarding decision made by the circuit
-// map, and is returned from CommitCircuits. The sequence of circuits provided
-// to CommitCircuits is split into three sub-sequences, allowing the caller to
-// do an in-order scan, comparing the head of each subsequence, to determine
-// the decision made by the circuit map.
-type CircuitFwdActions struct {
- // Adds is the subsequence of circuits that were successfully committed
- // in the circuit map.
- Adds []*PaymentCircuit
-
- // Drops is the subsequence of circuits for which no action should be
- // done.
- Drops []*PaymentCircuit
-
- // Fails is the subsequence of circuits that should be failed back by
- // the calling link.
- Fails []*PaymentCircuit
-}
-
-// CircuitMap is an interface for managing the construction and teardown of
-// payment circuits used by the switch.
-type CircuitMap interface {
- CircuitModifier
-
- CircuitLookup
-
- // CommitCircuits attempts to add the given circuits to the circuit
- // map. The list of circuits is split into three distinct
- // sub-sequences, corresponding to adds, drops, and fails. Adds should
- // be forwarded to the switch, while fails should be failed back
- // locally within the calling link.
- CommitCircuits(circuit ...*PaymentCircuit) (*CircuitFwdActions, er.R)
-
- // CloseCircuit marks the circuit identified by `outKey` as closing
- // in-memory, which prevents duplicate settles/fails from completing an
- // open circuit twice.
- CloseCircuit(outKey CircuitKey) (*PaymentCircuit, er.R)
-
- // FailCircuit is used by locally failed HTLCs to mark the circuit
- // identified by `inKey` as closing in-memory, which prevents duplicate
- // settles/fails from being accepted for the same circuit.
- FailCircuit(inKey CircuitKey) (*PaymentCircuit, er.R)
-
- // LookupByPaymentHash queries the circuit map and returns all open
- // circuits that use the given payment hash.
- LookupByPaymentHash(hash [32]byte) []*PaymentCircuit
-
- // NumPending returns the total number of active circuits added by
- // CommitCircuits.
- NumPending() int
-
- // NumOpen returns the number of circuits with HTLCs that have been
- // forwarded via an outgoing link.
- NumOpen() int
-}
-
-var (
- // circuitAddKey is the key used to retrieve the bucket containing
- // payment circuits. A circuit records information about how to return
- // a packet to the source link, potentially including an error
- // encrypter for applying this hop's encryption to the payload in the
- // reverse direction.
- circuitAddKey = []byte("circuit-adds")
-
- // circuitKeystoneKey is used to retrieve the bucket containing circuit
- // keystones, which are set in place once a forwarded packet is
- // assigned an index on an outgoing commitment txn.
- circuitKeystoneKey = []byte("circuit-keystones")
-)
-
-// circuitMap is a data structure that implements thread safe, persistent
-// storage of circuit routing information. The switch consults a circuit map to
-// determine where to forward returning HTLC update messages. Circuits are
-// always identifiable by their incoming CircuitKey, in addition to their
-// outgoing CircuitKey if the circuit is fully-opened.
-type circuitMap struct {
- cfg *CircuitMapConfig
-
- mtx sync.RWMutex
-
- // pending is an in-memory mapping of all half payment circuits, and is
- // kept in sync with the on-disk contents of the circuit map.
- pending map[CircuitKey]*PaymentCircuit
-
- // opened is an in-memory mapping of all full payment circuits, which
- // is also synchronized with the persistent state of the circuit map.
- opened map[CircuitKey]*PaymentCircuit
-
- // closed is an in-memory set of circuits for which the switch has
- // received a settle or fail. This precedes the actual deletion of a
- // circuit from disk.
- closed map[CircuitKey]struct{}
-
- // hashIndex is a volatile index that facilitates fast queries by
- // payment hash against the contents of circuits. This index can be
- // reconstructed entirely from the set of persisted full circuits on
- // startup.
- hashIndex map[[32]byte]map[CircuitKey]struct{}
-}
-
-// CircuitMapConfig houses the critical interfaces and references necessary to
-// parameterize an instance of circuitMap.
-type CircuitMapConfig struct {
- // DB provides the persistent storage engine for the circuit map.
- // TODO(conner): create abstraction to allow for the substitution of
- // other persistence engines.
- DB *channeldb.DB
-
- // ExtractErrorEncrypter derives the shared secret used to encrypt
- // errors from the obfuscator's ephemeral public key.
- ExtractErrorEncrypter hop.ErrorEncrypterExtracter
-}
-
-// NewCircuitMap creates a new instance of the circuitMap.
-func NewCircuitMap(cfg *CircuitMapConfig) (CircuitMap, er.R) {
- cm := &circuitMap{
- cfg: cfg,
- }
-
- // Initialize the on-disk buckets used by the circuit map.
- if err := cm.initBuckets(); err != nil {
- return nil, err
- }
-
- // Load any previously persisted circuit into back into memory.
- if err := cm.restoreMemState(); err != nil {
- return nil, err
- }
-
- // Trim any keystones that were not committed in an outgoing commit txn.
- //
- // NOTE: This operation will be applied to the persistent state of all
- // active channels. Therefore, it must be called before any links are
- // created to avoid interfering with normal operation.
- if err := cm.trimAllOpenCircuits(); err != nil {
- return nil, err
- }
-
- return cm, nil
-}
-
-// initBuckets ensures that the primary buckets used by the circuit are
-// initialized so that we can assume their existence after startup.
-func (cm *circuitMap) initBuckets() er.R {
- return kvdb.Update(cm.cfg.DB, func(tx kvdb.RwTx) er.R {
- _, err := tx.CreateTopLevelBucket(circuitKeystoneKey)
- if err != nil {
- return err
- }
-
- _, err = tx.CreateTopLevelBucket(circuitAddKey)
- return err
- }, func() {})
-}
-
-// restoreMemState loads the contents of the half circuit and full circuit
-// buckets from disk and reconstructs the in-memory representation of the
-// circuit map. Afterwards, the state of the hash index is reconstructed using
-// the recovered set of full circuits. This method will also remove any stray
-// keystones, which are those that appear fully-opened, but have no pending
-// circuit related to the intended incoming link.
-func (cm *circuitMap) restoreMemState() er.R {
- log.Infof("Restoring in-memory circuit state from disk")
-
- var (
- opened map[CircuitKey]*PaymentCircuit
- pending map[CircuitKey]*PaymentCircuit
- )
-
- if err := kvdb.Update(cm.cfg.DB, func(tx kvdb.RwTx) er.R {
- // Restore any of the circuits persisted in the circuit bucket
- // back into memory.
- circuitBkt := tx.ReadWriteBucket(circuitAddKey)
- if circuitBkt == nil {
- return ErrCorruptedCircuitMap.Default()
- }
-
- if err := circuitBkt.ForEach(func(_, v []byte) er.R {
- circuit, err := cm.decodeCircuit(v)
- if err != nil {
- return err
- }
-
- circuit.LoadedFromDisk = true
- pending[circuit.Incoming] = circuit
-
- return nil
- }); err != nil {
- return err
- }
-
- // Furthermore, load the keystone bucket and resurrect the
- // keystones used in any open circuits.
- keystoneBkt := tx.ReadWriteBucket(circuitKeystoneKey)
- if keystoneBkt == nil {
- return ErrCorruptedCircuitMap.Default()
- }
-
- var strayKeystones []Keystone
- if err := keystoneBkt.ForEach(func(k, v []byte) er.R {
- var (
- inKey CircuitKey
- outKey = &CircuitKey{}
- )
-
- // Decode the incoming and outgoing circuit keys.
- if err := inKey.SetBytes(v); err != nil {
- return err
- }
- if err := outKey.SetBytes(k); err != nil {
- return err
- }
-
- // Retrieve the pending circuit, set its keystone, then
- // add it to the opened map.
- circuit, ok := pending[inKey]
- if ok {
- circuit.Outgoing = outKey
- opened[*outKey] = circuit
- } else {
- strayKeystones = append(strayKeystones, Keystone{
- InKey: inKey,
- OutKey: *outKey,
- })
- }
-
- return nil
- }); err != nil {
- return err
- }
-
- // If any stray keystones were found, we'll proceed to prune
- // them from the circuit map's persistent storage. This may
- // manifest on older nodes that had updated channels before
- // their short channel id was set properly. We believe this
- // issue has been fixed, though this will allow older nodes to
- // recover without additional intervention.
- for _, strayKeystone := range strayKeystones {
- // As a precaution, we will only cleanup keystones
- // related to locally-initiated payments. If a
- // documented case of stray keystones emerges for
- // forwarded payments, this check should be removed, but
- // with extreme caution.
- if strayKeystone.OutKey.ChanID != hop.Source {
- continue
- }
-
- log.Infof("Removing stray keystone: %v", strayKeystone)
- err := keystoneBkt.Delete(strayKeystone.OutKey.Bytes())
- if err != nil {
- return err
- }
- }
-
- return nil
-
- }, func() {
- opened = make(map[CircuitKey]*PaymentCircuit)
- pending = make(map[CircuitKey]*PaymentCircuit)
- }); err != nil {
- return err
- }
-
- cm.pending = pending
- cm.opened = opened
- cm.closed = make(map[CircuitKey]struct{})
-
- log.Infof("Payment circuits loaded: num_pending=%v, num_open=%v",
- len(pending), len(opened))
-
- // Finally, reconstruct the hash index by running through our set of
- // open circuits.
- cm.hashIndex = make(map[[32]byte]map[CircuitKey]struct{})
- for _, circuit := range opened {
- cm.addCircuitToHashIndex(circuit)
- }
-
- return nil
-}
-
-// decodeCircuit reconstructs an in-memory payment circuit from a byte slice.
-// The byte slice is assumed to have been generated by the circuit's Encode
-// method. If the decoding is successful, the onion obfuscator will be
-// reextracted, since it is not stored in plaintext on disk.
-func (cm *circuitMap) decodeCircuit(v []byte) (*PaymentCircuit, er.R) {
- var circuit = &PaymentCircuit{}
-
- circuitReader := bytes.NewReader(v)
- if err := circuit.Decode(circuitReader); err != nil {
- return nil, err
- }
-
- // If the error encrypter is nil, this is locally-source payment so
- // there is no encrypter.
- if circuit.ErrorEncrypter == nil {
- return circuit, nil
- }
-
- // Otherwise, we need to reextract the encrypter, so that the shared
- // secret is rederived from what was decoded.
- err := circuit.ErrorEncrypter.Reextract(
- cm.cfg.ExtractErrorEncrypter,
- )
- if err != nil {
- return nil, err
- }
-
- return circuit, nil
-}
-
-// trimAllOpenCircuits reads the set of active channels from disk and trims
-// keystones for any non-pending channels using the next unallocated htlc index.
-// This method is intended to be called on startup. Each link will also trim
-// it's own circuits upon startup.
-//
-// NOTE: This operation will be applied to the persistent state of all active
-// channels. Therefore, it must be called before any links are created to avoid
-// interfering with normal operation.
-func (cm *circuitMap) trimAllOpenCircuits() er.R {
- activeChannels, err := cm.cfg.DB.FetchAllOpenChannels()
- if err != nil {
- return err
- }
-
- for _, activeChannel := range activeChannels {
- if activeChannel.IsPending {
- continue
- }
-
- // First, skip any channels that have not been assigned their
- // final channel identifier, otherwise we would try to trim
- // htlcs belonging to the all-zero, hop.Source ID.
- chanID := activeChannel.ShortChanID()
- if chanID == hop.Source {
- continue
- }
-
- // Next, retrieve the next unallocated htlc index, which bounds
- // the cutoff of confirmed htlc indexes.
- start, err := activeChannel.NextLocalHtlcIndex()
- if err != nil {
- return err
- }
-
- // Finally, remove all pending circuits above at or above the
- // next unallocated local htlc indexes. This has the effect of
- // reverting any circuits that have either not been locked in,
- // or had not been included in a pending commitment.
- err = cm.TrimOpenCircuits(chanID, start)
- if err != nil {
- return err
- }
- }
-
- return nil
-}
-
-// TrimOpenCircuits removes a channel's keystones above the short chan id's
-// highest committed htlc index. This has the effect of returning those
-// circuits to a half-open state. Since opening of circuits is done in advance
-// of actually committing the Add htlcs into a commitment txn, this allows
-// circuits to be opened preemptively, since we can roll them back after any
-// failures.
-func (cm *circuitMap) TrimOpenCircuits(chanID lnwire.ShortChannelID,
- start uint64) er.R {
-
- log.Infof("Trimming open circuits for chan_id=%v, start_htlc_id=%v",
- chanID, start)
-
- var trimmedOutKeys []CircuitKey
-
- // Scan forward from the last unacked htlc id, stopping as soon as we
- // don't find any more. Outgoing htlc id's must be assigned in order,
- // so there should never be disjoint segments of keystones to trim.
- cm.mtx.Lock()
- for i := start; ; i++ {
- outKey := CircuitKey{
- ChanID: chanID,
- HtlcID: i,
- }
-
- circuit, ok := cm.opened[outKey]
- if !ok {
- break
- }
-
- circuit.Outgoing = nil
- delete(cm.opened, outKey)
- trimmedOutKeys = append(trimmedOutKeys, outKey)
- cm.removeCircuitFromHashIndex(circuit)
- }
- cm.mtx.Unlock()
-
- if len(trimmedOutKeys) == 0 {
- return nil
- }
-
- return kvdb.Update(cm.cfg.DB, func(tx kvdb.RwTx) er.R {
- keystoneBkt := tx.ReadWriteBucket(circuitKeystoneKey)
- if keystoneBkt == nil {
- return ErrCorruptedCircuitMap.Default()
- }
-
- for _, outKey := range trimmedOutKeys {
- err := keystoneBkt.Delete(outKey.Bytes())
- if err != nil {
- return err
- }
- }
-
- return nil
- }, func() {})
-}
-
-// LookupByHTLC looks up the payment circuit by the outgoing channel and HTLC
-// IDs. Returns nil if there is no such circuit.
-func (cm *circuitMap) LookupCircuit(inKey CircuitKey) *PaymentCircuit {
- cm.mtx.RLock()
- defer cm.mtx.RUnlock()
-
- return cm.pending[inKey]
-}
-
-// LookupOpenCircuit searches for the circuit identified by its outgoing circuit
-// key.
-func (cm *circuitMap) LookupOpenCircuit(outKey CircuitKey) *PaymentCircuit {
- cm.mtx.RLock()
- defer cm.mtx.RUnlock()
-
- return cm.opened[outKey]
-}
-
-// LookupByPaymentHash looks up and returns any payment circuits with a given
-// payment hash.
-func (cm *circuitMap) LookupByPaymentHash(hash [32]byte) []*PaymentCircuit {
- cm.mtx.RLock()
- defer cm.mtx.RUnlock()
-
- var circuits []*PaymentCircuit
- if circuitSet, ok := cm.hashIndex[hash]; ok {
- // Iterate over the outgoing circuit keys found with this hash,
- // and retrieve the circuit from the opened map.
- circuits = make([]*PaymentCircuit, 0, len(circuitSet))
- for key := range circuitSet {
- if circuit, ok := cm.opened[key]; ok {
- circuits = append(circuits, circuit)
- }
- }
- }
-
- return circuits
-}
-
-// CommitCircuits accepts any number of circuits and persistently adds them to
-// the switch's circuit map. The method returns a list of circuits that had not
-// been seen prior by the switch. A link should only forward HTLCs corresponding
-// to the returned circuits to the switch.
-//
-// NOTE: This method uses batched writes to improve performance, gains will only
-// be realized if it is called concurrently from separate goroutines.
-func (cm *circuitMap) CommitCircuits(circuits ...*PaymentCircuit) (
- *CircuitFwdActions, er.R) {
-
- inKeys := make([]CircuitKey, 0, len(circuits))
- for _, circuit := range circuits {
- inKeys = append(inKeys, circuit.Incoming)
- }
-
- log.Tracef("Committing fresh circuits: %v", log.C(func() string {
- return spew.Sdump(inKeys)
- }))
-
- actions := &CircuitFwdActions{}
-
- // If an empty list was passed, return early to avoid grabbing the lock.
- if len(circuits) == 0 {
- return actions, nil
- }
-
- // First, we reconcile the provided circuits with our set of pending
- // circuits to construct a set of new circuits that need to be written
- // to disk. The circuit's pointer is stored so that we only permit this
- // exact circuit to be forwarded through the switch. If a circuit is
- // already pending, the htlc will be reforwarded by the switch.
- //
- // NOTE: We track an additional addFails subsequence, which permits us
- // to fail back all packets that weren't dropped if we encounter an
- // error when committing the circuits.
- cm.mtx.Lock()
- var adds, drops, fails, addFails []*PaymentCircuit
- for _, circuit := range circuits {
- inKey := circuit.InKey()
- if foundCircuit, ok := cm.pending[inKey]; ok {
- switch {
-
- // This circuit has a keystone, it's waiting for a
- // response from the remote peer on the outgoing link.
- // Drop it like it's hot, ensure duplicates get caught.
- case foundCircuit.HasKeystone():
- drops = append(drops, circuit)
-
- // If no keystone is set and the switch has not been
- // restarted, the corresponding packet should still be
- // in the outgoing link's mailbox. It will be delivered
- // if it comes online before the switch goes down.
- //
- // NOTE: Dropping here prevents a flapping, incoming
- // link from failing a duplicate add while it is still
- // in the server's memory mailboxes.
- case !foundCircuit.LoadedFromDisk:
- drops = append(drops, circuit)
-
- // Otherwise, the in-mem packet has been lost due to a
- // restart. It is now safe to send back a failure along
- // the incoming link. The incoming link should be able
- // detect and ignore duplicate packets of this type.
- default:
- fails = append(fails, circuit)
- addFails = append(addFails, circuit)
- }
-
- continue
- }
-
- cm.pending[inKey] = circuit
- adds = append(adds, circuit)
- addFails = append(addFails, circuit)
- }
- cm.mtx.Unlock()
-
- // If all circuits are dropped or failed, we are done.
- if len(adds) == 0 {
- actions.Drops = drops
- actions.Fails = fails
- return actions, nil
- }
-
- // Now, optimistically serialize the circuits to add.
- var bs = make([]bytes.Buffer, len(adds))
- for i, circuit := range adds {
- if err := circuit.Encode(&bs[i]); err != nil {
- actions.Drops = drops
- actions.Fails = addFails
- return actions, err
- }
- }
-
- // Write the entire batch of circuits to the persistent circuit bucket
- // using bolt's Batch write. This method must be called from multiple,
- // distinct goroutines to have any impact on performance.
- err := kvdb.Batch(cm.cfg.DB.Backend, func(tx kvdb.RwTx) er.R {
- circuitBkt := tx.ReadWriteBucket(circuitAddKey)
- if circuitBkt == nil {
- return ErrCorruptedCircuitMap.Default()
- }
-
- for i, circuit := range adds {
- inKeyBytes := circuit.InKey().Bytes()
- circuitBytes := bs[i].Bytes()
-
- err := circuitBkt.Put(inKeyBytes, circuitBytes)
- if err != nil {
- return err
- }
- }
-
- return nil
- })
-
- // Return if the write succeeded.
- if err == nil {
- actions.Adds = adds
- actions.Drops = drops
- actions.Fails = fails
- return actions, nil
- }
-
- // Otherwise, rollback the circuits added to the pending set if the
- // write failed.
- cm.mtx.Lock()
- for _, circuit := range adds {
- delete(cm.pending, circuit.InKey())
- }
- cm.mtx.Unlock()
-
- // Since our write failed, we will return the dropped packets and mark
- // all other circuits as failed.
- actions.Drops = drops
- actions.Fails = addFails
-
- return actions, err
-}
-
-// Keystone is a tuple binding an incoming and outgoing CircuitKey. Keystones
-// are preemptively written by an outgoing link before signing a new commitment
-// state, and cements which HTLCs we are awaiting a response from a remote
-// peer.
-type Keystone struct {
- InKey CircuitKey
- OutKey CircuitKey
-}
-
-// String returns a human readable description of the Keystone.
-func (k *Keystone) String() string {
- return fmt.Sprintf("%s --> %s", k.InKey, k.OutKey)
-}
-
-// OpenCircuits sets the outgoing circuit key for the circuit identified by
-// inKey, persistently marking the circuit as opened. After the changes have
-// been persisted, the circuit map's in-memory indexes are updated so that this
-// circuit can be queried using LookupByKeystone or LookupByPaymentHash.
-func (cm *circuitMap) OpenCircuits(keystones ...Keystone) er.R {
- if len(keystones) == 0 {
- return nil
- }
-
- log.Tracef("Opening finalized circuits: %v", log.C(func() string {
- return spew.Sdump(keystones)
- }))
-
- // Check that all keystones correspond to committed-but-unopened
- // circuits.
- cm.mtx.RLock()
- openedCircuits := make([]*PaymentCircuit, 0, len(keystones))
- for _, ks := range keystones {
- if _, ok := cm.opened[ks.OutKey]; ok {
- cm.mtx.RUnlock()
- return ErrDuplicateKeystone.Default()
- }
-
- circuit, ok := cm.pending[ks.InKey]
- if !ok {
- cm.mtx.RUnlock()
- return ErrUnknownCircuit.Default()
- }
-
- openedCircuits = append(openedCircuits, circuit)
- }
- cm.mtx.RUnlock()
-
- err := kvdb.Update(cm.cfg.DB, func(tx kvdb.RwTx) er.R {
- // Now, load the circuit bucket to which we will write the
- // already serialized circuit.
- keystoneBkt := tx.ReadWriteBucket(circuitKeystoneKey)
- if keystoneBkt == nil {
- return ErrCorruptedCircuitMap.Default()
- }
-
- for _, ks := range keystones {
- outBytes := ks.OutKey.Bytes()
- inBytes := ks.InKey.Bytes()
- err := keystoneBkt.Put(outBytes, inBytes)
- if err != nil {
- return err
- }
- }
-
- return nil
- }, func() {})
-
- if err != nil {
- return err
- }
-
- cm.mtx.Lock()
- for i, circuit := range openedCircuits {
- ks := keystones[i]
-
- // Since our persistent operation was successful, we can now
- // modify the in memory representations. Set the outgoing
- // circuit key on our pending circuit, add the same circuit to
- // set of opened circuits, and add this circuit to the hash
- // index.
- circuit.Outgoing = &CircuitKey{}
- *circuit.Outgoing = ks.OutKey
-
- cm.opened[ks.OutKey] = circuit
- cm.addCircuitToHashIndex(circuit)
- }
- cm.mtx.Unlock()
-
- return nil
-}
-
-// addCirciutToHashIndex inserts a circuit into the circuit map's hash index, so
-// that it can be queried using LookupByPaymentHash.
-func (cm *circuitMap) addCircuitToHashIndex(c *PaymentCircuit) {
- if _, ok := cm.hashIndex[c.PaymentHash]; !ok {
- cm.hashIndex[c.PaymentHash] = make(map[CircuitKey]struct{})
- }
- cm.hashIndex[c.PaymentHash][c.OutKey()] = struct{}{}
-}
-
-// FailCircuit marks the circuit identified by `inKey` as closing in-memory,
-// which prevents duplicate settles/fails from completing an open circuit twice.
-func (cm *circuitMap) FailCircuit(inKey CircuitKey) (*PaymentCircuit, er.R) {
-
- cm.mtx.Lock()
- defer cm.mtx.Unlock()
-
- circuit, ok := cm.pending[inKey]
- if !ok {
- return nil, ErrUnknownCircuit.Default()
- }
-
- _, ok = cm.closed[inKey]
- if ok {
- return nil, ErrCircuitClosing.Default()
- }
-
- cm.closed[inKey] = struct{}{}
-
- return circuit, nil
-}
-
-// CloseCircuit marks the circuit identified by `outKey` as closing in-memory,
-// which prevents duplicate settles/fails from completing an open
-// circuit twice.
-func (cm *circuitMap) CloseCircuit(outKey CircuitKey) (*PaymentCircuit, er.R) {
-
- cm.mtx.Lock()
- defer cm.mtx.Unlock()
-
- circuit, ok := cm.opened[outKey]
- if !ok {
- return nil, ErrUnknownCircuit.Default()
- }
-
- _, ok = cm.closed[circuit.Incoming]
- if ok {
- return nil, ErrCircuitClosing.Default()
- }
-
- cm.closed[circuit.Incoming] = struct{}{}
-
- return circuit, nil
-}
-
-// DeleteCircuits destroys the target circuits by removing them from the circuit
-// map, additionally removing the circuits' keystones if any HTLCs were
-// forwarded through an outgoing link. The circuits should be identified by its
-// incoming circuit key. If a given circuit is not found in the circuit map, it
-// will be ignored from the query. This would typically indicate that the
-// circuit was already cleaned up at a different point in time.
-func (cm *circuitMap) DeleteCircuits(inKeys ...CircuitKey) er.R {
-
- log.Tracef("Deleting resolved circuits: %v", log.C(func() string {
- return spew.Sdump(inKeys)
- }))
-
- var (
- closingCircuits = make(map[CircuitKey]struct{})
- removedCircuits = make(map[CircuitKey]*PaymentCircuit)
- )
-
- cm.mtx.Lock()
- // Remove any references to the circuits from memory, keeping track of
- // which circuits were removed, and which ones had been marked closed.
- // This can be used to restore these entries later if the persistent
- // removal fails.
- for _, inKey := range inKeys {
- circuit, ok := cm.pending[inKey]
- if !ok {
- continue
- }
- delete(cm.pending, inKey)
-
- if _, ok := cm.closed[inKey]; ok {
- closingCircuits[inKey] = struct{}{}
- delete(cm.closed, inKey)
- }
-
- if circuit.HasKeystone() {
- delete(cm.opened, circuit.OutKey())
- cm.removeCircuitFromHashIndex(circuit)
- }
-
- removedCircuits[inKey] = circuit
- }
- cm.mtx.Unlock()
-
- err := kvdb.Batch(cm.cfg.DB.Backend, func(tx kvdb.RwTx) er.R {
- for _, circuit := range removedCircuits {
- // If this htlc made it to an outgoing link, load the
- // keystone bucket from which we will remove the
- // outgoing circuit key.
- if circuit.HasKeystone() {
- keystoneBkt := tx.ReadWriteBucket(circuitKeystoneKey)
- if keystoneBkt == nil {
- return ErrCorruptedCircuitMap.Default()
- }
-
- outKey := circuit.OutKey()
-
- err := keystoneBkt.Delete(outKey.Bytes())
- if err != nil {
- return err
- }
- }
-
- // Remove the circuit itself based on the incoming
- // circuit key.
- circuitBkt := tx.ReadWriteBucket(circuitAddKey)
- if circuitBkt == nil {
- return ErrCorruptedCircuitMap.Default()
- }
-
- inKey := circuit.InKey()
- if err := circuitBkt.Delete(inKey.Bytes()); err != nil {
- return err
- }
- }
-
- return nil
- })
-
- // Return if the write succeeded.
- if err == nil {
- return nil
- }
-
- // If the persistent changes failed, restore the circuit map to it's
- // previous state.
- cm.mtx.Lock()
- for inKey, circuit := range removedCircuits {
- cm.pending[inKey] = circuit
-
- if _, ok := closingCircuits[inKey]; ok {
- cm.closed[inKey] = struct{}{}
- }
-
- if circuit.HasKeystone() {
- cm.opened[circuit.OutKey()] = circuit
- cm.addCircuitToHashIndex(circuit)
- }
- }
- cm.mtx.Unlock()
-
- return err
-}
-
-// removeCircuitFromHashIndex removes the given circuit from the hash index,
-// pruning any unnecessary memory optimistically.
-func (cm *circuitMap) removeCircuitFromHashIndex(c *PaymentCircuit) {
- // Locate bucket containing this circuit's payment hashes.
- circuitsWithHash, ok := cm.hashIndex[c.PaymentHash]
- if !ok {
- return
- }
-
- outKey := c.OutKey()
-
- // Remove this circuit from the set of circuitsWithHash.
- delete(circuitsWithHash, outKey)
-
- // Prune the payment hash bucket if no other entries remain.
- if len(circuitsWithHash) == 0 {
- delete(cm.hashIndex, c.PaymentHash)
- }
-}
-
-// NumPending returns the number of active circuits added to the circuit map.
-func (cm *circuitMap) NumPending() int {
- cm.mtx.RLock()
- defer cm.mtx.RUnlock()
-
- return len(cm.pending)
-}
-
-// NumOpen returns the number of circuits that have been opened by way of
-// setting their keystones. This is the number of HTLCs that are waiting for a
-// settle/fail response from a remote peer.
-func (cm *circuitMap) NumOpen() int {
- cm.mtx.RLock()
- defer cm.mtx.RUnlock()
-
- return len(cm.opened)
-}
diff --git a/lnd/htlcswitch/circuit_test.go b/lnd/htlcswitch/circuit_test.go
deleted file mode 100644
index fb20808c..00000000
--- a/lnd/htlcswitch/circuit_test.go
+++ /dev/null
@@ -1,1387 +0,0 @@
-package htlcswitch_test
-
-import (
- "bytes"
- "io/ioutil"
- "reflect"
- "testing"
-
- "github.com/pkt-cash/pktd/btcec"
- "github.com/pkt-cash/pktd/btcutil"
- "github.com/pkt-cash/pktd/btcutil/er"
- bitcoinCfg "github.com/pkt-cash/pktd/chaincfg"
- sphinx "github.com/pkt-cash/pktd/lightning-onion"
- "github.com/pkt-cash/pktd/lnd/channeldb"
- "github.com/pkt-cash/pktd/lnd/htlcswitch"
- "github.com/pkt-cash/pktd/lnd/htlcswitch/hop"
- "github.com/pkt-cash/pktd/lnd/keychain"
- "github.com/pkt-cash/pktd/lnd/lnwire"
-)
-
-var (
- hash1 = [32]byte{0x01}
- hash2 = [32]byte{0x02}
- hash3 = [32]byte{0x03}
-
- // sphinxPrivKey is the private key given to freshly created sphinx
- // routers.
- sphinxPrivKey *btcec.PrivateKey
-
- // testEphemeralKey is the ephemeral key that will be extracted to
- // create onion obfuscators.
- testEphemeralKey *btcec.PublicKey
-
- // testExtracter is a precomputed extraction of testEphemeralKey, using
- // the sphinxPrivKey.
- testExtracter *hop.SphinxErrorEncrypter
-)
-
-func init() {
- // Generate a fresh key for our sphinx router.
- var err er.R
- sphinxPrivKey, err = btcec.NewPrivateKey(btcec.S256())
- if err != nil {
- panic(err)
- }
-
- // And another, whose public key will serve as the test ephemeral key.
- testEphemeralPriv, err := btcec.NewPrivateKey(btcec.S256())
- if err != nil {
- panic(err)
- }
- testEphemeralKey = testEphemeralPriv.PubKey()
-
- // Finally, properly initialize the test extracter
- initTestExtracter()
-}
-
-// initTestExtracter spins up a new onion processor specifically for the purpose
-// of generating our testExtracter, which should be derived from the
-// testEphemeralKey, and which randomly-generated key is used to init the sphinx
-// router.
-//
-// NOTE: This should be called in init(), after testEphemeralKey has been
-// properly initialized.
-func initTestExtracter() {
- onionProcessor := newOnionProcessor(nil)
- defer onionProcessor.Stop()
-
- obfuscator, _ := onionProcessor.ExtractErrorEncrypter(
- testEphemeralKey,
- )
-
- sphinxExtracter, ok := obfuscator.(*hop.SphinxErrorEncrypter)
- if !ok {
- panic("did not extract sphinx error encrypter")
- }
-
- testExtracter = sphinxExtracter
-
- // We also set this error extracter on startup, otherwise it will be nil
- // at compile-time.
- halfCircuitTests[2].encrypter = testExtracter
-}
-
-// newOnionProcessor creates starts a new htlcswitch.OnionProcessor using a temp
-// db and no garbage collection.
-func newOnionProcessor(t *testing.T) *hop.OnionProcessor {
- sphinxRouter := sphinx.NewRouter(
- &keychain.PrivKeyECDH{PrivKey: sphinxPrivKey},
- &bitcoinCfg.SimNetParams, sphinx.NewMemoryReplayLog(),
- )
-
- if err := sphinxRouter.Start(); err != nil {
- t.Fatalf("unable to start sphinx router: %v", err)
- }
-
- return hop.NewOnionProcessor(sphinxRouter)
-}
-
-// newCircuitMap creates a new htlcswitch.CircuitMap using a temp db and a
-// fresh sphinx router.
-func newCircuitMap(t *testing.T) (*htlcswitch.CircuitMapConfig,
- htlcswitch.CircuitMap) {
-
- onionProcessor := newOnionProcessor(t)
-
- circuitMapCfg := &htlcswitch.CircuitMapConfig{
- DB: makeCircuitDB(t, ""),
- ExtractErrorEncrypter: onionProcessor.ExtractErrorEncrypter,
- }
-
- circuitMap, err := htlcswitch.NewCircuitMap(circuitMapCfg)
- if err != nil {
- t.Fatalf("unable to create persistent circuit map: %v", err)
- }
-
- return circuitMapCfg, circuitMap
-}
-
-// TestCircuitMapInit is a quick check to ensure that we can start and restore
-// the circuit map, as this will be used extensively in this suite.
-func TestCircuitMapInit(t *testing.T) {
- t.Parallel()
-
- cfg, _ := newCircuitMap(t)
- restartCircuitMap(t, cfg)
-}
-
-var halfCircuitTests = []struct {
- hash [32]byte
- inValue btcutil.Amount
- outValue btcutil.Amount
- chanID lnwire.ShortChannelID
- htlcID uint64
- encrypter hop.ErrorEncrypter
-}{
- {
- hash: hash1,
- inValue: 0,
- outValue: 1000,
- chanID: lnwire.NewShortChanIDFromInt(1),
- htlcID: 1,
- encrypter: nil,
- },
- {
- hash: hash2,
- inValue: 2100,
- outValue: 2000,
- chanID: lnwire.NewShortChanIDFromInt(2),
- htlcID: 2,
- encrypter: htlcswitch.NewMockObfuscator(),
- },
- {
- hash: hash3,
- inValue: 10000,
- outValue: 9000,
- chanID: lnwire.NewShortChanIDFromInt(3),
- htlcID: 3,
- // NOTE: The value of testExtracter is nil at compile-time, it
- // is fully-initialized in initTestExtracter, which should
- // repopulate this encrypter.
- encrypter: testExtracter,
- },
-}
-
-// TestHalfCircuitSerialization checks that the half circuits can be properly
-// encoded and decoded properly. A critical responsibility of this test is to
-// verify that the various ErrorEncrypter implementations can be properly
-// reconstructed from a serialized half circuit.
-func TestHalfCircuitSerialization(t *testing.T) {
- t.Parallel()
-
- onionProcessor := newOnionProcessor(t)
-
- for i, test := range halfCircuitTests {
- circuit := &htlcswitch.PaymentCircuit{
- PaymentHash: test.hash,
- IncomingAmount: lnwire.NewMSatFromSatoshis(test.inValue),
- OutgoingAmount: lnwire.NewMSatFromSatoshis(test.outValue),
- Incoming: htlcswitch.CircuitKey{
- ChanID: test.chanID,
- HtlcID: test.htlcID,
- },
- ErrorEncrypter: test.encrypter,
- }
-
- // Write the half circuit to our buffer.
- var b bytes.Buffer
- if err := circuit.Encode(&b); err != nil {
- t.Fatalf("unable to encode half payment circuit test=%d: %v", i, err)
- }
-
- // Then try to decode the serialized bytes.
- var circuit2 htlcswitch.PaymentCircuit
- circuitReader := bytes.NewReader(b.Bytes())
- if err := circuit2.Decode(circuitReader); err != nil {
- t.Fatalf("unable to decode half payment circuit test=%d: %v", i, err)
- }
-
- // If the error encrypter is initialized, we will need to
- // reextract it from it's decoded state, as this requires an
- // ECDH with the onion processor's private key. For mock error
- // encrypters, this will be a NOP.
- if circuit2.ErrorEncrypter != nil {
- err := circuit2.ErrorEncrypter.Reextract(
- onionProcessor.ExtractErrorEncrypter,
- )
- if err != nil {
- t.Fatalf("unable to reextract sphinx error "+
- "encrypter: %v", err)
- }
- }
-
- // Reconstructed half circuit should match the original.
- if !equalIgnoreLFD(circuit, &circuit2) {
- t.Fatalf("unexpected half circuit test=%d, want %v, got %v",
- i, circuit, circuit2)
- }
- }
-}
-
-func TestCircuitMapPersistence(t *testing.T) {
- t.Parallel()
-
- var (
- chan1 = lnwire.NewShortChanIDFromInt(1)
- chan2 = lnwire.NewShortChanIDFromInt(2)
- circuitMap htlcswitch.CircuitMap
- err er.R
- )
-
- cfg, circuitMap := newCircuitMap(t)
-
- circuit := circuitMap.LookupCircuit(htlcswitch.CircuitKey{
- ChanID: chan1,
- HtlcID: 0,
- })
- if circuit != nil {
- t.Fatalf("LookupByHTLC returned a circuit before any were added: %v",
- circuit)
- }
-
- circuit1 := &htlcswitch.PaymentCircuit{
- Incoming: htlcswitch.CircuitKey{
- ChanID: chan2,
- HtlcID: 1,
- },
- PaymentHash: hash1,
- ErrorEncrypter: htlcswitch.NewMockObfuscator(),
- }
- if _, err := circuitMap.CommitCircuits(circuit1); err != nil {
- t.Fatalf("unable to add half circuit: %v", err)
- }
-
- // Circuit map should have one circuit that has not been fully opened.
- assertNumCircuitsWithHash(t, circuitMap, hash1, 0)
- assertHasCircuit(t, circuitMap, circuit1)
-
- cfg, circuitMap = restartCircuitMap(t, cfg)
-
- assertNumCircuitsWithHash(t, circuitMap, hash1, 0)
- assertHasCircuit(t, circuitMap, circuit1)
-
- // Add multiple circuits with same destination channel but different HTLC
- // IDs and payment hashes.
- keystone1 := htlcswitch.Keystone{
- InKey: circuit1.Incoming,
- OutKey: htlcswitch.CircuitKey{
- ChanID: chan1,
- HtlcID: 0,
- },
- }
- circuit1.Outgoing = &keystone1.OutKey
- if err := circuitMap.OpenCircuits(keystone1); err != nil {
- t.Fatalf("unable to add full circuit: %v", err)
- }
-
- // Circuit map should reflect addition of circuit1, and the change
- // should survive a restart.
- assertNumCircuitsWithHash(t, circuitMap, hash1, 1)
- assertHasCircuit(t, circuitMap, circuit1)
- assertHasKeystone(t, circuitMap, keystone1.OutKey, circuit1)
-
- cfg, circuitMap = restartCircuitMap(t, cfg)
-
- assertNumCircuitsWithHash(t, circuitMap, hash1, 1)
- assertHasCircuit(t, circuitMap, circuit1)
- assertHasKeystone(t, circuitMap, keystone1.OutKey, circuit1)
-
- circuit2 := &htlcswitch.PaymentCircuit{
- Incoming: htlcswitch.CircuitKey{
- ChanID: chan2,
- HtlcID: 2,
- },
- PaymentHash: hash2,
- ErrorEncrypter: htlcswitch.NewMockObfuscator(),
- }
- if _, err := circuitMap.CommitCircuits(circuit2); err != nil {
- t.Fatalf("unable to add half circuit: %v", err)
- }
-
- assertHasCircuit(t, circuitMap, circuit2)
-
- keystone2 := htlcswitch.Keystone{
- InKey: circuit2.Incoming,
- OutKey: htlcswitch.CircuitKey{
- ChanID: chan1,
- HtlcID: 1,
- },
- }
- circuit2.Outgoing = &keystone2.OutKey
- if err := circuitMap.OpenCircuits(keystone2); err != nil {
- t.Fatalf("unable to add full circuit: %v", err)
- }
-
- // Should have two full circuits, one under hash1 and another under
- // hash2. Both half payment circuits should have been removed when the
- // full circuits were added.
- assertNumCircuitsWithHash(t, circuitMap, hash1, 1)
- assertHasCircuit(t, circuitMap, circuit1)
- assertHasKeystone(t, circuitMap, keystone1.OutKey, circuit1)
-
- assertNumCircuitsWithHash(t, circuitMap, hash2, 1)
- assertHasCircuit(t, circuitMap, circuit2)
- assertHasKeystone(t, circuitMap, keystone2.OutKey, circuit2)
-
- assertNumCircuitsWithHash(t, circuitMap, hash3, 0)
-
- cfg, circuitMap = restartCircuitMap(t, cfg)
-
- assertNumCircuitsWithHash(t, circuitMap, hash1, 1)
- assertHasCircuit(t, circuitMap, circuit1)
- assertHasKeystone(t, circuitMap, keystone1.OutKey, circuit1)
-
- assertNumCircuitsWithHash(t, circuitMap, hash2, 1)
- assertHasCircuit(t, circuitMap, circuit2)
- assertHasKeystone(t, circuitMap, keystone2.OutKey, circuit2)
-
- assertNumCircuitsWithHash(t, circuitMap, hash3, 0)
-
- circuit3 := &htlcswitch.PaymentCircuit{
- Incoming: htlcswitch.CircuitKey{
- ChanID: chan1,
- HtlcID: 2,
- },
- PaymentHash: hash3,
- ErrorEncrypter: htlcswitch.NewMockObfuscator(),
- }
- if _, err := circuitMap.CommitCircuits(circuit3); err != nil {
- t.Fatalf("unable to add half circuit: %v", err)
- }
-
- assertHasCircuit(t, circuitMap, circuit3)
- cfg, circuitMap = restartCircuitMap(t, cfg)
- assertHasCircuit(t, circuitMap, circuit3)
-
- // Add another circuit with an already-used HTLC ID but different
- // destination channel.
- keystone3 := htlcswitch.Keystone{
- InKey: circuit3.Incoming,
- OutKey: htlcswitch.CircuitKey{
- ChanID: chan2,
- HtlcID: 0,
- },
- }
- circuit3.Outgoing = &keystone3.OutKey
- if err := circuitMap.OpenCircuits(keystone3); err != nil {
- t.Fatalf("unable to add full circuit: %v", err)
- }
-
- // Check that all have been marked as full circuits, and that no half
- // circuits are currently being tracked.
- assertHasKeystone(t, circuitMap, keystone1.OutKey, circuit1)
- assertHasKeystone(t, circuitMap, keystone2.OutKey, circuit2)
- assertHasKeystone(t, circuitMap, keystone3.OutKey, circuit3)
- cfg, circuitMap = restartCircuitMap(t, cfg)
- assertHasKeystone(t, circuitMap, keystone1.OutKey, circuit1)
- assertHasKeystone(t, circuitMap, keystone2.OutKey, circuit2)
- assertHasKeystone(t, circuitMap, keystone3.OutKey, circuit3)
-
- // Even though a circuit was added with chan1, HTLC ID 2 as the source,
- // the lookup should go by destination channel, HTLC ID.
- invalidKeystone := htlcswitch.CircuitKey{
- ChanID: chan1,
- HtlcID: 2,
- }
- circuit = circuitMap.LookupOpenCircuit(invalidKeystone)
- if circuit != nil {
- t.Fatalf("LookupByHTLC returned a circuit without being added: %v",
- circuit)
- }
-
- circuit4 := &htlcswitch.PaymentCircuit{
- Incoming: htlcswitch.CircuitKey{
- ChanID: chan2,
- HtlcID: 3,
- },
- PaymentHash: hash1,
- ErrorEncrypter: htlcswitch.NewMockObfuscator(),
- }
- if _, err := circuitMap.CommitCircuits(circuit4); err != nil {
- t.Fatalf("unable to add half circuit: %v", err)
- }
-
- // Circuit map should still only show one circuit with hash1, since we
- // have not set the keystone for circuit4.
- assertNumCircuitsWithHash(t, circuitMap, hash1, 1)
- assertHasCircuit(t, circuitMap, circuit4)
-
- cfg, circuitMap = restartCircuitMap(t, cfg)
-
- assertNumCircuitsWithHash(t, circuitMap, hash1, 1)
- assertHasCircuit(t, circuitMap, circuit4)
-
- // Add a circuit with a destination channel and payment hash that are
- // already added but a different HTLC ID.
- keystone4 := htlcswitch.Keystone{
- InKey: circuit4.Incoming,
- OutKey: htlcswitch.CircuitKey{
- ChanID: chan1,
- HtlcID: 3,
- },
- }
- circuit4.Outgoing = &keystone4.OutKey
- if err := circuitMap.OpenCircuits(keystone4); err != nil {
- t.Fatalf("unable to add full circuit: %v", err)
- }
-
- // Verify that all circuits have been fully added.
- assertHasCircuit(t, circuitMap, circuit1)
- assertHasKeystone(t, circuitMap, keystone1.OutKey, circuit1)
- assertHasCircuit(t, circuitMap, circuit2)
- assertHasKeystone(t, circuitMap, keystone2.OutKey, circuit2)
- assertHasCircuit(t, circuitMap, circuit3)
- assertHasKeystone(t, circuitMap, keystone3.OutKey, circuit3)
- assertHasCircuit(t, circuitMap, circuit4)
- assertHasKeystone(t, circuitMap, keystone4.OutKey, circuit4)
-
- // Verify that each circuit is exposed via the proper hash bucketing.
- assertNumCircuitsWithHash(t, circuitMap, hash1, 2)
- assertHasCircuitForHash(t, circuitMap, hash1, circuit1)
- assertHasCircuitForHash(t, circuitMap, hash1, circuit4)
-
- assertNumCircuitsWithHash(t, circuitMap, hash2, 1)
- assertHasCircuitForHash(t, circuitMap, hash2, circuit2)
-
- assertNumCircuitsWithHash(t, circuitMap, hash3, 1)
- assertHasCircuitForHash(t, circuitMap, hash3, circuit3)
-
- // Restart, then run checks again.
- cfg, circuitMap = restartCircuitMap(t, cfg)
-
- // Verify that all circuits have been fully added.
- assertHasCircuit(t, circuitMap, circuit1)
- assertHasKeystone(t, circuitMap, keystone1.OutKey, circuit1)
- assertHasCircuit(t, circuitMap, circuit2)
- assertHasKeystone(t, circuitMap, keystone2.OutKey, circuit2)
- assertHasCircuit(t, circuitMap, circuit3)
- assertHasKeystone(t, circuitMap, keystone3.OutKey, circuit3)
- assertHasCircuit(t, circuitMap, circuit4)
- assertHasKeystone(t, circuitMap, keystone4.OutKey, circuit4)
-
- // Verify that each circuit is exposed via the proper hash bucketing.
- assertNumCircuitsWithHash(t, circuitMap, hash1, 2)
- assertHasCircuitForHash(t, circuitMap, hash1, circuit1)
- assertHasCircuitForHash(t, circuitMap, hash1, circuit4)
-
- assertNumCircuitsWithHash(t, circuitMap, hash2, 1)
- assertHasCircuitForHash(t, circuitMap, hash2, circuit2)
-
- assertNumCircuitsWithHash(t, circuitMap, hash3, 1)
- assertHasCircuitForHash(t, circuitMap, hash3, circuit3)
-
- // Test removing circuits and the subsequent lookups.
- err = circuitMap.DeleteCircuits(circuit1.Incoming)
- if err != nil {
- t.Fatalf("Remove returned unexpected error: %v", err)
- }
-
- // There should be exactly one remaining circuit with hash1, and it
- // should be circuit4.
- assertNumCircuitsWithHash(t, circuitMap, hash1, 1)
- assertHasCircuitForHash(t, circuitMap, hash1, circuit4)
- cfg, circuitMap = restartCircuitMap(t, cfg)
- assertNumCircuitsWithHash(t, circuitMap, hash1, 1)
- assertHasCircuitForHash(t, circuitMap, hash1, circuit4)
-
- // Removing already-removed circuit should return an error.
- err = circuitMap.DeleteCircuits(circuit1.Incoming)
- if err != nil {
- t.Fatal("Unexpected failure when deleting already "+
- "deleted circuit: %v", err)
- }
-
- // Verify that nothing related to hash1 has changed
- assertNumCircuitsWithHash(t, circuitMap, hash1, 1)
- assertHasCircuitForHash(t, circuitMap, hash1, circuit4)
-
- // Remove last remaining circuit with payment hash hash1.
- err = circuitMap.DeleteCircuits(circuit4.Incoming)
- if err != nil {
- t.Fatalf("Remove returned unexpected error: %v", err)
- }
-
- assertNumCircuitsWithHash(t, circuitMap, hash1, 0)
- assertNumCircuitsWithHash(t, circuitMap, hash2, 1)
- assertNumCircuitsWithHash(t, circuitMap, hash3, 1)
- cfg, circuitMap = restartCircuitMap(t, cfg)
- assertNumCircuitsWithHash(t, circuitMap, hash1, 0)
- assertNumCircuitsWithHash(t, circuitMap, hash2, 1)
- assertNumCircuitsWithHash(t, circuitMap, hash3, 1)
-
- // Remove last remaining circuit with payment hash hash2.
- err = circuitMap.DeleteCircuits(circuit2.Incoming)
- if err != nil {
- t.Fatalf("Remove returned unexpected error: %v", err)
- }
-
- // There should now only be one remaining circuit, with hash3.
- assertNumCircuitsWithHash(t, circuitMap, hash2, 0)
- assertNumCircuitsWithHash(t, circuitMap, hash3, 1)
- cfg, circuitMap = restartCircuitMap(t, cfg)
- assertNumCircuitsWithHash(t, circuitMap, hash2, 0)
- assertNumCircuitsWithHash(t, circuitMap, hash3, 1)
-
- // In removing the final circuit, we will try and remove all other known
- // circuits as well. Any circuits that are unknown to the circuit map
- // will be ignored, and only circuit 3 should be cause any change in the
- // state.
- err = circuitMap.DeleteCircuits(
- circuit1.Incoming, circuit2.Incoming,
- circuit3.Incoming, circuit4.Incoming,
- )
- if err != nil {
- t.Fatalf("Unexpected failure when removing circuit while also "+
- "deleting already deleted circuits: %v", err)
- }
-
- // Check that the circuit map is empty, even after restarting.
- assertNumCircuitsWithHash(t, circuitMap, hash3, 0)
- _, circuitMap = restartCircuitMap(t, cfg)
- assertNumCircuitsWithHash(t, circuitMap, hash3, 0)
-}
-
-// assertHasKeystone tests that the circuit map contains the provided payment
-// circuit.
-func assertHasKeystone(t *testing.T, cm htlcswitch.CircuitMap,
- outKey htlcswitch.CircuitKey, c *htlcswitch.PaymentCircuit) {
-
- circuit := cm.LookupOpenCircuit(outKey)
- if !equalIgnoreLFD(circuit, c) {
- t.Fatalf("unexpected circuit, want: %v, got %v", c, circuit)
- }
-}
-
-// assertHasCircuitForHash tests that the provided circuit appears in the list
-// of circuits for the given hash.
-func assertHasCircuitForHash(t *testing.T, cm htlcswitch.CircuitMap, hash [32]byte,
- circuit *htlcswitch.PaymentCircuit) {
-
- circuits := cm.LookupByPaymentHash(hash)
- for _, c := range circuits {
- if equalIgnoreLFD(c, circuit) {
- return
- }
- }
-
- t.Fatalf("unable to find circuit: %v by hash: %v", circuit, hash)
-}
-
-// assertNumCircuitsWithHash tests that the circuit has the right number of full
-// circuits, indexed by the given hash.
-func assertNumCircuitsWithHash(t *testing.T, cm htlcswitch.CircuitMap,
- hash [32]byte, expectedNum int) {
-
- circuits := cm.LookupByPaymentHash(hash)
- if len(circuits) != expectedNum {
- t.Fatalf("LookupByPaymentHash returned wrong number of circuits for "+
- "hash=%v: expecected %d, got %d", hash, expectedNum,
- len(circuits))
- }
-}
-
-// assertHasCircuit queries the circuit map using the half-circuit's half
-// key, and fails if the returned half-circuit differs from the provided one.
-func assertHasCircuit(t *testing.T, cm htlcswitch.CircuitMap,
- c *htlcswitch.PaymentCircuit) {
-
- c2 := cm.LookupCircuit(c.Incoming)
- if !equalIgnoreLFD(c, c2) {
- t.Fatalf("expected circuit: %v, got %v", c, c2)
- }
-}
-
-// equalIgnoreLFD compares two payment circuits, but ignores the current value
-// of LoadedFromDisk. The value is temporarily set to false for the comparison
-// and then restored.
-func equalIgnoreLFD(c, c2 *htlcswitch.PaymentCircuit) bool {
- ogLFD := c.LoadedFromDisk
- ogLFD2 := c2.LoadedFromDisk
-
- c.LoadedFromDisk = false
- c2.LoadedFromDisk = false
-
- isEqual := reflect.DeepEqual(c, c2)
-
- c.LoadedFromDisk = ogLFD
- c2.LoadedFromDisk = ogLFD2
-
- return isEqual
-}
-
-// makeCircuitDB initializes a new test channeldb for testing the persistence of
-// the circuit map. If an empty string is provided as a path, a temp directory
-// will be created.
-func makeCircuitDB(t *testing.T, path string) *channeldb.DB {
- if path == "" {
- var err error
- path, err = ioutil.TempDir("", "circuitdb")
- if err != nil {
- t.Fatalf("unable to create temp path: %v", err)
- }
- }
-
- db, err := channeldb.Open(path)
- if err != nil {
- t.Fatalf("unable to open channel db: %v", err)
- }
-
- return db
-}
-
-// Creates a new circuit map, backed by a freshly opened channeldb. The existing
-// channeldb is closed in order to simulate a complete restart.
-func restartCircuitMap(t *testing.T, cfg *htlcswitch.CircuitMapConfig) (
- *htlcswitch.CircuitMapConfig, htlcswitch.CircuitMap) {
-
- // Record the current temp path and close current db.
- dbPath := cfg.DB.Path()
- cfg.DB.Close()
-
- // Reinitialize circuit map with same db path.
- cfg2 := &htlcswitch.CircuitMapConfig{
- DB: makeCircuitDB(t, dbPath),
- ExtractErrorEncrypter: cfg.ExtractErrorEncrypter,
- }
- cm2, err := htlcswitch.NewCircuitMap(cfg2)
- if err != nil {
- t.Fatalf("unable to recreate persistent circuit map: %v", err)
- }
-
- return cfg2, cm2
-}
-
-// TestCircuitMapCommitCircuits tests the following behavior of CommitCircuits:
-// 1. New circuits are successfully added.
-// 2. Duplicate circuits are dropped anytime before circuit map shutsdown.
-// 3. Duplicate circuits are failed anytime after circuit map restarts.
-func TestCircuitMapCommitCircuits(t *testing.T) {
- t.Parallel()
-
- var (
- chan1 = lnwire.NewShortChanIDFromInt(1)
- circuitMap htlcswitch.CircuitMap
- err er.R
- )
-
- cfg, circuitMap := newCircuitMap(t)
-
- circuit := &htlcswitch.PaymentCircuit{
- Incoming: htlcswitch.CircuitKey{
- ChanID: chan1,
- HtlcID: 3,
- },
- ErrorEncrypter: testExtracter,
- }
-
- // First we will try to add an new circuit to the circuit map, this
- // should succeed.
- actions, err := circuitMap.CommitCircuits(circuit)
- if err != nil {
- t.Fatalf("failed to commit circuits: %v", err)
- }
- if len(actions.Drops) > 0 {
- t.Fatalf("new circuit should not have been dropped")
- }
- if len(actions.Fails) > 0 {
- t.Fatalf("new circuit should not have failed")
- }
- if len(actions.Adds) != 1 {
- t.Fatalf("only one circuit should have been added, found %d",
- len(actions.Adds))
- }
-
- circuit2 := circuitMap.LookupCircuit(circuit.Incoming)
- if !reflect.DeepEqual(circuit, circuit2) {
- t.Fatalf("unexpected committed circuit: got %v, want %v",
- circuit2, circuit)
- }
-
- // Then we will try to readd the same circuit again, this should result
- // in the circuit being dropped. This can happen if the incoming link
- // flaps.
- actions, err = circuitMap.CommitCircuits(circuit)
- if err != nil {
- t.Fatalf("failed to commit circuits: %v", err)
- }
- if len(actions.Adds) > 0 {
- t.Fatalf("duplicate circuit should not have been added to circuit map")
- }
- if len(actions.Fails) > 0 {
- t.Fatalf("duplicate circuit should not have failed")
- }
- if len(actions.Drops) != 1 {
- t.Fatalf("only one circuit should have been dropped, found %d",
- len(actions.Drops))
- }
-
- // Finally, restart the circuit map, which will cause the added circuit
- // to be loaded from disk. Since the keystone was never set, subsequent
- // attempts to commit the circuit should cause the circuit map to
- // indicate that the HTLC should be failed back.
- _, circuitMap = restartCircuitMap(t, cfg)
-
- actions, err = circuitMap.CommitCircuits(circuit)
- if err != nil {
- t.Fatalf("failed to commit circuits: %v", err)
- }
- if len(actions.Adds) > 0 {
- t.Fatalf("duplicate circuit with incomplete forwarding " +
- "decision should not have been added to circuit map")
- }
- if len(actions.Drops) > 0 {
- t.Fatalf("duplicate circuit with incomplete forwarding " +
- "decision should not have been dropped by circuit map")
- }
- if len(actions.Fails) != 1 {
- t.Fatalf("only one duplicate circuit with incomplete "+
- "forwarding decision should have been failed, found: "+
- "%d", len(actions.Fails))
- }
-
- // Lookup the committed circuit again, it should be identical apart from
- // the loaded from disk flag.
- circuit2 = circuitMap.LookupCircuit(circuit.Incoming)
- if !equalIgnoreLFD(circuit, circuit2) {
- t.Fatalf("unexpected committed circuit: got %v, want %v",
- circuit2, circuit)
- }
-}
-
-// TestCircuitMapOpenCircuits checks that circuits are properly opened, and that
-// duplicate attempts to open a circuit will result in an error.
-func TestCircuitMapOpenCircuits(t *testing.T) {
- t.Parallel()
-
- var (
- chan1 = lnwire.NewShortChanIDFromInt(1)
- chan2 = lnwire.NewShortChanIDFromInt(2)
- circuitMap htlcswitch.CircuitMap
- err er.R
- )
-
- cfg, circuitMap := newCircuitMap(t)
-
- circuit := &htlcswitch.PaymentCircuit{
- Incoming: htlcswitch.CircuitKey{
- ChanID: chan1,
- HtlcID: 3,
- },
- ErrorEncrypter: testExtracter,
- }
-
- // First we will try to add an new circuit to the circuit map, this
- // should succeed.
- _, err = circuitMap.CommitCircuits(circuit)
- if err != nil {
- t.Fatalf("failed to commit circuits: %v", err)
- }
-
- keystone := htlcswitch.Keystone{
- InKey: circuit.Incoming,
- OutKey: htlcswitch.CircuitKey{
- ChanID: chan2,
- HtlcID: 2,
- },
- }
-
- // Open the circuit for the first time.
- err = circuitMap.OpenCircuits(keystone)
- if err != nil {
- t.Fatalf("failed to open circuits: %v", err)
- }
-
- // Check that we can retrieve the open circuit if the circuit map before
- // the circuit map is restarted.
- circuit2 := circuitMap.LookupOpenCircuit(keystone.OutKey)
- if !reflect.DeepEqual(circuit, circuit2) {
- t.Fatalf("unexpected open circuit: got %v, want %v",
- circuit2, circuit)
- }
-
- if !circuit2.HasKeystone() {
- t.Fatalf("open circuit should have keystone")
- }
- if !reflect.DeepEqual(&keystone.OutKey, circuit2.Outgoing) {
- t.Fatalf("expected open circuit to have outgoing key: %v, found %v",
- &keystone.OutKey, circuit2.Outgoing)
- }
-
- // Open the circuit for a second time, which should fail due to a
- // duplicate keystone
- err = circuitMap.OpenCircuits(keystone)
- if !htlcswitch.ErrDuplicateKeystone.Is(err) {
- t.Fatalf("failed to open circuits: %v", err)
- }
-
- // Then we will try to readd the same circuit again, this should result
- // in the circuit being dropped. This can happen if the incoming link
- // flaps OR the switch is entirely restarted and the outgoing link has
- // not received a response.
- actions, err := circuitMap.CommitCircuits(circuit)
- if err != nil {
- t.Fatalf("failed to commit circuits: %v", err)
- }
- if len(actions.Adds) > 0 {
- t.Fatalf("duplicate circuit should not have been added to circuit map")
- }
- if len(actions.Fails) > 0 {
- t.Fatalf("duplicate circuit should not have failed")
- }
- if len(actions.Drops) != 1 {
- t.Fatalf("only one circuit should have been dropped, found %d",
- len(actions.Drops))
- }
-
- // Now, restart the circuit map, which will cause the opened circuit to
- // be loaded from disk. Since we set the keystone on this circuit, it
- // should be restored as such in memory.
- //
- // NOTE: The channel db doesn't have any channel data, so no keystones
- // will be trimmed.
- _, circuitMap = restartCircuitMap(t, cfg)
-
- // Check that we can still query for the open circuit.
- circuit2 = circuitMap.LookupOpenCircuit(keystone.OutKey)
- if !equalIgnoreLFD(circuit, circuit2) {
- t.Fatalf("unexpected open circuit: got %v, want %v",
- circuit2, circuit)
- }
-
- // Try to open the circuit again, we expect this to fail since the open
- // circuit was restored.
- err = circuitMap.OpenCircuits(keystone)
- if !htlcswitch.ErrDuplicateKeystone.Is(err) {
- t.Fatalf("failed to open circuits: %v", err)
- }
-
- // Lastly, with the circuit map restarted, try one more time to recommit
- // the open circuit. This should be dropped, and is expected to happen
- // if the incoming link flaps OR the switch is entirely restarted and
- // the outgoing link has not received a response.
- actions, err = circuitMap.CommitCircuits(circuit)
- if err != nil {
- t.Fatalf("failed to commit circuits: %v", err)
- }
- if len(actions.Adds) > 0 {
- t.Fatalf("duplicate circuit should not have been added to circuit map")
- }
- if len(actions.Fails) > 0 {
- t.Fatalf("duplicate circuit should not have failed")
- }
- if len(actions.Drops) != 1 {
- t.Fatalf("only one circuit should have been dropped, found %d",
- len(actions.Drops))
- }
-}
-
-func assertCircuitsOpenedPreRestart(t *testing.T,
- circuitMap htlcswitch.CircuitMap,
- circuits []*htlcswitch.PaymentCircuit,
- keystones []htlcswitch.Keystone) {
-
- for i, circuit := range circuits {
- keystone := keystones[i]
-
- openCircuit := circuitMap.LookupOpenCircuit(keystone.OutKey)
- if !reflect.DeepEqual(circuit, openCircuit) {
- t.Fatalf("unexpected open circuit %d: got %v, want %v",
- i, openCircuit, circuit)
- }
-
- if !openCircuit.HasKeystone() {
- t.Fatalf("open circuit %d should have keystone", i)
- }
- if !reflect.DeepEqual(&keystone.OutKey, openCircuit.Outgoing) {
- t.Fatalf("expected open circuit %d to have outgoing "+
- "key: %v, found %v", i,
- &keystone.OutKey, openCircuit.Outgoing)
- }
- }
-}
-
-func assertCircuitsOpenedPostRestart(t *testing.T,
- circuitMap htlcswitch.CircuitMap,
- circuits []*htlcswitch.PaymentCircuit,
- keystones []htlcswitch.Keystone) {
-
- for i, circuit := range circuits {
- keystone := keystones[i]
-
- openCircuit := circuitMap.LookupOpenCircuit(keystone.OutKey)
- if !equalIgnoreLFD(circuit, openCircuit) {
- t.Fatalf("unexpected open circuit %d: got %v, want %v",
- i, openCircuit, circuit)
- }
-
- if !openCircuit.HasKeystone() {
- t.Fatalf("open circuit %d should have keystone", i)
- }
- if !reflect.DeepEqual(&keystone.OutKey, openCircuit.Outgoing) {
- t.Fatalf("expected open circuit %d to have outgoing "+
- "key: %v, found %v", i,
- &keystone.OutKey, openCircuit.Outgoing)
- }
- }
-}
-
-func assertCircuitsNotOpenedPreRestart(t *testing.T,
- circuitMap htlcswitch.CircuitMap,
- circuits []*htlcswitch.PaymentCircuit,
- keystones []htlcswitch.Keystone,
- offset int) {
-
- for i := range circuits {
- keystone := keystones[i]
-
- openCircuit := circuitMap.LookupOpenCircuit(keystone.OutKey)
- if openCircuit != nil {
- t.Fatalf("expected circuit %d not to be open",
- offset+i)
- }
-
- circuit := circuitMap.LookupCircuit(keystone.InKey)
- if circuit == nil {
- t.Fatalf("expected to find unopened circuit %d",
- offset+i)
- }
- if circuit.HasKeystone() {
- t.Fatalf("circuit %d should not have keystone",
- offset+i)
- }
- }
-}
-
-// TestCircuitMapTrimOpenCircuits verifies that the circuit map properly removes
-// circuits from disk and the in-memory state when TrimOpenCircuits is used.
-// This test checks that a successful trim survives a restart, and that circuits
-// added before the restart can also be trimmed.
-func TestCircuitMapTrimOpenCircuits(t *testing.T) {
- t.Parallel()
-
- var (
- chan1 = lnwire.NewShortChanIDFromInt(1)
- chan2 = lnwire.NewShortChanIDFromInt(2)
- circuitMap htlcswitch.CircuitMap
- err er.R
- )
-
- cfg, circuitMap := newCircuitMap(t)
-
- const nCircuits = 10
- const firstTrimIndex = 7
- const secondTrimIndex = 3
-
- // Create a list of all circuits that will be committed in the circuit
- // map. The incoming HtlcIDs are chosen so that there is overlap with
- // the outgoing HtlcIDs, but ensures that the test is not dependent on
- // them being equal.
- circuits := make([]*htlcswitch.PaymentCircuit, nCircuits)
- for i := range circuits {
- circuits[i] = &htlcswitch.PaymentCircuit{
- Incoming: htlcswitch.CircuitKey{
- ChanID: chan1,
- HtlcID: uint64(i + 3),
- },
- ErrorEncrypter: htlcswitch.NewMockObfuscator(),
- }
- }
-
- // First we will try to add an new circuit to the circuit map, this
- // should succeed.
- _, err = circuitMap.CommitCircuits(circuits...)
- if err != nil {
- t.Fatalf("failed to commit circuits: %v", err)
- }
-
- // Now create a list of the keystones that we will use to preemptively
- // open the circuits. We set the index as the outgoing HtlcID to i
- // simplify the indexing logic of the test.
- keystones := make([]htlcswitch.Keystone, nCircuits)
- for i := range keystones {
- keystones[i] = htlcswitch.Keystone{
- InKey: circuits[i].Incoming,
- OutKey: htlcswitch.CircuitKey{
- ChanID: chan2,
- HtlcID: uint64(i),
- },
- }
- }
-
- // Open the circuits for the first time.
- err = circuitMap.OpenCircuits(keystones...)
- if err != nil {
- t.Fatalf("failed to open circuits: %v", err)
- }
-
- // Check that all circuits are marked open.
- assertCircuitsOpenedPreRestart(t, circuitMap, circuits, keystones)
-
- // Now trim up above outgoing htlcid `firstTrimIndex` (7). This should
- // leave the first 7 circuits open, and the rest should be reverted to
- // an unopened state.
- err = circuitMap.TrimOpenCircuits(chan2, firstTrimIndex)
- if err != nil {
- t.Fatalf("unable to trim circuits")
- }
-
- assertCircuitsOpenedPreRestart(t,
- circuitMap,
- circuits[:firstTrimIndex],
- keystones[:firstTrimIndex],
- )
-
- assertCircuitsNotOpenedPreRestart(
- t,
- circuitMap,
- circuits[firstTrimIndex:],
- keystones[firstTrimIndex:],
- firstTrimIndex,
- )
-
- // Restart the circuit map, verify that the trim is reflected on
- // startup.
- cfg, circuitMap = restartCircuitMap(t, cfg)
-
- assertCircuitsOpenedPostRestart(
- t,
- circuitMap,
- circuits[:firstTrimIndex],
- keystones[:firstTrimIndex],
- )
-
- assertCircuitsNotOpenedPreRestart(
- t,
- circuitMap,
- circuits[firstTrimIndex:],
- keystones[firstTrimIndex:],
- firstTrimIndex,
- )
-
- // Now, trim above outgoing htlcid `secondTrimIndex` (3). Only the first
- // three circuits should be open, with any others being reverted back to
- // unopened.
- err = circuitMap.TrimOpenCircuits(chan2, secondTrimIndex)
- if err != nil {
- t.Fatalf("unable to trim circuits")
- }
-
- assertCircuitsOpenedPostRestart(
- t,
- circuitMap,
- circuits[:secondTrimIndex],
- keystones[:secondTrimIndex],
- )
-
- assertCircuitsNotOpenedPreRestart(
- t,
- circuitMap,
- circuits[secondTrimIndex:],
- keystones[secondTrimIndex:],
- secondTrimIndex,
- )
-
- // Restart the circuit map one last time to make sure the changes are
- // persisted.
- _, circuitMap = restartCircuitMap(t, cfg)
-
- assertCircuitsOpenedPostRestart(
- t,
- circuitMap,
- circuits[:secondTrimIndex],
- keystones[:secondTrimIndex],
- )
-
- assertCircuitsNotOpenedPreRestart(
- t,
- circuitMap,
- circuits[secondTrimIndex:],
- keystones[secondTrimIndex:],
- secondTrimIndex,
- )
-}
-
-// TestCircuitMapCloseOpenCircuits asserts that the circuit map can properly
-// close open circuits, and that it allows at most one response to do so
-// successfully. It also checks that a circuit is reopened if the close was not
-// persisted via DeleteCircuits, and can again be closed.
-func TestCircuitMapCloseOpenCircuits(t *testing.T) {
- t.Parallel()
-
- var (
- chan1 = lnwire.NewShortChanIDFromInt(1)
- chan2 = lnwire.NewShortChanIDFromInt(2)
- circuitMap htlcswitch.CircuitMap
- err er.R
- )
-
- cfg, circuitMap := newCircuitMap(t)
-
- circuit := &htlcswitch.PaymentCircuit{
- Incoming: htlcswitch.CircuitKey{
- ChanID: chan1,
- HtlcID: 3,
- },
- ErrorEncrypter: &hop.SphinxErrorEncrypter{
- EphemeralKey: testEphemeralKey,
- },
- }
-
- // First we will try to add an new circuit to the circuit map, this
- // should succeed.
- _, err = circuitMap.CommitCircuits(circuit)
- if err != nil {
- t.Fatalf("failed to commit circuits: %v", err)
- }
-
- keystone := htlcswitch.Keystone{
- InKey: circuit.Incoming,
- OutKey: htlcswitch.CircuitKey{
- ChanID: chan2,
- HtlcID: 2,
- },
- }
-
- // Open the circuit for the first time.
- err = circuitMap.OpenCircuits(keystone)
- if err != nil {
- t.Fatalf("failed to open circuits: %v", err)
- }
-
- // Check that we can retrieve the open circuit if the circuit map before
- // the circuit map is restarted.
- circuit2 := circuitMap.LookupOpenCircuit(keystone.OutKey)
- if !reflect.DeepEqual(circuit, circuit2) {
- t.Fatalf("unexpected open circuit: got %v, want %v",
- circuit2, circuit)
- }
-
- // Open the circuit for a second time, which should fail due to a
- // duplicate keystone
- err = circuitMap.OpenCircuits(keystone)
- if !htlcswitch.ErrDuplicateKeystone.Is(err) {
- t.Fatalf("failed to open circuits: %v", err)
- }
-
- // Close the open circuit for the first time, which should succeed.
- _, err = circuitMap.FailCircuit(circuit.Incoming)
- if err != nil {
- t.Fatalf("unable to close unopened circuit")
- }
-
- // Closing the circuit a second time should result in a failure.
- _, err = circuitMap.FailCircuit(circuit.Incoming)
- if !htlcswitch.ErrCircuitClosing.Is(err) {
- t.Fatalf("unable to close unopened circuit")
- }
-
- // Now, restart the circuit map, which will cause the opened circuit to
- // be loaded from disk. Since we set the keystone on this circuit, it
- // should be restored as such in memory.
- //
- // NOTE: The channel db doesn't have any channel data, so no keystones
- // will be trimmed.
- _, circuitMap = restartCircuitMap(t, cfg)
-
- // Close the open circuit for the first time, which should succeed.
- _, err = circuitMap.FailCircuit(circuit.Incoming)
- if err != nil {
- t.Fatalf("unable to close unopened circuit")
- }
-
- // Closing the circuit a second time should result in a failure.
- _, err = circuitMap.FailCircuit(circuit.Incoming)
- if !htlcswitch.ErrCircuitClosing.Is(err) {
- t.Fatalf("unable to close unopened circuit")
- }
-}
-
-// TestCircuitMapCloseUnopenedCircuit tests that closing an unopened circuit
-// allows at most semantics, and that the close is not persisted across
-// restarts.
-func TestCircuitMapCloseUnopenedCircuit(t *testing.T) {
- t.Parallel()
-
- var (
- chan1 = lnwire.NewShortChanIDFromInt(1)
- circuitMap htlcswitch.CircuitMap
- err er.R
- )
-
- cfg, circuitMap := newCircuitMap(t)
-
- circuit := &htlcswitch.PaymentCircuit{
- Incoming: htlcswitch.CircuitKey{
- ChanID: chan1,
- HtlcID: 3,
- },
- ErrorEncrypter: testExtracter,
- }
-
- // First we will try to add an new circuit to the circuit map, this
- // should succeed.
- _, err = circuitMap.CommitCircuits(circuit)
- if err != nil {
- t.Fatalf("failed to commit circuits: %v", err)
- }
-
- // Close the open circuit for the first time, which should succeed.
- _, err = circuitMap.FailCircuit(circuit.Incoming)
- if err != nil {
- t.Fatalf("unable to close unopened circuit")
- }
-
- // Closing the circuit a second time should result in a failure.
- _, err = circuitMap.FailCircuit(circuit.Incoming)
- if !htlcswitch.ErrCircuitClosing.Is(err) {
- t.Fatalf("unable to close unopened circuit")
- }
-
- // Now, restart the circuit map, which will result in the circuit being
- // reopened, since no attempt to delete the circuit was made.
- _, circuitMap = restartCircuitMap(t, cfg)
-
- // Close the open circuit for the first time, which should succeed.
- _, err = circuitMap.FailCircuit(circuit.Incoming)
- if err != nil {
- t.Fatalf("unable to close unopened circuit")
- }
-
- // Closing the circuit a second time should result in a failure.
- _, err = circuitMap.FailCircuit(circuit.Incoming)
- if !htlcswitch.ErrCircuitClosing.Is(err) {
- t.Fatalf("unable to close unopened circuit")
- }
-}
-
-// TestCircuitMapDeleteUnopenedCircuit checks that an unopened circuit can be
-// removed persistently from the circuit map.
-func TestCircuitMapDeleteUnopenedCircuit(t *testing.T) {
- t.Parallel()
-
- var (
- chan1 = lnwire.NewShortChanIDFromInt(1)
- circuitMap htlcswitch.CircuitMap
- err er.R
- )
-
- cfg, circuitMap := newCircuitMap(t)
-
- circuit := &htlcswitch.PaymentCircuit{
- Incoming: htlcswitch.CircuitKey{
- ChanID: chan1,
- HtlcID: 3,
- },
- ErrorEncrypter: testExtracter,
- }
-
- // First we will try to add an new circuit to the circuit map, this
- // should succeed.
- _, err = circuitMap.CommitCircuits(circuit)
- if err != nil {
- t.Fatalf("failed to commit circuits: %v", err)
- }
-
- // Close the open circuit for the first time, which should succeed.
- _, err = circuitMap.FailCircuit(circuit.Incoming)
- if err != nil {
- t.Fatalf("unable to close unopened circuit")
- }
-
- err = circuitMap.DeleteCircuits(circuit.Incoming)
- if err != nil {
- t.Fatalf("unable to close unopened circuit")
- }
-
- // Check that we can retrieve the open circuit if the circuit map before
- // the circuit map is restarted.
- circuit2 := circuitMap.LookupCircuit(circuit.Incoming)
- if circuit2 != nil {
- t.Fatalf("unexpected open circuit: got %v, want %v",
- circuit2, nil)
- }
-
- // Now, restart the circuit map, and check that the deletion survived
- // the restart.
- _, circuitMap = restartCircuitMap(t, cfg)
-
- circuit2 = circuitMap.LookupCircuit(circuit.Incoming)
- if circuit2 != nil {
- t.Fatalf("unexpected open circuit: got %v, want %v",
- circuit2, nil)
- }
-}
-
-// TestCircuitMapDeleteUnopenedCircuit checks that an open circuit can be
-// removed persistently from the circuit map.
-func TestCircuitMapDeleteOpenCircuit(t *testing.T) {
- t.Parallel()
-
- var (
- chan1 = lnwire.NewShortChanIDFromInt(1)
- chan2 = lnwire.NewShortChanIDFromInt(2)
- circuitMap htlcswitch.CircuitMap
- err er.R
- )
-
- cfg, circuitMap := newCircuitMap(t)
-
- circuit := &htlcswitch.PaymentCircuit{
- Incoming: htlcswitch.CircuitKey{
- ChanID: chan1,
- HtlcID: 3,
- },
- ErrorEncrypter: testExtracter,
- }
-
- // First we will try to add an new circuit to the circuit map, this
- // should succeed.
- _, err = circuitMap.CommitCircuits(circuit)
- if err != nil {
- t.Fatalf("failed to commit circuits: %v", err)
- }
-
- keystone := htlcswitch.Keystone{
- InKey: circuit.Incoming,
- OutKey: htlcswitch.CircuitKey{
- ChanID: chan2,
- HtlcID: 2,
- },
- }
-
- // Open the circuit for the first time.
- err = circuitMap.OpenCircuits(keystone)
- if err != nil {
- t.Fatalf("failed to open circuits: %v", err)
- }
-
- // Close the open circuit for the first time, which should succeed.
- _, err = circuitMap.FailCircuit(circuit.Incoming)
- if err != nil {
- t.Fatalf("unable to close unopened circuit")
- }
-
- // Persistently remove the circuit identified by incoming chan id.
- err = circuitMap.DeleteCircuits(circuit.Incoming)
- if err != nil {
- t.Fatalf("unable to close unopened circuit")
- }
-
- // Check that we can no longer retrieve the open circuit.
- circuit2 := circuitMap.LookupOpenCircuit(keystone.OutKey)
- if circuit2 != nil {
- t.Fatalf("unexpected open circuit: got %v, want %v",
- circuit2, nil)
- }
-
- // Now, restart the circuit map, and check that the deletion survived
- // the restart.
- _, circuitMap = restartCircuitMap(t, cfg)
-
- circuit2 = circuitMap.LookupOpenCircuit(keystone.OutKey)
- if circuit2 != nil {
- t.Fatalf("unexpected open circuit: got %v, want %v",
- circuit2, nil)
- }
-}
diff --git a/lnd/htlcswitch/decayedlog.go b/lnd/htlcswitch/decayedlog.go
deleted file mode 100644
index 45cd6faf..00000000
--- a/lnd/htlcswitch/decayedlog.go
+++ /dev/null
@@ -1,413 +0,0 @@
-package htlcswitch
-
-import (
- "bytes"
- "encoding/binary"
- "sync"
- "sync/atomic"
-
- "github.com/pkt-cash/pktd/btcutil/er"
- sphinx "github.com/pkt-cash/pktd/lightning-onion"
- "github.com/pkt-cash/pktd/lnd/chainntnfs"
- "github.com/pkt-cash/pktd/lnd/channeldb/kvdb"
- "github.com/pkt-cash/pktd/pktlog/log"
-)
-
-const (
- // defaultDbDirectory is the default directory where our decayed log
- // will store our (sharedHash, CLTV) key-value pairs.
- defaultDbDirectory = "sharedhashes"
-)
-
-var (
- // sharedHashBucket is a bucket which houses the first HashPrefixSize
- // bytes of a received HTLC's hashed shared secret as the key and the HTLC's
- // CLTV expiry as the value.
- sharedHashBucket = []byte("shared-hash")
-
- // batchReplayBucket is a bucket that maps batch identifiers to
- // serialized ReplaySets. This is used to give idempotency in the event
- // that a batch is processed more than once.
- batchReplayBucket = []byte("batch-replay")
-)
-
-var (
- // ErrDecayedLogInit is used to indicate a decayed log failed to create
- // the proper bucketing structure on startup.
- ErrDecayedLogInit = Err.CodeWithDetail("ErrDecayedLogInit", "unable to initialize decayed log")
-
- // ErrDecayedLogCorrupted signals that the anticipated bucketing
- // structure has diverged since initialization.
- ErrDecayedLogCorrupted = Err.CodeWithDetail("ErrDecayedLogCorrupted", "decayed log structure corrupted")
-)
-
-// DecayedLog implements the PersistLog interface. It stores the first
-// HashPrefixSize bytes of a sha256-hashed shared secret along with a node's
-// CLTV value. It is a decaying log meaning there will be a garbage collector
-// to collect entries which are expired according to their stored CLTV value
-// and the current block height. DecayedLog wraps boltdb for simplicity and
-// batches writes to the database to decrease write contention.
-type DecayedLog struct {
- started int32 // To be used atomically.
- stopped int32 // To be used atomically.
-
- cfg *kvdb.BoltBackendConfig
-
- db kvdb.Backend
-
- notifier chainntnfs.ChainNotifier
-
- wg sync.WaitGroup
- quit chan struct{}
-}
-
-// NewDecayedLog creates a new DecayedLog, which caches recently seen hash
-// shared secrets. Entries are evicted as their cltv expires using block epochs
-// from the given notifier.
-func NewDecayedLog(dbPath, dbFileName string, boltCfg *kvdb.BoltConfig,
- notifier chainntnfs.ChainNotifier) *DecayedLog {
-
- cfg := &kvdb.BoltBackendConfig{
- DBPath: dbPath,
- DBFileName: dbFileName,
- NoFreelistSync: true,
- AutoCompact: boltCfg.AutoCompact,
- AutoCompactMinAge: boltCfg.AutoCompactMinAge,
- }
-
- // Use default path for log database
- if dbPath == "" {
- cfg.DBPath = defaultDbDirectory
- }
-
- return &DecayedLog{
- cfg: cfg,
- notifier: notifier,
- quit: make(chan struct{}),
- }
-}
-
-// Start opens the database we will be using to store hashed shared secrets.
-// It also starts the garbage collector in a goroutine to remove stale
-// database entries.
-func (d *DecayedLog) Start() er.R {
- if !atomic.CompareAndSwapInt32(&d.started, 0, 1) {
- return nil
- }
-
- // Open the boltdb for use.
- var err er.R
- d.db, err = kvdb.GetBoltBackend(d.cfg)
- if err != nil {
- return er.Errorf("could not open boltdb: %v", err)
- }
-
- // Initialize the primary buckets used by the decayed log.
- if err := d.initBuckets(); err != nil {
- return err
- }
-
- // Start garbage collector.
- if d.notifier != nil {
- epochClient, err := d.notifier.RegisterBlockEpochNtfn(nil)
- if err != nil {
- return er.Errorf("unable to register for epoch "+
- "notifications: %v", err)
- }
-
- d.wg.Add(1)
- go d.garbageCollector(epochClient)
- }
-
- return nil
-}
-
-// initBuckets initializes the primary buckets used by the decayed log, namely
-// the shared hash bucket, and batch replay
-func (d *DecayedLog) initBuckets() er.R {
- return kvdb.Update(d.db, func(tx kvdb.RwTx) er.R {
- _, err := tx.CreateTopLevelBucket(sharedHashBucket)
- if err != nil {
- return ErrDecayedLogInit.Default()
- }
-
- _, err = tx.CreateTopLevelBucket(batchReplayBucket)
- if err != nil {
- return ErrDecayedLogInit.Default()
- }
-
- return nil
- }, func() {})
-}
-
-// Stop halts the garbage collector and closes boltdb.
-func (d *DecayedLog) Stop() er.R {
- if !atomic.CompareAndSwapInt32(&d.stopped, 0, 1) {
- return nil
- }
-
- // Stop garbage collector.
- close(d.quit)
-
- d.wg.Wait()
-
- // Close boltdb.
- d.db.Close()
-
- return nil
-}
-
-// garbageCollector deletes entries from sharedHashBucket whose expiry height
-// has already past. This function MUST be run as a goroutine.
-func (d *DecayedLog) garbageCollector(epochClient *chainntnfs.BlockEpochEvent) {
- defer d.wg.Done()
- defer epochClient.Cancel()
-
- for {
- select {
- case epoch, ok := <-epochClient.Epochs:
- if !ok {
- // Block epoch was canceled, shutting down.
- log.Infof("Block epoch canceled, " +
- "decaying hash log shutting down")
- return
- }
-
- // Perform a bout of garbage collection using the
- // epoch's block height.
- height := uint32(epoch.Height)
- numExpired, err := d.gcExpiredHashes(height)
- if err != nil {
- log.Errorf("unable to expire hashes at "+
- "height=%d", height)
- }
-
- if numExpired > 0 {
- log.Infof("Garbage collected %v shared "+
- "secret hashes at height=%v",
- numExpired, height)
- }
-
- case <-d.quit:
- // Received shutdown request.
- log.Infof("Decaying hash log received " +
- "shutdown request")
- return
- }
- }
-}
-
-// gcExpiredHashes purges the decaying log of all entries whose CLTV expires
-// below the provided height.
-func (d *DecayedLog) gcExpiredHashes(height uint32) (uint32, er.R) {
- var numExpiredHashes uint32
-
- err := kvdb.Batch(d.db, func(tx kvdb.RwTx) er.R {
- numExpiredHashes = 0
-
- // Grab the shared hash bucket
- sharedHashes := tx.ReadWriteBucket(sharedHashBucket)
- if sharedHashes == nil {
- return er.Errorf("sharedHashBucket " +
- "is nil")
- }
-
- var expiredCltv [][]byte
- if err := sharedHashes.ForEach(func(k, v []byte) er.R {
- // Deserialize the CLTV value for this entry.
- cltv := uint32(binary.BigEndian.Uint32(v))
-
- if cltv < height {
- // This CLTV is expired. We must add it to an
- // array which we'll loop over and delete every
- // hash contained from the db.
- expiredCltv = append(expiredCltv, k)
- numExpiredHashes++
- }
-
- return nil
- }); err != nil {
- return err
- }
-
- // Delete every item in the array. This must
- // be done explicitly outside of the ForEach
- // function for safety reasons.
- for _, hash := range expiredCltv {
- err := sharedHashes.Delete(hash)
- if err != nil {
- return err
- }
- }
-
- return nil
- })
- if err != nil {
- return 0, err
- }
-
- return numExpiredHashes, nil
-}
-
-// Delete removes a key-pair from the
-// sharedHashBucket.
-func (d *DecayedLog) Delete(hash *sphinx.HashPrefix) er.R {
- return kvdb.Batch(d.db, func(tx kvdb.RwTx) er.R {
- sharedHashes := tx.ReadWriteBucket(sharedHashBucket)
- if sharedHashes == nil {
- return ErrDecayedLogCorrupted.Default()
- }
-
- return sharedHashes.Delete(hash[:])
- })
-}
-
-// Get retrieves the CLTV of a processed HTLC given the first 20 bytes of the
-// Sha-256 hash of the shared secret.
-func (d *DecayedLog) Get(hash *sphinx.HashPrefix) (uint32, er.R) {
- var value uint32
-
- err := kvdb.View(d.db, func(tx kvdb.RTx) er.R {
- // Grab the shared hash bucket which stores the mapping from
- // truncated sha-256 hashes of shared secrets to CLTV's.
- sharedHashes := tx.ReadBucket(sharedHashBucket)
- if sharedHashes == nil {
- return er.Errorf("sharedHashes is nil, could " +
- "not retrieve CLTV value")
- }
-
- // Retrieve the bytes which represents the CLTV
- valueBytes := sharedHashes.Get(hash[:])
- if valueBytes == nil {
- return sphinx.ErrLogEntryNotFound.Default()
- }
-
- // The first 4 bytes represent the CLTV, store it in value.
- value = uint32(binary.BigEndian.Uint32(valueBytes))
-
- return nil
- }, func() {
- value = 0
- })
- if err != nil {
- return value, err
- }
-
- return value, nil
-}
-
-// Put stores a shared secret hash as the key and the CLTV as the value.
-func (d *DecayedLog) Put(hash *sphinx.HashPrefix, cltv uint32) er.R {
- // Optimisitically serialize the cltv value into the scratch buffer.
- var scratch [4]byte
- binary.BigEndian.PutUint32(scratch[:], cltv)
-
- return kvdb.Batch(d.db, func(tx kvdb.RwTx) er.R {
- sharedHashes := tx.ReadWriteBucket(sharedHashBucket)
- if sharedHashes == nil {
- return ErrDecayedLogCorrupted.Default()
- }
-
- // Check to see if this hash prefix has been recorded before. If
- // a value is found, this packet is being replayed.
- valueBytes := sharedHashes.Get(hash[:])
- if valueBytes != nil {
- return sphinx.ErrReplayedPacket.Default()
- }
-
- return sharedHashes.Put(hash[:], scratch[:])
- })
-}
-
-// PutBatch accepts a pending batch of hashed secret entries to write to disk.
-// Each hashed secret is inserted with a corresponding time value, dictating
-// when the entry will be evicted from the log.
-// NOTE: This method enforces idempotency by writing the replay set obtained
-// from the first attempt for a particular batch ID, and decoding the return
-// value to subsequent calls. For the indices of the replay set to be aligned
-// properly, the batch MUST be constructed identically to the first attempt,
-// pruning will cause the indices to become invalid.
-func (d *DecayedLog) PutBatch(b *sphinx.Batch) (*sphinx.ReplaySet, er.R) {
- // Since batched boltdb txns may be executed multiple times before
- // succeeding, we will create a new replay set for each invocation to
- // avoid any side-effects. If the txn is successful, this replay set
- // will be merged with the replay set computed during batch construction
- // to generate the complete replay set. If this batch was previously
- // processed, the replay set will be deserialized from disk.
- var replays *sphinx.ReplaySet
- if err := kvdb.Batch(d.db, func(tx kvdb.RwTx) er.R {
- sharedHashes := tx.ReadWriteBucket(sharedHashBucket)
- if sharedHashes == nil {
- return ErrDecayedLogCorrupted.Default()
- }
-
- // Load the batch replay bucket, which will be used to either
- // retrieve the result of previously processing this batch, or
- // to write the result of this operation.
- batchReplayBkt := tx.ReadWriteBucket(batchReplayBucket)
- if batchReplayBkt == nil {
- return ErrDecayedLogCorrupted.Default()
- }
-
- // Check for the existence of this batch's id in the replay
- // bucket. If a non-nil value is found, this indicates that we
- // have already processed this batch before. We deserialize the
- // resulting and return it to ensure calls to put batch are
- // idempotent.
- replayBytes := batchReplayBkt.Get(b.ID)
- if replayBytes != nil {
- replays = sphinx.NewReplaySet()
- return replays.Decode(bytes.NewReader(replayBytes))
- }
-
- // The CLTV will be stored into scratch and then stored into the
- // sharedHashBucket.
- var scratch [4]byte
-
- replays = sphinx.NewReplaySet()
- err := b.ForEach(func(seqNum uint16, hashPrefix *sphinx.HashPrefix, cltv uint32) er.R {
- // Retrieve the bytes which represents the CLTV
- valueBytes := sharedHashes.Get(hashPrefix[:])
- if valueBytes != nil {
- replays.Add(seqNum)
- return nil
- }
-
- // Serialize the cltv value and write an entry keyed by
- // the hash prefix.
- binary.BigEndian.PutUint32(scratch[:], cltv)
- return sharedHashes.Put(hashPrefix[:], scratch[:])
- })
- if err != nil {
- return err
- }
-
- // Merge the replay set computed from checking the on-disk
- // entries with the in-batch replays computed during this
- // batch's construction.
- replays.Merge(b.ReplaySet)
-
- // Write the replay set under the batch identifier to the batch
- // replays bucket. This can be used during recovery to test (1)
- // that a particular batch was successfully processed and (2)
- // recover the indexes of the adds that were rejected as
- // replays.
- var replayBuf bytes.Buffer
- if err := replays.Encode(&replayBuf); err != nil {
- return err
- }
-
- return batchReplayBkt.Put(b.ID, replayBuf.Bytes())
- }); err != nil {
- return nil, err
- }
-
- b.ReplaySet = replays
- b.IsCommitted = true
-
- return replays, nil
-}
-
-// A compile time check to see if DecayedLog adheres to the PersistLog
-// interface.
-var _ sphinx.ReplayLog = (*DecayedLog)(nil)
diff --git a/lnd/htlcswitch/decayedlog_test.go b/lnd/htlcswitch/decayedlog_test.go
deleted file mode 100644
index 3a91e097..00000000
--- a/lnd/htlcswitch/decayedlog_test.go
+++ /dev/null
@@ -1,333 +0,0 @@
-package htlcswitch
-
-import (
- "crypto/rand"
- "io/ioutil"
- "os"
- "testing"
- "time"
-
- "github.com/pkt-cash/pktd/btcutil/er"
- sphinx "github.com/pkt-cash/pktd/lightning-onion"
- "github.com/pkt-cash/pktd/lnd/chainntnfs"
- "github.com/pkt-cash/pktd/lnd/channeldb/kvdb"
- "github.com/pkt-cash/pktd/lnd/lntest/mock"
-)
-
-const (
- cltv uint32 = 100000
-)
-
-// tempDecayedLogPath creates a new temporary database path to back a single
-// deccayed log instance.
-func tempDecayedLogPath(t *testing.T) (string, string) {
- dir, err := ioutil.TempDir("", "decayedlog")
- if err != nil {
- t.Fatalf("unable to create temporary decayed log dir: %v", err)
- }
-
- return dir, "sphinxreplay.db"
-}
-
-// startup sets up the DecayedLog and possibly the garbage collector.
-func startup(dbPath, dbFileName string, notifier bool) (sphinx.ReplayLog,
- *mock.ChainNotifier, *sphinx.HashPrefix, er.R) {
-
- var log sphinx.ReplayLog
- var chainNotifier *mock.ChainNotifier
- if notifier {
-
- // Create the MockNotifier which triggers the garbage collector
- chainNotifier = &mock.ChainNotifier{
- SpendChan: make(chan *chainntnfs.SpendDetail),
- EpochChan: make(chan *chainntnfs.BlockEpoch, 1),
- ConfChan: make(chan *chainntnfs.TxConfirmation),
- }
-
- // Initialize the DecayedLog object
- log = NewDecayedLog(
- dbPath, dbFileName, &kvdb.BoltConfig{}, chainNotifier,
- )
- } else {
- // Initialize the DecayedLog object
- log = NewDecayedLog(dbPath, dbFileName, &kvdb.BoltConfig{}, nil)
- }
-
- // Open the channeldb (start the garbage collector)
- err := log.Start()
- if err != nil {
- return nil, nil, nil, err
- }
-
- // Create a HashPrefix identifier for a packet. Instead of actually
- // generating an ECDH secret and hashing it, simulate with random bytes.
- // This is used as a key to retrieve the cltv value.
- var hashedSecret sphinx.HashPrefix
- _, errr := rand.Read(hashedSecret[:])
- if errr != nil {
- return nil, nil, nil, er.E(errr)
- }
-
- return log, chainNotifier, &hashedSecret, nil
-}
-
-// shutdown deletes the temporary directory that the test database uses
-// and handles closing the database.
-func shutdown(dir string, d sphinx.ReplayLog) {
- d.Stop()
- os.RemoveAll(dir)
-}
-
-// TestDecayedLogGarbageCollector tests the ability of the garbage collector
-// to delete expired cltv values every time a block is received. Expired cltv
-// values are cltv values that are < current block height.
-func TestDecayedLogGarbageCollector(t *testing.T) {
- t.Parallel()
-
- dbPath, dbFileName := tempDecayedLogPath(t)
-
- d, notifier, hashedSecret, err := startup(dbPath, dbFileName, true)
- if err != nil {
- t.Fatalf("Unable to start up DecayedLog: %v", err)
- }
- defer shutdown(dbPath, d)
-
- // Store in the sharedHashBucket.
- err = d.Put(hashedSecret, cltv)
- if err != nil {
- t.Fatalf("Unable to store in channeldb: %v", err)
- }
-
- // Wait for database write (GC is in a goroutine)
- time.Sleep(500 * time.Millisecond)
-
- // Send block notifications to garbage collector. The garbage collector
- // should remove the entry by block 100001.
-
- // Send block 100000
- notifier.EpochChan <- &chainntnfs.BlockEpoch{
- Height: 100000,
- }
-
- // Assert that hashedSecret is still in the sharedHashBucket
- val, err := d.Get(hashedSecret)
- if err != nil {
- t.Fatalf("Get failed - received an error upon Get: %v", err)
- }
-
- if val != cltv {
- t.Fatalf("GC incorrectly deleted CLTV")
- }
-
- // Send block 100001 (expiry block)
- notifier.EpochChan <- &chainntnfs.BlockEpoch{
- Height: 100001,
- }
-
- // Wait for database write (GC is in a goroutine)
- time.Sleep(500 * time.Millisecond)
-
- // Assert that hashedSecret is not in the sharedHashBucket
- _, err = d.Get(hashedSecret)
- if err == nil {
- t.Fatalf("CLTV was not deleted")
- }
- if !sphinx.ErrLogEntryNotFound.Is(err) {
- t.Fatalf("Get failed - received unexpected error upon Get: %v", err)
- }
-}
-
-// TestDecayedLogPersistentGarbageCollector tests the persistence property of
-// the garbage collector. The garbage collector will be restarted immediately and
-// a block that expires the stored CLTV value will be sent to the ChainNotifier.
-// We test that this causes the pair to be deleted even
-// on GC restarts.
-func TestDecayedLogPersistentGarbageCollector(t *testing.T) {
- t.Parallel()
-
- dbPath, dbFileName := tempDecayedLogPath(t)
-
- d, _, hashedSecret, err := startup(dbPath, dbFileName, true)
- if err != nil {
- t.Fatalf("Unable to start up DecayedLog: %v", err)
- }
- defer shutdown(dbPath, d)
-
- // Store in the sharedHashBucket
- if err = d.Put(hashedSecret, cltv); err != nil {
- t.Fatalf("Unable to store in channeldb: %v", err)
- }
-
- // The hash prefix should be retrievable from the decayed log.
- _, err = d.Get(hashedSecret)
- if err != nil {
- t.Fatalf("Get failed - received unexpected error upon Get: %v", err)
- }
-
- // Shut down DecayedLog and the garbage collector along with it.
- d.Stop()
-
- d2, notifier2, _, err := startup(dbPath, dbFileName, true)
- if err != nil {
- t.Fatalf("Unable to restart DecayedLog: %v", err)
- }
- defer shutdown(dbPath, d2)
-
- // Check that the hash prefix still exists in the new db instance.
- _, err = d2.Get(hashedSecret)
- if err != nil {
- t.Fatalf("Get failed - received unexpected error upon Get: %v", err)
- }
-
- // Send a block notification to the garbage collector that expires
- // the stored CLTV.
- notifier2.EpochChan <- &chainntnfs.BlockEpoch{
- Height: int32(100001),
- }
-
- // Wait for database write (GC is in a goroutine)
- time.Sleep(500 * time.Millisecond)
-
- // Assert that hashedSecret is not in the sharedHashBucket
- _, err = d2.Get(hashedSecret)
- if !sphinx.ErrLogEntryNotFound.Is(err) {
- t.Fatalf("Get failed - received unexpected error upon Get: %v", err)
- }
-}
-
-// TestDecayedLogInsertionAndRetrieval inserts a cltv value into the
-// sharedHashBucket and then deletes it and finally asserts that we can no
-// longer retrieve it.
-func TestDecayedLogInsertionAndDeletion(t *testing.T) {
- t.Parallel()
-
- dbPath, dbFileName := tempDecayedLogPath(t)
-
- d, _, hashedSecret, err := startup(dbPath, dbFileName, false)
- if err != nil {
- t.Fatalf("Unable to start up DecayedLog: %v", err)
- }
- defer shutdown(dbPath, d)
-
- // Store in the sharedHashBucket.
- err = d.Put(hashedSecret, cltv)
- if err != nil {
- t.Fatalf("Unable to store in channeldb: %v", err)
- }
-
- // Delete hashedSecret from the sharedHashBucket.
- err = d.Delete(hashedSecret)
- if err != nil {
- t.Fatalf("Unable to delete from channeldb: %v", err)
- }
-
- // Assert that hashedSecret is not in the sharedHashBucket
- _, err = d.Get(hashedSecret)
- if err == nil {
- t.Fatalf("CLTV was not deleted")
- }
- if !sphinx.ErrLogEntryNotFound.Is(err) {
- t.Fatalf("Get failed - received unexpected error upon Get: %v", err)
- }
-}
-
-// TestDecayedLogStartAndStop tests for persistence. The DecayedLog is started,
-// a cltv value is stored in the sharedHashBucket, and then it the DecayedLog
-// is stopped. The DecayedLog is then started up again and we test that the
-// cltv value is indeed still stored in the sharedHashBucket. We then delete
-// the cltv value and check that it persists upon startup.
-func TestDecayedLogStartAndStop(t *testing.T) {
- t.Parallel()
-
- dbPath, dbFileName := tempDecayedLogPath(t)
-
- d, _, hashedSecret, err := startup(dbPath, dbFileName, false)
- if err != nil {
- t.Fatalf("Unable to start up DecayedLog: %v", err)
- }
- defer shutdown(dbPath, d)
-
- // Store in the sharedHashBucket.
- err = d.Put(hashedSecret, cltv)
- if err != nil {
- t.Fatalf("Unable to store in channeldb: %v", err)
- }
-
- // Shutdown the DecayedLog's channeldb
- d.Stop()
-
- d2, _, hashedSecret2, err := startup(dbPath, dbFileName, false)
- if err != nil {
- t.Fatalf("Unable to restart DecayedLog: %v", err)
- }
- defer shutdown(dbPath, d2)
-
- // Retrieve the stored cltv value given the hashedSecret key.
- value, err := d2.Get(hashedSecret)
- if err != nil {
- t.Fatalf("Unable to retrieve from channeldb: %v", err)
- }
-
- // Check that the original cltv value matches the retrieved cltv
- // value.
- if cltv != value {
- t.Fatalf("Value retrieved doesn't match value stored")
- }
-
- // Delete hashedSecret from sharedHashBucket
- err = d2.Delete(hashedSecret2)
- if err != nil {
- t.Fatalf("Unable to delete from channeldb: %v", err)
- }
-
- // Shutdown the DecayedLog's channeldb
- d2.Stop()
-
- d3, _, hashedSecret3, err := startup(dbPath, dbFileName, false)
- if err != nil {
- t.Fatalf("Unable to restart DecayedLog: %v", err)
- }
- defer shutdown(dbPath, d3)
-
- // Assert that hashedSecret is not in the sharedHashBucket
- _, err = d3.Get(hashedSecret3)
- if err == nil {
- t.Fatalf("CLTV was not deleted")
- }
- if !sphinx.ErrLogEntryNotFound.Is(err) {
- t.Fatalf("Get failed - received unexpected error upon Get: %v", err)
- }
-}
-
-// TestDecayedLogStorageAndRetrieval stores a cltv value and then retrieves it
-// via the nested sharedHashBucket and finally asserts that the original stored
-// and retrieved cltv values are equal.
-func TestDecayedLogStorageAndRetrieval(t *testing.T) {
- t.Parallel()
-
- dbPath, dbFileName := tempDecayedLogPath(t)
-
- d, _, hashedSecret, err := startup(dbPath, dbFileName, false)
- if err != nil {
- t.Fatalf("Unable to start up DecayedLog: %v", err)
- }
- defer shutdown(dbPath, d)
-
- // Store in the sharedHashBucket
- err = d.Put(hashedSecret, cltv)
- if err != nil {
- t.Fatalf("Unable to store in channeldb: %v", err)
- }
-
- // Retrieve the stored cltv value given the hashedSecret key.
- value, err := d.Get(hashedSecret)
- if err != nil {
- t.Fatalf("Unable to retrieve from channeldb: %v", err)
- }
-
- // If the original cltv value does not match the value retrieved,
- // then the test failed.
- if cltv != value {
- t.Fatalf("Value retrieved doesn't match value stored")
- }
-}
diff --git a/lnd/htlcswitch/failure.go b/lnd/htlcswitch/failure.go
deleted file mode 100644
index f98ba2f9..00000000
--- a/lnd/htlcswitch/failure.go
+++ /dev/null
@@ -1,193 +0,0 @@
-package htlcswitch
-
-import (
- "bytes"
- "fmt"
-
- "github.com/pkt-cash/pktd/btcutil/er"
- sphinx "github.com/pkt-cash/pktd/lightning-onion"
- "github.com/pkt-cash/pktd/lnd/lnwire"
-)
-
-var (
- ErrUnknownEncrypterType = Err.Code("ErrUnknownEncrypterType")
-)
-
-// ClearTextError is an interface which is implemented by errors that occur
-// when we know the underlying wire failure message. These errors are the
-// opposite to opaque errors which are onion-encrypted blobs only understandable
-// to the initiating node. ClearTextErrors are used when we fail a htlc at our
-// node, or one of our initiated payments failed and we can decrypt the onion
-// encrypted error fully.
-type ClearTextError interface {
- error
-
- // WireMessage extracts a valid wire failure message from an internal
- // error which may contain additional metadata (which should not be
- // exposed to the network). This value may be nil in the case where
- // an unknown wire error is returned by one of our peers.
- WireMessage() lnwire.FailureMessage
-}
-
-// LinkError is an implementation of the ClearTextError interface which
-// represents failures that occur on our incoming or outgoing link.
-type LinkError struct {
- // msg returns the wire failure associated with the error.
- // This value should *not* be nil, because we should always
- // know the failure type for failures which occur at our own
- // node.
- msg lnwire.FailureMessage
-
- // FailureDetail enriches the wire error with additional information.
- FailureDetail
-}
-
-// NewLinkError returns a LinkError with the failure message provided.
-// The failure message provided should *not* be nil, because we should
-// always know the failure type for failures which occur at our own node.
-func NewLinkError(msg lnwire.FailureMessage) *LinkError {
- return &LinkError{msg: msg}
-}
-
-// NewDetailedLinkError returns a link error that enriches a wire message with
-// a failure detail.
-func NewDetailedLinkError(msg lnwire.FailureMessage,
- detail FailureDetail) *LinkError {
-
- return &LinkError{
- msg: msg,
- FailureDetail: detail,
- }
-}
-
-// WireMessage extracts a valid wire failure message from an internal
-// error which may contain additional metadata (which should not be
-// exposed to the network). This value should never be nil for LinkErrors,
-// because we are the ones failing the htlc.
-//
-// Note this is part of the ClearTextError interface.
-func (l *LinkError) WireMessage() lnwire.FailureMessage {
- return l.msg
-}
-
-// Error returns the string representation of a link error.
-//
-// Note this is part of the ClearTextError interface.
-func (l *LinkError) Error() string {
- // If the link error has no failure detail, return the wire message's
- // error.
- if l.FailureDetail == nil {
- return l.msg.Error()
- }
-
- return l.FailureDetail.FailureString()
-}
-
-// ForwardingError wraps an lnwire.FailureMessage in a struct that also
-// includes the source of the error.
-type ForwardingError struct {
- // FailureSourceIdx is the index of the node that sent the failure. With
- // this information, the dispatcher of a payment can modify their set of
- // candidate routes in response to the type of failure extracted. Index
- // zero is the self node.
- FailureSourceIdx int
-
- // msg is the wire message associated with the error. This value may
- // be nil in the case where we fail to decode failure message sent by
- // a peer.
- msg lnwire.FailureMessage
-}
-
-// WireMessage extracts a valid wire failure message from an internal
-// error which may contain additional metadata (which should not be
-// exposed to the network). This value may be nil in the case where
-// an unknown wire error is returned by one of our peers.
-//
-// Note this is part of the ClearTextError interface.
-func (f *ForwardingError) WireMessage() lnwire.FailureMessage {
- return f.msg
-}
-
-// Error implements the built-in error interface. We use this method to allow
-// the switch or any callers to insert additional context to the error message
-// returned.
-func (f *ForwardingError) Error() string {
- return fmt.Sprintf(
- "%v@%v", f.msg, f.FailureSourceIdx,
- )
-}
-
-// NewForwardingError creates a new payment error which wraps a wire error
-// with additional metadata.
-func NewForwardingError(failure lnwire.FailureMessage,
- index int) *ForwardingError {
-
- return &ForwardingError{
- FailureSourceIdx: index,
- msg: failure,
- }
-}
-
-// NewUnknownForwardingError returns a forwarding error which has a nil failure
-// message. This constructor should only be used in the case where we cannot
-// decode the failure we have received from a peer.
-func NewUnknownForwardingError(index int) *ForwardingError {
- return &ForwardingError{
- FailureSourceIdx: index,
- }
-}
-
-// ErrorDecrypter is an interface that is used to decrypt the onion encrypted
-// failure reason an extra out a well formed error.
-type ErrorDecrypter interface {
- // DecryptError peels off each layer of onion encryption from the first
- // hop, to the source of the error. A fully populated
- // lnwire.FailureMessage is returned along with the source of the
- // error.
- DecryptError(lnwire.OpaqueReason) (*ForwardingError, er.R)
-}
-
-// OnionErrorDecrypter is the interface that provides onion level error
-// decryption.
-type OnionErrorDecrypter interface {
- // DecryptError attempts to decrypt the passed encrypted error response.
- // The onion failure is encrypted in backward manner, starting from the
- // node where error have occurred. As a result, in order to decrypt the
- // error we need get all shared secret and apply decryption in the
- // reverse order.
- DecryptError(encryptedData []byte) (*sphinx.DecryptedError, er.R)
-}
-
-// SphinxErrorDecrypter wraps the sphinx data SphinxErrorDecrypter and maps the
-// returned errors to concrete lnwire.FailureMessage instances.
-type SphinxErrorDecrypter struct {
- OnionErrorDecrypter
-}
-
-// DecryptError peels off each layer of onion encryption from the first hop, to
-// the source of the error. A fully populated lnwire.FailureMessage is returned
-// along with the source of the error.
-//
-// NOTE: Part of the ErrorDecrypter interface.
-func (s *SphinxErrorDecrypter) DecryptError(reason lnwire.OpaqueReason) (
- *ForwardingError, er.R) {
-
- failure, err := s.OnionErrorDecrypter.DecryptError(reason)
- if err != nil {
- return nil, err
- }
-
- // Decode the failure. If an error occurs, we leave the failure message
- // field nil.
- r := bytes.NewReader(failure.Message)
- failureMsg, err := lnwire.DecodeFailure(r, 0)
- if err != nil {
- return NewUnknownForwardingError(failure.SenderIdx), nil
- }
-
- return NewForwardingError(failureMsg, failure.SenderIdx), nil
-}
-
-// A compile time check to ensure ErrorDecrypter implements the Deobfuscator
-// interface.
-var _ ErrorDecrypter = (*SphinxErrorDecrypter)(nil)
diff --git a/lnd/htlcswitch/failure_detail.go b/lnd/htlcswitch/failure_detail.go
deleted file mode 100644
index 341688d1..00000000
--- a/lnd/htlcswitch/failure_detail.go
+++ /dev/null
@@ -1,97 +0,0 @@
-package htlcswitch
-
-// FailureDetail is an interface implemented by failures that occur on
-// our incoming or outgoing link, or within the switch itself.
-type FailureDetail interface {
- // FailureString returns the string representation of a failure
- // detail.
- FailureString() string
-}
-
-// OutgoingFailure is an enum which is used to enrich failures which occur in
-// the switch or on our outgoing link with additional metadata.
-type OutgoingFailure int
-
-const (
- // OutgoingFailureNone is returned when the wire message contains
- // sufficient information.
- OutgoingFailureNone OutgoingFailure = iota
-
- // OutgoingFailureDecodeError indicates that we could not decode the
- // failure reason provided for a failed payment.
- OutgoingFailureDecodeError
-
- // OutgoingFailureLinkNotEligible indicates that a routing attempt was
- // made over a link that is not eligible for routing.
- OutgoingFailureLinkNotEligible
-
- // OutgoingFailureOnChainTimeout indicates that a payment had to be
- // timed out on chain before it got past the first hop by us or the
- // remote party.
- OutgoingFailureOnChainTimeout
-
- // OutgoingFailureHTLCExceedsMax is returned when a htlc exceeds our
- // policy's maximum htlc amount.
- OutgoingFailureHTLCExceedsMax
-
- // OutgoingFailureInsufficientBalance is returned when we cannot route a
- // htlc due to insufficient outgoing capacity.
- OutgoingFailureInsufficientBalance
-
- // OutgoingFailureCircularRoute is returned when an attempt is made
- // to forward a htlc through our node which arrives and leaves on the
- // same channel.
- OutgoingFailureCircularRoute
-
- // OutgoingFailureIncompleteForward is returned when we cancel an incomplete
- // forward.
- OutgoingFailureIncompleteForward
-
- // OutgoingFailureDownstreamHtlcAdd is returned when we fail to add a
- // downstream htlc to our outgoing link.
- OutgoingFailureDownstreamHtlcAdd
-
- // OutgoingFailureForwardsDisabled is returned when the switch is
- // configured to disallow forwards.
- OutgoingFailureForwardsDisabled
-)
-
-// FailureString returns the string representation of a failure detail.
-//
-// Note: it is part of the FailureDetail interface.
-func (fd OutgoingFailure) FailureString() string {
- switch fd {
- case OutgoingFailureNone:
- return "no failure detail"
-
- case OutgoingFailureDecodeError:
- return "could not decode wire failure"
-
- case OutgoingFailureLinkNotEligible:
- return "link not eligible"
-
- case OutgoingFailureOnChainTimeout:
- return "payment was resolved on-chain, then canceled back"
-
- case OutgoingFailureHTLCExceedsMax:
- return "htlc exceeds maximum policy amount"
-
- case OutgoingFailureInsufficientBalance:
- return "insufficient bandwidth to route htlc"
-
- case OutgoingFailureCircularRoute:
- return "same incoming and outgoing channel"
-
- case OutgoingFailureIncompleteForward:
- return "failed after detecting incomplete forward"
-
- case OutgoingFailureDownstreamHtlcAdd:
- return "could not add downstream htlc"
-
- case OutgoingFailureForwardsDisabled:
- return "node configured to disallow forwards"
-
- default:
- return "unknown failure detail"
- }
-}
diff --git a/lnd/htlcswitch/hodl/config_dev.go b/lnd/htlcswitch/hodl/config_dev.go
deleted file mode 100644
index ef8389ad..00000000
--- a/lnd/htlcswitch/hodl/config_dev.go
+++ /dev/null
@@ -1,67 +0,0 @@
-// +build dev
-
-package hodl
-
-// Config is a struct enumerating the possible command line flags that are used
-// to activate specific hodl modes.
-//
-// NOTE: THESE FLAGS ARE INTENDED FOR TESTING PURPOSES ONLY. ACTIVATING THESE
-// FLAGS IN PRODUCTION WILL VIOLATE CRITICAL ASSUMPTIONS MADE BY THIS SOFTWARE.
-type Config struct {
- ExitSettle bool `long:"exit-settle" description:"Instructs the node to drop ADDs for which it is the exit node, and to not settle back to the sender"`
-
- AddIncoming bool `long:"add-incoming" description:"Instructs the node to drop incoming ADDs before processing them in the incoming link"`
-
- SettleIncoming bool `long:"settle-incoming" description:"Instructs the node to drop incoming SETTLEs before processing them in the incoming link"`
-
- FailIncoming bool `long:"fail-incoming" description:"Instructs the node to drop incoming FAILs before processing them in the incoming link"`
-
- AddOutgoing bool `long:"add-outgoing" description:"Instructs the node to drop outgoing ADDs before applying them to the channel state"`
-
- SettleOutgoing bool `long:"settle-outgoing" description:"Instructs the node to drop outgoing SETTLEs before applying them to the channel state"`
-
- FailOutgoing bool `long:"fail-outgoing" description:"Instructs the node to drop outgoing FAILs before applying them to the channel state"`
-
- Commit bool `long:"commit" description:"Instructs the node to add HTLCs to its local commitment state and to open circuits for any ADDs, but abort before committing the changes"`
-
- BogusSettle bool `long:"bogus-settle" description:"Instructs the node to settle back any incoming HTLC with a bogus preimage"`
-}
-
-// Mask extracts the flags specified in the configuration, composing a Mask from
-// the active flags.
-func (c *Config) Mask() Mask {
- var flags []Flag
-
- if c.ExitSettle {
- flags = append(flags, ExitSettle)
- }
- if c.AddIncoming {
- flags = append(flags, AddIncoming)
- }
- if c.SettleIncoming {
- flags = append(flags, SettleIncoming)
- }
- if c.FailIncoming {
- flags = append(flags, FailIncoming)
- }
- if c.AddOutgoing {
- flags = append(flags, AddOutgoing)
- }
- if c.SettleOutgoing {
- flags = append(flags, SettleOutgoing)
- }
- if c.FailOutgoing {
- flags = append(flags, FailOutgoing)
- }
- if c.Commit {
- flags = append(flags, Commit)
- }
- if c.BogusSettle {
- flags = append(flags, BogusSettle)
- }
-
- // NOTE: The value returned here will only honor the configuration if
- // the dev build flag is present. In production, this method always
- // returns hodl.MaskNone and Active(*) always returns false.
- return MaskFromFlags(flags...)
-}
diff --git a/lnd/htlcswitch/hodl/config_prod.go b/lnd/htlcswitch/hodl/config_prod.go
deleted file mode 100644
index c5e9e934..00000000
--- a/lnd/htlcswitch/hodl/config_prod.go
+++ /dev/null
@@ -1,11 +0,0 @@
-// +build !dev
-
-package hodl
-
-// Config is an empty struct disabling command line hodl flags in production.
-type Config struct{}
-
-// Mask in production always returns MaskNone.
-func (c *Config) Mask() Mask {
- return MaskNone
-}
diff --git a/lnd/htlcswitch/hodl/flags.go b/lnd/htlcswitch/hodl/flags.go
deleted file mode 100644
index 7fed7d09..00000000
--- a/lnd/htlcswitch/hodl/flags.go
+++ /dev/null
@@ -1,119 +0,0 @@
-package hodl
-
-import "fmt"
-
-// MaskNone represents the empty Mask, in which no breakpoints are
-// active.
-const MaskNone = Mask(0)
-
-type (
- // Flag represents a single breakpoint where an HTLC should be dropped
- // during forwarding. Flags can be composed into a Mask to express more
- // complex combinations.
- Flag uint32
-
- // Mask is a bitvector combining multiple Flags that can be queried to
- // see which breakpoints are active.
- Mask uint32
-)
-
-const (
- // ExitSettle drops an incoming ADD for which we are the exit node,
- // before processing in the link.
- ExitSettle Flag = 1 << iota
-
- // AddIncoming drops an incoming ADD before processing if we are not
- // the exit node.
- AddIncoming
-
- // SettleIncoming drops an incoming SETTLE before processing if we
- // are not the exit node.
- SettleIncoming
-
- // FailIncoming drops an incoming FAIL before processing if we are
- // not the exit node.
- FailIncoming
-
- // TODO(conner): add modes for switch breakpoints
-
- // AddOutgoing drops an outgoing ADD before it is added to the
- // in-memory commitment state of the link.
- AddOutgoing
-
- // SettleOutgoing drops an SETTLE before it is added to the
- // in-memory commitment state of the link.
- SettleOutgoing
-
- // FailOutgoing drops an outgoing FAIL before is is added to the
- // in-memory commitment state of the link.
- FailOutgoing
-
- // Commit drops all HTLC after any outgoing circuits have been
- // opened, but before the in-memory commitment state is persisted.
- Commit
-
- // BogusSettle attempts to settle back any incoming HTLC for which we
- // are the exit node with a bogus preimage.
- BogusSettle
-)
-
-// String returns a human-readable identifier for a given Flag.
-func (f Flag) String() string {
- switch f {
- case ExitSettle:
- return "ExitSettle"
- case AddIncoming:
- return "AddIncoming"
- case SettleIncoming:
- return "SettleIncoming"
- case FailIncoming:
- return "FailIncoming"
- case AddOutgoing:
- return "AddOutgoing"
- case SettleOutgoing:
- return "SettleOutgoing"
- case FailOutgoing:
- return "FailOutgoing"
- case Commit:
- return "Commit"
- case BogusSettle:
- return "BogusSettle"
- default:
- return "UnknownHodlFlag"
- }
-}
-
-// Warning generates a warning message to log if a particular breakpoint is
-// triggered during execution.
-func (f Flag) Warning() string {
- var msg string
- switch f {
- case ExitSettle:
- msg = "will not attempt to settle ADD with sender"
- case AddIncoming:
- msg = "will not attempt to forward ADD to switch"
- case SettleIncoming:
- msg = "will not attempt to forward SETTLE to switch"
- case FailIncoming:
- msg = "will not attempt to forward FAIL to switch"
- case AddOutgoing:
- msg = "will not update channel state with downstream ADD"
- case SettleOutgoing:
- msg = "will not update channel state with downstream SETTLE"
- case FailOutgoing:
- msg = "will not update channel state with downstream FAIL"
- case Commit:
- msg = "will not commit pending channel updates"
- case BogusSettle:
- msg = "will settle HTLC with bogus preimage"
- default:
- msg = "incorrect hodl flag usage"
- }
-
- return fmt.Sprintf("%s mode enabled -- %s", f, msg)
-}
-
-// Mask returns the Mask consisting solely of this Flag.
-func (f Flag) Mask() Mask {
- return Mask(f)
-}
diff --git a/lnd/htlcswitch/hodl/mask_dev.go b/lnd/htlcswitch/hodl/mask_dev.go
deleted file mode 100644
index a1d50ff0..00000000
--- a/lnd/htlcswitch/hodl/mask_dev.go
+++ /dev/null
@@ -1,41 +0,0 @@
-// +build dev
-
-package hodl
-
-import (
- "fmt"
- "strings"
-)
-
-// MaskFromFlags merges a variadic set of Flags into a single Mask.
-func MaskFromFlags(flags ...Flag) Mask {
- var mask Mask
- for _, flag := range flags {
- mask |= Mask(flag)
- }
-
- return mask
-}
-
-// Active returns true if the bit corresponding to the flag is set within the
-// mask.
-func (m Mask) Active(flag Flag) bool {
- return (Flag(m) & flag) > 0
-}
-
-// String returns a human-readable description of all active Flags.
-func (m Mask) String() string {
- if m == MaskNone {
- return "hodl.Mask(NONE)"
- }
-
- var activeFlags []string
- for i := uint(0); i < 32; i++ {
- flag := Flag(1 << i)
- if m.Active(flag) {
- activeFlags = append(activeFlags, flag.String())
- }
- }
-
- return fmt.Sprintf("hodl.Mask(%s)", strings.Join(activeFlags, "|"))
-}
diff --git a/lnd/htlcswitch/hodl/mask_prod.go b/lnd/htlcswitch/hodl/mask_prod.go
deleted file mode 100644
index 1b8a4b70..00000000
--- a/lnd/htlcswitch/hodl/mask_prod.go
+++ /dev/null
@@ -1,18 +0,0 @@
-// +build !dev
-
-package hodl
-
-// MaskFromFlags in production always returns MaskNone.
-func MaskFromFlags(_ ...Flag) Mask {
- return MaskNone
-}
-
-// Active in production always returns false for all Flags.
-func (m Mask) Active(_ Flag) bool {
- return false
-}
-
-// String returns the human-readable identifier for MaskNone.
-func (m Mask) String() string {
- return "hodl.Mask(NONE)"
-}
diff --git a/lnd/htlcswitch/hodl/mask_test.go b/lnd/htlcswitch/hodl/mask_test.go
deleted file mode 100644
index 40730d99..00000000
--- a/lnd/htlcswitch/hodl/mask_test.go
+++ /dev/null
@@ -1,113 +0,0 @@
-package hodl_test
-
-import (
- "testing"
-
- "github.com/pkt-cash/pktd/lnd/build"
- "github.com/pkt-cash/pktd/lnd/htlcswitch/hodl"
-)
-
-var hodlMaskTests = []struct {
- mask hodl.Mask
- flags map[hodl.Flag]struct{}
-}{
- {
- // Check that the empty mask has no active flags.
- mask: hodl.MaskNone,
- flags: map[hodl.Flag]struct{}{},
- },
- {
- // Check that passing no arguments to MaskFromFlags is
- // equivalent to MaskNone.
- mask: hodl.MaskFromFlags(),
- flags: map[hodl.Flag]struct{}{},
- },
-
- {
- // Check using Mask to convert a single flag into a Mask only
- // reports that flag active.
- mask: hodl.ExitSettle.Mask(),
- flags: map[hodl.Flag]struct{}{
- hodl.ExitSettle: {},
- },
- },
- {
- // Check that using MaskFromFlags on a single flag only reports
- // that flag active.
- mask: hodl.MaskFromFlags(hodl.Commit),
- flags: map[hodl.Flag]struct{}{
- hodl.Commit: {},
- },
- },
-
- {
- // Check that using MaskFromFlags on some-but-not-all flags
- // reports the correct subset of flags as active.
- mask: hodl.MaskFromFlags(
- hodl.ExitSettle,
- hodl.Commit,
- hodl.AddIncoming,
- hodl.SettleOutgoing,
- ),
- flags: map[hodl.Flag]struct{}{
- hodl.ExitSettle: {},
- hodl.Commit: {},
- hodl.AddIncoming: {},
- hodl.SettleOutgoing: {},
- },
- },
- {
- // Check that using MaskFromFlags on all known flags reports
- // those an no other flags.
- mask: hodl.MaskFromFlags(
- hodl.ExitSettle,
- hodl.AddIncoming,
- hodl.SettleIncoming,
- hodl.FailIncoming,
- hodl.AddOutgoing,
- hodl.SettleOutgoing,
- hodl.FailOutgoing,
- hodl.Commit,
- hodl.BogusSettle,
- ),
- flags: map[hodl.Flag]struct{}{
- hodl.ExitSettle: {},
- hodl.AddIncoming: {},
- hodl.SettleIncoming: {},
- hodl.FailIncoming: {},
- hodl.AddOutgoing: {},
- hodl.SettleOutgoing: {},
- hodl.FailOutgoing: {},
- hodl.Commit: {},
- hodl.BogusSettle: {},
- },
- },
-}
-
-// TestMask iterates through all of the hodlMaskTests, checking that the mask
-// correctly reports active for flags in the tests' expected flags, and inactive
-// for all others.
-func TestMask(t *testing.T) {
- if !build.IsDevBuild() {
- t.Fatalf("htlcswitch tests must be run with '-tags=dev'")
- }
-
- for i, test := range hodlMaskTests {
- for j := uint32(0); i < 32; i++ {
- flag := hodl.Flag(1 << j)
- _, shouldBeActive := test.flags[flag]
-
- switch {
- case shouldBeActive && !test.mask.Active(flag):
- t.Fatalf("hodl mask test #%d -- "+
- "expected flag %s to be active",
- i, flag)
-
- case !shouldBeActive && test.mask.Active(flag):
- t.Fatalf("hodl mask test #%d -- "+
- "expected flag %s to be inactive",
- i, flag)
- }
- }
- }
-}
diff --git a/lnd/htlcswitch/hop/error_encryptor.go b/lnd/htlcswitch/hop/error_encryptor.go
deleted file mode 100644
index f3d2f090..00000000
--- a/lnd/htlcswitch/hop/error_encryptor.go
+++ /dev/null
@@ -1,206 +0,0 @@
-package hop
-
-import (
- "bytes"
- "io"
-
- "github.com/pkt-cash/pktd/btcec"
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/btcutil/util"
- sphinx "github.com/pkt-cash/pktd/lightning-onion"
- "github.com/pkt-cash/pktd/lnd/lnwire"
-)
-
-// EncrypterType establishes an enum used in serialization to indicate how to
-// decode a concrete instance of the ErrorEncrypter interface.
-type EncrypterType byte
-
-const (
- // EncrypterTypeNone signals that no error encyrpter is present, this
- // can happen if the htlc is originates in the switch.
- EncrypterTypeNone EncrypterType = 0
-
- // EncrypterTypeSphinx is used to identify a sphinx onion error
- // encrypter instance.
- EncrypterTypeSphinx = 1
-
- // EncrypterTypeMock is used to identify a mock obfuscator instance.
- EncrypterTypeMock = 2
-)
-
-// ErrorEncrypterExtracter defines a function signature that extracts an
-// ErrorEncrypter from an sphinx OnionPacket.
-type ErrorEncrypterExtracter func(*btcec.PublicKey) (ErrorEncrypter,
- lnwire.FailCode)
-
-// ErrorEncrypter is an interface that is used to encrypt HTLC related errors
-// at the source of the error, and also at each intermediate hop all the way
-// back to the source of the payment.
-type ErrorEncrypter interface {
- // EncryptFirstHop transforms a concrete failure message into an
- // encrypted opaque failure reason. This method will be used at the
- // source that the error occurs. It differs from IntermediateEncrypt
- // slightly, in that it computes a proper MAC over the error.
- EncryptFirstHop(lnwire.FailureMessage) (lnwire.OpaqueReason, er.R)
-
- // EncryptMalformedError is similar to EncryptFirstHop (it adds the
- // MAC), but it accepts an opaque failure reason rather than a failure
- // message. This method is used when we receive an
- // UpdateFailMalformedHTLC from the remote peer and then need to
- // convert that into a proper error from only the raw bytes.
- EncryptMalformedError(lnwire.OpaqueReason) lnwire.OpaqueReason
-
- // IntermediateEncrypt wraps an already encrypted opaque reason error
- // in an additional layer of onion encryption. This process repeats
- // until the error arrives at the source of the payment.
- IntermediateEncrypt(lnwire.OpaqueReason) lnwire.OpaqueReason
-
- // Type returns an enum indicating the underlying concrete instance
- // backing this interface.
- Type() EncrypterType
-
- // Encode serializes the encrypter's ephemeral public key to the given
- // io.Writer.
- Encode(io.Writer) er.R
-
- // Decode deserializes the encrypter' ephemeral public key from the
- // given io.Reader.
- Decode(io.Reader) er.R
-
- // Reextract rederives the encrypter using the extracter, performing an
- // ECDH with the sphinx router's key and the ephemeral public key.
- //
- // NOTE: This should be called shortly after Decode to properly
- // reinitialize the error encrypter.
- Reextract(ErrorEncrypterExtracter) er.R
-}
-
-// SphinxErrorEncrypter is a concrete implementation of both the ErrorEncrypter
-// interface backed by an implementation of the Sphinx packet format. As a
-// result, all errors handled are themselves wrapped in layers of onion
-// encryption and must be treated as such accordingly.
-type SphinxErrorEncrypter struct {
- *sphinx.OnionErrorEncrypter
-
- EphemeralKey *btcec.PublicKey
-}
-
-// NewSphinxErrorEncrypter initializes a blank sphinx error encrypter, that
-// should be used to deserialize an encoded SphinxErrorEncrypter. Since the
-// actual encrypter is not stored in plaintext while at rest, reconstructing the
-// error encrypter requires:
-// 1) Decode: to deserialize the ephemeral public key.
-// 2) Reextract: to "unlock" the actual error encrypter using an active
-// OnionProcessor.
-func NewSphinxErrorEncrypter() *SphinxErrorEncrypter {
- return &SphinxErrorEncrypter{
- OnionErrorEncrypter: nil,
- EphemeralKey: &btcec.PublicKey{},
- }
-}
-
-// EncryptFirstHop transforms a concrete failure message into an encrypted
-// opaque failure reason. This method will be used at the source that the error
-// occurs. It differs from BackwardObfuscate slightly, in that it computes a
-// proper MAC over the error.
-//
-// NOTE: Part of the ErrorEncrypter interface.
-func (s *SphinxErrorEncrypter) EncryptFirstHop(
- failure lnwire.FailureMessage) (lnwire.OpaqueReason, er.R) {
-
- var b bytes.Buffer
- if err := lnwire.EncodeFailure(&b, failure, 0); err != nil {
- return nil, err
- }
-
- // We pass a true as the first parameter to indicate that a MAC should
- // be added.
- return s.EncryptError(true, b.Bytes()), nil
-}
-
-// EncryptMalformedError is similar to EncryptFirstHop (it adds the MAC), but
-// it accepts an opaque failure reason rather than a failure message. This
-// method is used when we receive an UpdateFailMalformedHTLC from the remote
-// peer and then need to convert that into an proper error from only the raw
-// bytes.
-//
-// NOTE: Part of the ErrorEncrypter interface.
-func (s *SphinxErrorEncrypter) EncryptMalformedError(
- reason lnwire.OpaqueReason) lnwire.OpaqueReason {
-
- return s.EncryptError(true, reason)
-}
-
-// IntermediateEncrypt wraps an already encrypted opaque reason error in an
-// additional layer of onion encryption. This process repeats until the error
-// arrives at the source of the payment. We re-encrypt the message on the
-// backwards path to ensure that the error is indistinguishable from any other
-// error seen.
-//
-// NOTE: Part of the ErrorEncrypter interface.
-func (s *SphinxErrorEncrypter) IntermediateEncrypt(
- reason lnwire.OpaqueReason) lnwire.OpaqueReason {
-
- return s.EncryptError(false, reason)
-}
-
-// Type returns the identifier for a sphinx error encrypter.
-func (s *SphinxErrorEncrypter) Type() EncrypterType {
- return EncrypterTypeSphinx
-}
-
-// Encode serializes the error encrypter' ephemeral public key to the provided
-// io.Writer.
-func (s *SphinxErrorEncrypter) Encode(w io.Writer) er.R {
- ephemeral := s.EphemeralKey.SerializeCompressed()
- _, err := util.Write(w, ephemeral)
- return err
-}
-
-// Decode reconstructs the error encrypter's ephemeral public key from the
-// provided io.Reader.
-func (s *SphinxErrorEncrypter) Decode(r io.Reader) er.R {
- var ephemeral [33]byte
- if _, err := util.ReadFull(r, ephemeral[:]); err != nil {
- return err
- }
-
- var err er.R
- s.EphemeralKey, err = btcec.ParsePubKey(ephemeral[:], btcec.S256())
- if err != nil {
- return err
- }
-
- return nil
-}
-
-// Reextract rederives the error encrypter from the currently held EphemeralKey.
-// This intended to be used shortly after Decode, to fully initialize a
-// SphinxErrorEncrypter.
-func (s *SphinxErrorEncrypter) Reextract(
- extract ErrorEncrypterExtracter) er.R {
-
- obfuscator, failcode := extract(s.EphemeralKey)
- if failcode != lnwire.CodeNone {
- // This should never happen, since we already validated that
- // this obfuscator can be extracted when it was received in the
- // link.
- return er.Errorf("unable to reconstruct onion "+
- "obfuscator, got failcode: %d", failcode)
- }
-
- sphinxEncrypter, ok := obfuscator.(*SphinxErrorEncrypter)
- if !ok {
- return er.Errorf("incorrect onion error extracter")
- }
-
- // Copy the freshly extracted encrypter.
- s.OnionErrorEncrypter = sphinxEncrypter.OnionErrorEncrypter
-
- return nil
-
-}
-
-// A compile time check to ensure SphinxErrorEncrypter implements the
-// ErrorEncrypter interface.
-var _ ErrorEncrypter = (*SphinxErrorEncrypter)(nil)
diff --git a/lnd/htlcswitch/hop/forwarding_info.go b/lnd/htlcswitch/hop/forwarding_info.go
deleted file mode 100644
index 73b85872..00000000
--- a/lnd/htlcswitch/hop/forwarding_info.go
+++ /dev/null
@@ -1,29 +0,0 @@
-package hop
-
-import (
- "github.com/pkt-cash/pktd/lnd/lnwire"
-)
-
-// ForwardingInfo contains all the information that is necessary to forward and
-// incoming HTLC to the next hop encoded within a valid HopIterator instance.
-// Forwarding links are to use this information to authenticate the information
-// received within the incoming HTLC, to ensure that the prior hop didn't
-// tamper with the end-to-end routing information at all.
-type ForwardingInfo struct {
- // Network is the target blockchain network that the HTLC will travel
- // over next.
- Network Network
-
- // NextHop is the channel ID of the next hop. The received HTLC should
- // be forwarded to this particular channel in order to continue the
- // end-to-end route.
- NextHop lnwire.ShortChannelID
-
- // AmountToForward is the amount of milli-satoshis that the receiving
- // node should forward to the next hop.
- AmountToForward lnwire.MilliSatoshi
-
- // OutgoingCTLV is the specified value of the CTLV timelock to be used
- // in the outgoing HTLC.
- OutgoingCTLV uint32
-}
diff --git a/lnd/htlcswitch/hop/iterator.go b/lnd/htlcswitch/hop/iterator.go
deleted file mode 100644
index 08d3e865..00000000
--- a/lnd/htlcswitch/hop/iterator.go
+++ /dev/null
@@ -1,394 +0,0 @@
-package hop
-
-import (
- "bytes"
- "io"
-
- "github.com/pkt-cash/pktd/btcec"
- "github.com/pkt-cash/pktd/btcutil/er"
- sphinx "github.com/pkt-cash/pktd/lightning-onion"
- "github.com/pkt-cash/pktd/lnd/lnwire"
- "github.com/pkt-cash/pktd/pktlog/log"
-)
-
-// Iterator is an interface that abstracts away the routing information
-// included in HTLC's which includes the entirety of the payment path of an
-// HTLC. This interface provides two basic method which carry out: how to
-// interpret the forwarding information encoded within the HTLC packet, and hop
-// to encode the forwarding information for the _next_ hop.
-type Iterator interface {
- // HopPayload returns the set of fields that detail exactly _how_ this
- // hop should forward the HTLC to the next hop. Additionally, the
- // information encoded within the returned ForwardingInfo is to be used
- // by each hop to authenticate the information given to it by the prior
- // hop. The payload will also contain any additional TLV fields provided
- // by the sender.
- HopPayload() (*Payload, er.R)
-
- // EncodeNextHop encodes the onion packet destined for the next hop
- // into the passed io.Writer.
- EncodeNextHop(w io.Writer) er.R
-
- // ExtractErrorEncrypter returns the ErrorEncrypter needed for this hop,
- // along with a failure code to signal if the decoding was successful.
- ExtractErrorEncrypter(ErrorEncrypterExtracter) (ErrorEncrypter,
- lnwire.FailCode)
-}
-
-// sphinxHopIterator is the Sphinx implementation of hop iterator which uses
-// onion routing to encode the payment route in such a way so that node might
-// see only the next hop in the route..
-type sphinxHopIterator struct {
- // ogPacket is the original packet from which the processed packet is
- // derived.
- ogPacket *sphinx.OnionPacket
-
- // processedPacket is the outcome of processing an onion packet. It
- // includes the information required to properly forward the packet to
- // the next hop.
- processedPacket *sphinx.ProcessedPacket
-}
-
-// makeSphinxHopIterator converts a processed packet returned from a sphinx
-// router and converts it into an hop iterator for usage in the link.
-func makeSphinxHopIterator(ogPacket *sphinx.OnionPacket,
- packet *sphinx.ProcessedPacket) *sphinxHopIterator {
-
- return &sphinxHopIterator{
- ogPacket: ogPacket,
- processedPacket: packet,
- }
-}
-
-// A compile time check to ensure sphinxHopIterator implements the HopIterator
-// interface.
-var _ Iterator = (*sphinxHopIterator)(nil)
-
-// Encode encodes iterator and writes it to the writer.
-//
-// NOTE: Part of the HopIterator interface.
-func (r *sphinxHopIterator) EncodeNextHop(w io.Writer) er.R {
- return r.processedPacket.NextPacket.Encode(w)
-}
-
-// HopPayload returns the set of fields that detail exactly _how_ this hop
-// should forward the HTLC to the next hop. Additionally, the information
-// encoded within the returned ForwardingInfo is to be used by each hop to
-// authenticate the information given to it by the prior hop. The payload will
-// also contain any additional TLV fields provided by the sender.
-//
-// NOTE: Part of the HopIterator interface.
-func (r *sphinxHopIterator) HopPayload() (*Payload, er.R) {
- switch r.processedPacket.Payload.Type {
-
- // If this is the legacy payload, then we'll extract the information
- // directly from the pre-populated ForwardingInstructions field.
- case sphinx.PayloadLegacy:
- fwdInst := r.processedPacket.ForwardingInstructions
- return NewLegacyPayload(fwdInst), nil
-
- // Otherwise, if this is the TLV payload, then we'll make a new stream
- // to decode only what we need to make routing decisions.
- case sphinx.PayloadTLV:
- return NewPayloadFromReader(bytes.NewReader(
- r.processedPacket.Payload.Payload,
- ))
-
- default:
- return nil, er.Errorf("unknown sphinx payload type: %v",
- r.processedPacket.Payload.Type)
- }
-}
-
-// ExtractErrorEncrypter decodes and returns the ErrorEncrypter for this hop,
-// along with a failure code to signal if the decoding was successful. The
-// ErrorEncrypter is used to encrypt errors back to the sender in the event that
-// a payment fails.
-//
-// NOTE: Part of the HopIterator interface.
-func (r *sphinxHopIterator) ExtractErrorEncrypter(
- extracter ErrorEncrypterExtracter) (ErrorEncrypter, lnwire.FailCode) {
-
- return extracter(r.ogPacket.EphemeralKey)
-}
-
-// OnionProcessor is responsible for keeping all sphinx dependent parts inside
-// and expose only decoding function. With such approach we give freedom for
-// subsystems which wants to decode sphinx path to not be dependable from
-// sphinx at all.
-//
-// NOTE: The reason for keeping decoder separated from hop iterator is too
-// maintain the hop iterator abstraction. Without it the structures which using
-// the hop iterator should contain sphinx router which makes their creations in
-// tests dependent from the sphinx internal parts.
-type OnionProcessor struct {
- router *sphinx.Router
-}
-
-// NewOnionProcessor creates new instance of decoder.
-func NewOnionProcessor(router *sphinx.Router) *OnionProcessor {
- return &OnionProcessor{router}
-}
-
-// Start spins up the onion processor's sphinx router.
-func (p *OnionProcessor) Start() er.R {
- return p.router.Start()
-}
-
-// Stop shutsdown the onion processor's sphinx router.
-func (p *OnionProcessor) Stop() er.R {
- p.router.Stop()
- return nil
-}
-
-// DecodeHopIterator attempts to decode a valid sphinx packet from the passed io.Reader
-// instance using the rHash as the associated data when checking the relevant
-// MACs during the decoding process.
-func (p *OnionProcessor) DecodeHopIterator(r io.Reader, rHash []byte,
- incomingCltv uint32) (Iterator, lnwire.FailCode) {
-
- onionPkt := &sphinx.OnionPacket{}
- if err := onionPkt.Decode(r); err != nil {
- switch {
- case sphinx.ErrInvalidOnionVersion.Is(err):
- return nil, lnwire.CodeInvalidOnionVersion
- case sphinx.ErrInvalidOnionKey.Is(err):
- return nil, lnwire.CodeInvalidOnionKey
- default:
- log.Errorf("unable to decode onion packet: %v", err)
- return nil, lnwire.CodeInvalidOnionKey
- }
- }
-
- // Attempt to process the Sphinx packet. We include the payment hash of
- // the HTLC as it's authenticated within the Sphinx packet itself as
- // associated data in order to thwart attempts a replay attacks. In the
- // case of a replay, an attacker is *forced* to use the same payment
- // hash twice, thereby losing their money entirely.
- sphinxPacket, err := p.router.ProcessOnionPacket(
- onionPkt, rHash, incomingCltv,
- )
- if err != nil {
- switch {
- case sphinx.ErrInvalidOnionVersion.Is(err):
- return nil, lnwire.CodeInvalidOnionVersion
- case sphinx.ErrInvalidOnionHMAC.Is(err):
- return nil, lnwire.CodeInvalidOnionHmac
- case sphinx.ErrInvalidOnionKey.Is(err):
- return nil, lnwire.CodeInvalidOnionKey
- default:
- log.Errorf("unable to process onion packet: %v", err)
- return nil, lnwire.CodeInvalidOnionKey
- }
- }
-
- return makeSphinxHopIterator(onionPkt, sphinxPacket), lnwire.CodeNone
-}
-
-// ReconstructHopIterator attempts to decode a valid sphinx packet from the passed io.Reader
-// instance using the rHash as the associated data when checking the relevant
-// MACs during the decoding process.
-func (p *OnionProcessor) ReconstructHopIterator(r io.Reader, rHash []byte) (
- Iterator, er.R) {
-
- onionPkt := &sphinx.OnionPacket{}
- if err := onionPkt.Decode(r); err != nil {
- return nil, err
- }
-
- // Attempt to process the Sphinx packet. We include the payment hash of
- // the HTLC as it's authenticated within the Sphinx packet itself as
- // associated data in order to thwart attempts a replay attacks. In the
- // case of a replay, an attacker is *forced* to use the same payment
- // hash twice, thereby losing their money entirely.
- sphinxPacket, err := p.router.ReconstructOnionPacket(onionPkt, rHash)
- if err != nil {
- return nil, err
- }
-
- return makeSphinxHopIterator(onionPkt, sphinxPacket), nil
-}
-
-// DecodeHopIteratorRequest encapsulates all date necessary to process an onion
-// packet, perform sphinx replay detection, and schedule the entry for garbage
-// collection.
-type DecodeHopIteratorRequest struct {
- OnionReader io.Reader
- RHash []byte
- IncomingCltv uint32
-}
-
-// DecodeHopIteratorResponse encapsulates the outcome of a batched sphinx onion
-// processing.
-type DecodeHopIteratorResponse struct {
- HopIterator Iterator
- FailCode lnwire.FailCode
-}
-
-// Result returns the (HopIterator, lnwire.FailCode) tuple, which should
-// correspond to the index of a particular DecodeHopIteratorRequest.
-//
-// NOTE: The HopIterator should be considered invalid if the fail code is
-// anything but lnwire.CodeNone.
-func (r *DecodeHopIteratorResponse) Result() (Iterator, lnwire.FailCode) {
- return r.HopIterator, r.FailCode
-}
-
-// DecodeHopIterators performs batched decoding and validation of incoming
-// sphinx packets. For the same `id`, this method will return the same iterators
-// and failcodes upon subsequent invocations.
-//
-// NOTE: In order for the responses to be valid, the caller must guarantee that
-// the presented readers and rhashes *NEVER* deviate across invocations for the
-// same id.
-func (p *OnionProcessor) DecodeHopIterators(id []byte,
- reqs []DecodeHopIteratorRequest) ([]DecodeHopIteratorResponse, er.R) {
-
- var (
- batchSize = len(reqs)
- onionPkts = make([]sphinx.OnionPacket, batchSize)
- resps = make([]DecodeHopIteratorResponse, batchSize)
- )
-
- tx := p.router.BeginTxn(id, batchSize)
-
- for i, req := range reqs {
- onionPkt := &onionPkts[i]
- resp := &resps[i]
-
- err := onionPkt.Decode(req.OnionReader)
- switch {
- case nil == err:
- // success
-
- case sphinx.ErrInvalidOnionVersion.Is(err):
- resp.FailCode = lnwire.CodeInvalidOnionVersion
- continue
-
- case sphinx.ErrInvalidOnionKey.Is(err):
- resp.FailCode = lnwire.CodeInvalidOnionKey
- continue
-
- default:
- log.Errorf("unable to decode onion packet: %v", err)
- resp.FailCode = lnwire.CodeInvalidOnionKey
- continue
- }
-
- err = tx.ProcessOnionPacket(
- uint16(i), onionPkt, req.RHash, req.IncomingCltv,
- )
- switch {
- case err == nil:
- // success
-
- case sphinx.ErrInvalidOnionVersion.Is(err):
- resp.FailCode = lnwire.CodeInvalidOnionVersion
- continue
-
- case sphinx.ErrInvalidOnionHMAC.Is(err):
- resp.FailCode = lnwire.CodeInvalidOnionHmac
- continue
-
- case sphinx.ErrInvalidOnionKey.Is(err):
- resp.FailCode = lnwire.CodeInvalidOnionKey
- continue
-
- default:
- log.Errorf("unable to process onion packet: %v", err)
- resp.FailCode = lnwire.CodeInvalidOnionKey
- continue
- }
- }
-
- // With that batch created, we will now attempt to write the shared
- // secrets to disk. This operation will returns the set of indices that
- // were detected as replays, and the computed sphinx packets for all
- // indices that did not fail the above loop. Only indices that are not
- // in the replay set should be considered valid, as they are
- // opportunistically computed.
- packets, replays, err := tx.Commit()
- if err != nil {
- log.Errorf("unable to process onion packet batch %x: %v",
- id, err)
-
- // If we failed to commit the batch to the secret share log, we
- // will mark all not-yet-failed channels with a temporary
- // channel failure and exit since we cannot proceed.
- for i := range resps {
- resp := &resps[i]
-
- // Skip any indexes that already failed onion decoding.
- if resp.FailCode != lnwire.CodeNone {
- continue
- }
-
- log.Errorf("unable to process onion packet %x-%v",
- id, i)
- resp.FailCode = lnwire.CodeTemporaryChannelFailure
- }
-
- // TODO(conner): return real errors to caller so link can fail?
- return resps, err
- }
-
- // Otherwise, the commit was successful. Now we will post process any
- // remaining packets, additionally failing any that were included in the
- // replay set.
- for i := range resps {
- resp := &resps[i]
-
- // Skip any indexes that already failed onion decoding.
- if resp.FailCode != lnwire.CodeNone {
- continue
- }
-
- // If this index is contained in the replay set, mark it with a
- // temporary channel failure error code. We infer that the
- // offending error was due to a replayed packet because this
- // index was found in the replay set.
- if replays.Contains(uint16(i)) {
- log.Errorf("unable to process onion packet: %v",
- sphinx.ErrReplayedPacket)
- resp.FailCode = lnwire.CodeTemporaryChannelFailure
- continue
- }
-
- // Finally, construct a hop iterator from our processed sphinx
- // packet, simultaneously caching the original onion packet.
- resp.HopIterator = makeSphinxHopIterator(&onionPkts[i], &packets[i])
- }
-
- return resps, nil
-}
-
-// ExtractErrorEncrypter takes an io.Reader which should contain the onion
-// packet as original received by a forwarding node and creates an
-// ErrorEncrypter instance using the derived shared secret. In the case that en
-// error occurs, a lnwire failure code detailing the parsing failure will be
-// returned.
-func (p *OnionProcessor) ExtractErrorEncrypter(ephemeralKey *btcec.PublicKey) (
- ErrorEncrypter, lnwire.FailCode) {
-
- onionObfuscator, err := sphinx.NewOnionErrorEncrypter(
- p.router, ephemeralKey,
- )
- if err != nil {
- switch {
- case sphinx.ErrInvalidOnionVersion.Is(err):
- return nil, lnwire.CodeInvalidOnionVersion
- case sphinx.ErrInvalidOnionHMAC.Is(err):
- return nil, lnwire.CodeInvalidOnionHmac
- case sphinx.ErrInvalidOnionKey.Is(err):
- return nil, lnwire.CodeInvalidOnionKey
- default:
- log.Errorf("unable to process onion packet: %v", err)
- return nil, lnwire.CodeInvalidOnionKey
- }
- }
-
- return &SphinxErrorEncrypter{
- OnionErrorEncrypter: onionObfuscator,
- EphemeralKey: ephemeralKey,
- }, lnwire.CodeNone
-}
diff --git a/lnd/htlcswitch/hop/iterator_test.go b/lnd/htlcswitch/hop/iterator_test.go
deleted file mode 100644
index 8b84895e..00000000
--- a/lnd/htlcswitch/hop/iterator_test.go
+++ /dev/null
@@ -1,101 +0,0 @@
-package hop
-
-import (
- "bytes"
- "encoding/binary"
- "testing"
-
- "github.com/davecgh/go-spew/spew"
- sphinx "github.com/pkt-cash/pktd/lightning-onion"
- "github.com/pkt-cash/pktd/lnd/lnwire"
- "github.com/pkt-cash/pktd/lnd/record"
- "github.com/pkt-cash/pktd/lnd/tlv"
-)
-
-// TestSphinxHopIteratorForwardingInstructions tests that we're able to
-// properly decode an onion payload, no matter the payload type, into the
-// original set of forwarding instructions.
-func TestSphinxHopIteratorForwardingInstructions(t *testing.T) {
- t.Parallel()
-
- // First, we'll make the hop data that the sender would create to send
- // an HTLC through our imaginary route.
- hopData := sphinx.HopData{
- ForwardAmount: 100000,
- OutgoingCltv: 4343,
- }
- copy(hopData.NextAddress[:], bytes.Repeat([]byte("a"), 8))
-
- // Next, we'll make the hop forwarding information that we should
- // extract each type, no matter the payload type.
- nextAddrInt := binary.BigEndian.Uint64(hopData.NextAddress[:])
- expectedFwdInfo := ForwardingInfo{
- NextHop: lnwire.NewShortChanIDFromInt(nextAddrInt),
- AmountToForward: lnwire.MilliSatoshi(hopData.ForwardAmount),
- OutgoingCTLV: hopData.OutgoingCltv,
- }
-
- // For our TLV payload, we'll serialize the hop into into a TLV stream
- // as we would normally in the routing network.
- var b bytes.Buffer
- tlvRecords := []tlv.Record{
- record.NewAmtToFwdRecord(&hopData.ForwardAmount),
- record.NewLockTimeRecord(&hopData.OutgoingCltv),
- record.NewNextHopIDRecord(&nextAddrInt),
- }
- tlvStream, err := tlv.NewStream(tlvRecords...)
- if err != nil {
- t.Fatalf("unable to create stream: %v", err)
- }
- if err := tlvStream.Encode(&b); err != nil {
- t.Fatalf("unable to encode stream: %v", err)
- }
-
- var testCases = []struct {
- sphinxPacket *sphinx.ProcessedPacket
- expectedFwdInfo ForwardingInfo
- }{
- // A regular legacy payload that signals more hops.
- {
- sphinxPacket: &sphinx.ProcessedPacket{
- Payload: sphinx.HopPayload{
- Type: sphinx.PayloadLegacy,
- },
- Action: sphinx.MoreHops,
- ForwardingInstructions: &hopData,
- },
- expectedFwdInfo: expectedFwdInfo,
- },
- // A TLV payload, we can leave off the action as we'll always
- // read the cid encoded.
- {
- sphinxPacket: &sphinx.ProcessedPacket{
- Payload: sphinx.HopPayload{
- Type: sphinx.PayloadTLV,
- Payload: b.Bytes(),
- },
- },
- expectedFwdInfo: expectedFwdInfo,
- },
- }
-
- // Finally, we'll test that we get the same set of
- // ForwardingInstructions for each payload type.
- iterator := sphinxHopIterator{}
- for i, testCase := range testCases {
- iterator.processedPacket = testCase.sphinxPacket
-
- pld, err := iterator.HopPayload()
- if err != nil {
- t.Fatalf("#%v: unable to extract forwarding "+
- "instructions: %v", i, err)
- }
-
- fwdInfo := pld.ForwardingInfo()
- if fwdInfo != testCase.expectedFwdInfo {
- t.Fatalf("#%v: wrong fwding info: expected %v, got %v",
- i, spew.Sdump(testCase.expectedFwdInfo),
- spew.Sdump(fwdInfo))
- }
- }
-}
diff --git a/lnd/htlcswitch/hop/network.go b/lnd/htlcswitch/hop/network.go
deleted file mode 100644
index 6f121642..00000000
--- a/lnd/htlcswitch/hop/network.go
+++ /dev/null
@@ -1,28 +0,0 @@
-package hop
-
-// Network indicates the blockchain network that is intended to be the next hop
-// for a forwarded HTLC. The existence of this field within the ForwardingInfo
-// struct enables the ability for HTLC to cross chain-boundaries at will.
-type Network uint8
-
-const (
- // BitcoinNetwork denotes that an HTLC is to be forwarded along the
- // Bitcoin link with the specified short channel ID.
- BitcoinNetwork Network = iota
-
- // LitecoinNetwork denotes that an HTLC is to be forwarded along the
- // Litecoin link with the specified short channel ID.
- LitecoinNetwork
-)
-
-// String returns the string representation of the target Network.
-func (c Network) String() string {
- switch c {
- case BitcoinNetwork:
- return "Bitcoin"
- case LitecoinNetwork:
- return "Litecoin"
- default:
- return "Kekcoin"
- }
-}
diff --git a/lnd/htlcswitch/hop/payload.go b/lnd/htlcswitch/hop/payload.go
deleted file mode 100644
index 2a3e9567..00000000
--- a/lnd/htlcswitch/hop/payload.go
+++ /dev/null
@@ -1,292 +0,0 @@
-package hop
-
-import (
- "encoding/binary"
- "fmt"
- "io"
-
- "github.com/pkt-cash/pktd/btcutil/er"
- sphinx "github.com/pkt-cash/pktd/lightning-onion"
- "github.com/pkt-cash/pktd/lnd/lnwire"
- "github.com/pkt-cash/pktd/lnd/record"
- "github.com/pkt-cash/pktd/lnd/tlv"
-)
-
-// PayloadViolation is an enum encapsulating the possible invalid payload
-// violations that can occur when processing or validating a payload.
-type PayloadViolation byte
-
-const (
- // OmittedViolation indicates that a type was expected to be found the
- // payload but was absent.
- OmittedViolation PayloadViolation = iota
-
- // IncludedViolation indicates that a type was expected to be omitted
- // from the payload but was present.
- IncludedViolation
-
- // RequiredViolation indicates that an unknown even type was found in
- // the payload that we could not process.
- RequiredViolation
-)
-
-// String returns a human-readable description of the violation as a verb.
-func (v PayloadViolation) String() string {
- switch v {
- case OmittedViolation:
- return "omitted"
-
- case IncludedViolation:
- return "included"
-
- case RequiredViolation:
- return "required"
-
- default:
- return "unknown violation"
- }
-}
-
-// ErrInvalidPayload is an error returned when a parsed onion payload either
-// included or omitted incorrect records for a particular hop type.
-type ErrInvalidPayload struct {
- // Type the record's type that cause the violation.
- Type tlv.Type
-
- // Violation is an enum indicating the type of violation detected in
- // processing Type.
- Violation PayloadViolation
-
- // FinalHop if true, indicates that the violation is for the final hop
- // in the route (identified by next hop id), otherwise the violation is
- // for an intermediate hop.
- FinalHop bool
-}
-
-// Error returns a human-readable description of the invalid payload error.
-func (e ErrInvalidPayload) Error() string {
- hopType := "intermediate"
- if e.FinalHop {
- hopType = "final"
- }
-
- return fmt.Sprintf("onion payload for %s hop %v record with type %d",
- hopType, e.Violation, e.Type)
-}
-
-// Payload encapsulates all information delivered to a hop in an onion payload.
-// A Hop can represent either a TLV or legacy payload. The primary forwarding
-// instruction can be accessed via ForwardingInfo, and additional records can be
-// accessed by other member functions.
-type Payload struct {
- // FwdInfo holds the basic parameters required for HTLC forwarding, e.g.
- // amount, cltv, and next hop.
- FwdInfo ForwardingInfo
-
- // MPP holds the info provided in an option_mpp record when parsed from
- // a TLV onion payload.
- MPP *record.MPP
-
- // customRecords are user-defined records in the custom type range that
- // were included in the payload.
- customRecords record.CustomSet
-}
-
-// NewLegacyPayload builds a Payload from the amount, cltv, and next hop
-// parameters provided by leegacy onion payloads.
-func NewLegacyPayload(f *sphinx.HopData) *Payload {
- nextHop := binary.BigEndian.Uint64(f.NextAddress[:])
-
- return &Payload{
- FwdInfo: ForwardingInfo{
- Network: BitcoinNetwork,
- NextHop: lnwire.NewShortChanIDFromInt(nextHop),
- AmountToForward: lnwire.MilliSatoshi(f.ForwardAmount),
- OutgoingCTLV: f.OutgoingCltv,
- },
- customRecords: make(record.CustomSet),
- }
-}
-
-// NewPayloadFromReader builds a new Hop from the passed io.Reader. The reader
-// should correspond to the bytes encapsulated in a TLV onion payload.
-func NewPayloadFromReader(r io.Reader) (*Payload, er.R) {
- var (
- cid uint64
- amt uint64
- cltv uint32
- mpp = &record.MPP{}
- )
-
- tlvStream, err := tlv.NewStream(
- record.NewAmtToFwdRecord(&amt),
- record.NewLockTimeRecord(&cltv),
- record.NewNextHopIDRecord(&cid),
- mpp.Record(),
- )
- if err != nil {
- return nil, err
- }
-
- parsedTypes, err := tlvStream.DecodeWithParsedTypes(r)
- if err != nil {
- return nil, err
- }
-
- // Validate whether the sender properly included or omitted tlv records
- // in accordance with BOLT 04.
- nextHop := lnwire.NewShortChanIDFromInt(cid)
- err = ValidateParsedPayloadTypes(parsedTypes, nextHop)
- if err != nil {
- return nil, err
- }
-
- // Check for violation of the rules for mandatory fields.
- violatingType := getMinRequiredViolation(parsedTypes)
- if violatingType != nil {
- return nil, er.E(ErrInvalidPayload{
- Type: *violatingType,
- Violation: RequiredViolation,
- FinalHop: nextHop == Exit,
- })
- }
-
- // If no MPP field was parsed, set the MPP field on the resulting
- // payload to nil.
- if _, ok := parsedTypes[record.MPPOnionType]; !ok {
- mpp = nil
- }
-
- // Filter out the custom records.
- customRecords := NewCustomRecords(parsedTypes)
-
- return &Payload{
- FwdInfo: ForwardingInfo{
- Network: BitcoinNetwork,
- NextHop: nextHop,
- AmountToForward: lnwire.MilliSatoshi(amt),
- OutgoingCTLV: cltv,
- },
- MPP: mpp,
- customRecords: customRecords,
- }, nil
-}
-
-// ForwardingInfo returns the basic parameters required for HTLC forwarding,
-// e.g. amount, cltv, and next hop.
-func (h *Payload) ForwardingInfo() ForwardingInfo {
- return h.FwdInfo
-}
-
-// NewCustomRecords filters the types parsed from the tlv stream for custom
-// records.
-func NewCustomRecords(parsedTypes tlv.TypeMap) record.CustomSet {
- customRecords := make(record.CustomSet)
- for t, parseResult := range parsedTypes {
- if parseResult == nil || t < record.CustomTypeStart {
- continue
- }
- customRecords[uint64(t)] = parseResult
- }
- return customRecords
-}
-
-// ValidateParsedPayloadTypes checks the types parsed from a hop payload to
-// ensure that the proper fields are either included or omitted. The finalHop
-// boolean should be true if the payload was parsed for an exit hop. The
-// requirements for this method are described in BOLT 04.
-func ValidateParsedPayloadTypes(parsedTypes tlv.TypeMap,
- nextHop lnwire.ShortChannelID) er.R {
-
- isFinalHop := nextHop == Exit
-
- _, hasAmt := parsedTypes[record.AmtOnionType]
- _, hasLockTime := parsedTypes[record.LockTimeOnionType]
- _, hasNextHop := parsedTypes[record.NextHopOnionType]
- _, hasMPP := parsedTypes[record.MPPOnionType]
-
- switch {
-
- // All hops must include an amount to forward.
- case !hasAmt:
- return er.E(ErrInvalidPayload{
- Type: record.AmtOnionType,
- Violation: OmittedViolation,
- FinalHop: isFinalHop,
- })
-
- // All hops must include a cltv expiry.
- case !hasLockTime:
- return er.E(ErrInvalidPayload{
- Type: record.LockTimeOnionType,
- Violation: OmittedViolation,
- FinalHop: isFinalHop,
- })
-
- // The exit hop should omit the next hop id. If nextHop != Exit, the
- // sender must have included a record, so we don't need to test for its
- // inclusion at intermediate hops directly.
- case isFinalHop && hasNextHop:
- return er.E(ErrInvalidPayload{
- Type: record.NextHopOnionType,
- Violation: IncludedViolation,
- FinalHop: true,
- })
-
- // Intermediate nodes should never receive MPP fields.
- case !isFinalHop && hasMPP:
- return er.E(ErrInvalidPayload{
- Type: record.MPPOnionType,
- Violation: IncludedViolation,
- FinalHop: isFinalHop,
- })
- }
-
- return nil
-}
-
-// MultiPath returns the record corresponding the option_mpp parsed from the
-// onion payload.
-func (h *Payload) MultiPath() *record.MPP {
- return h.MPP
-}
-
-// CustomRecords returns the custom tlv type records that were parsed from the
-// payload.
-func (h *Payload) CustomRecords() record.CustomSet {
- return h.customRecords
-}
-
-// getMinRequiredViolation checks for unrecognized required (even) fields in the
-// standard range and returns the lowest required type. Always returning the
-// lowest required type allows a failure message to be deterministic.
-func getMinRequiredViolation(set tlv.TypeMap) *tlv.Type {
- var (
- requiredViolation bool
- minRequiredViolationType tlv.Type
- )
- for t, parseResult := range set {
- // If a type is even but not known to us, we cannot process the
- // payload. We are required to understand a field that we don't
- // support.
- //
- // We always accept custom fields, because a higher level
- // application may understand them.
- if parseResult == nil || t%2 != 0 ||
- t >= record.CustomTypeStart {
-
- continue
- }
-
- if !requiredViolation || t < minRequiredViolationType {
- minRequiredViolationType = t
- }
- requiredViolation = true
- }
-
- if requiredViolation {
- return &minRequiredViolationType
- }
-
- return nil
-}
diff --git a/lnd/htlcswitch/hop/payload_test.go b/lnd/htlcswitch/hop/payload_test.go
deleted file mode 100644
index 478ee44a..00000000
--- a/lnd/htlcswitch/hop/payload_test.go
+++ /dev/null
@@ -1,256 +0,0 @@
-package hop_test
-
-import (
- "bytes"
- "reflect"
- "testing"
-
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/lnd/htlcswitch/hop"
- "github.com/pkt-cash/pktd/lnd/lnwire"
- "github.com/pkt-cash/pktd/lnd/record"
-)
-
-type decodePayloadTest struct {
- name string
- payload []byte
- expErr error
- expCustomRecords map[uint64][]byte
- shouldHaveMPP bool
-}
-
-var decodePayloadTests = []decodePayloadTest{
- {
- name: "final hop valid",
- payload: []byte{0x02, 0x00, 0x04, 0x00},
- },
- {
- name: "intermediate hop valid",
- payload: []byte{0x02, 0x00, 0x04, 0x00, 0x06, 0x08, 0x01, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- },
- },
- {
- name: "final hop no amount",
- payload: []byte{0x04, 0x00},
- expErr: hop.ErrInvalidPayload{
- Type: record.AmtOnionType,
- Violation: hop.OmittedViolation,
- FinalHop: true,
- },
- },
- {
- name: "intermediate hop no amount",
- payload: []byte{0x04, 0x00, 0x06, 0x08, 0x01, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00,
- },
- expErr: hop.ErrInvalidPayload{
- Type: record.AmtOnionType,
- Violation: hop.OmittedViolation,
- FinalHop: false,
- },
- },
- {
- name: "final hop no expiry",
- payload: []byte{0x02, 0x00},
- expErr: hop.ErrInvalidPayload{
- Type: record.LockTimeOnionType,
- Violation: hop.OmittedViolation,
- FinalHop: true,
- },
- },
- {
- name: "intermediate hop no expiry",
- payload: []byte{0x02, 0x00, 0x06, 0x08, 0x01, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00,
- },
- expErr: hop.ErrInvalidPayload{
- Type: record.LockTimeOnionType,
- Violation: hop.OmittedViolation,
- FinalHop: false,
- },
- },
- {
- name: "final hop next sid present",
- payload: []byte{0x02, 0x00, 0x04, 0x00, 0x06, 0x08, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- },
- expErr: hop.ErrInvalidPayload{
- Type: record.NextHopOnionType,
- Violation: hop.IncludedViolation,
- FinalHop: true,
- },
- },
- {
- name: "required type after omitted hop id",
- payload: []byte{0x02, 0x00, 0x04, 0x00, 0x0a, 0x00},
- expErr: hop.ErrInvalidPayload{
- Type: 10,
- Violation: hop.RequiredViolation,
- FinalHop: true,
- },
- },
- {
- name: "required type after included hop id",
- payload: []byte{0x02, 0x00, 0x04, 0x00, 0x06, 0x08, 0x01, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0a, 0x00,
- },
- expErr: hop.ErrInvalidPayload{
- Type: 10,
- Violation: hop.RequiredViolation,
- FinalHop: false,
- },
- },
- {
- name: "required type zero final hop",
- payload: []byte{0x00, 0x00, 0x02, 0x00, 0x04, 0x00},
- expErr: hop.ErrInvalidPayload{
- Type: 0,
- Violation: hop.RequiredViolation,
- FinalHop: true,
- },
- },
- {
- name: "required type zero final hop zero sid",
- payload: []byte{0x00, 0x00, 0x02, 0x00, 0x04, 0x00, 0x06, 0x08,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- },
- expErr: hop.ErrInvalidPayload{
- Type: record.NextHopOnionType,
- Violation: hop.IncludedViolation,
- FinalHop: true,
- },
- },
- {
- name: "required type zero intermediate hop",
- payload: []byte{0x00, 0x00, 0x02, 0x00, 0x04, 0x00, 0x06, 0x08,
- 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- },
- expErr: hop.ErrInvalidPayload{
- Type: 0,
- Violation: hop.RequiredViolation,
- FinalHop: false,
- },
- },
- {
- name: "required type in custom range",
- payload: []byte{0x02, 0x00, 0x04, 0x00,
- 0xfe, 0x00, 0x01, 0x00, 0x00, 0x02, 0x10, 0x11,
- },
- expCustomRecords: map[uint64][]byte{
- 65536: {0x10, 0x11},
- },
- },
- {
- name: "valid intermediate hop",
- payload: []byte{0x02, 0x00, 0x04, 0x00, 0x06, 0x08, 0x01, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- },
- expErr: nil,
- },
- {
- name: "valid final hop",
- payload: []byte{0x02, 0x00, 0x04, 0x00},
- expErr: nil,
- },
- {
- name: "intermediate hop with mpp",
- payload: []byte{
- // amount
- 0x02, 0x00,
- // cltv
- 0x04, 0x00,
- // next hop id
- 0x06, 0x08,
- 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- // mpp
- 0x08, 0x21,
- 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11,
- 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11,
- 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11,
- 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11,
- 0x08,
- },
- expErr: hop.ErrInvalidPayload{
- Type: record.MPPOnionType,
- Violation: hop.IncludedViolation,
- FinalHop: false,
- },
- },
- {
- name: "final hop with mpp",
- payload: []byte{
- // amount
- 0x02, 0x00,
- // cltv
- 0x04, 0x00,
- // mpp
- 0x08, 0x21,
- 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11,
- 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11,
- 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11,
- 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11,
- 0x08,
- },
- expErr: nil,
- shouldHaveMPP: true,
- },
-}
-
-// TestDecodeHopPayloadRecordValidation asserts that parsing the payloads in the
-// tests yields the expected errors depending on whether the proper fields were
-// included or omitted.
-func TestDecodeHopPayloadRecordValidation(t *testing.T) {
- for _, test := range decodePayloadTests {
- t.Run(test.name, func(t *testing.T) {
- testDecodeHopPayloadValidation(t, test)
- })
- }
-}
-
-func testDecodeHopPayloadValidation(t *testing.T, test decodePayloadTest) {
- var (
- testTotalMsat = lnwire.MilliSatoshi(8)
- testAddr = [32]byte{
- 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11,
- 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11,
- 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11,
- 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11,
- }
- )
-
- p, err := hop.NewPayloadFromReader(bytes.NewReader(test.payload))
- errr := er.Wrapped(err)
- if !reflect.DeepEqual(test.expErr, errr) {
- t.Fatalf("expected error mismatch, want: %v, got: %v",
- test.expErr, err)
- }
- if err != nil {
- return
- }
-
- // Assert MPP fields if we expect them.
- if test.shouldHaveMPP {
- if p.MPP == nil {
- t.Fatalf("payload should have MPP record")
- }
- if p.MPP.TotalMsat() != testTotalMsat {
- t.Fatalf("invalid total msat")
- }
- if p.MPP.PaymentAddr() != testAddr {
- t.Fatalf("invalid payment addr")
- }
- } else if p.MPP != nil {
- t.Fatalf("unexpected MPP payload")
- }
-
- // Convert expected nil map to empty map, because we always expect an
- // initiated map from the payload.
- expCustomRecords := make(record.CustomSet)
- if test.expCustomRecords != nil {
- expCustomRecords = test.expCustomRecords
- }
- if !reflect.DeepEqual(expCustomRecords, p.CustomRecords()) {
- t.Fatalf("invalid custom records")
- }
-}
diff --git a/lnd/htlcswitch/hop/type.go b/lnd/htlcswitch/hop/type.go
deleted file mode 100644
index b99f73b9..00000000
--- a/lnd/htlcswitch/hop/type.go
+++ /dev/null
@@ -1,13 +0,0 @@
-package hop
-
-import "github.com/pkt-cash/pktd/lnd/lnwire"
-
-var (
- // Exit is a special "hop" denoting that an incoming HTLC is meant to
- // pay finally to the receiving node.
- Exit lnwire.ShortChannelID
-
- // Source is a sentinel "hop" denoting that an incoming HTLC is
- // initiated by our own switch.
- Source lnwire.ShortChannelID
-)
diff --git a/lnd/htlcswitch/htlcnotifier.go b/lnd/htlcswitch/htlcnotifier.go
deleted file mode 100644
index 4bf17e26..00000000
--- a/lnd/htlcswitch/htlcnotifier.go
+++ /dev/null
@@ -1,431 +0,0 @@
-package htlcswitch
-
-import (
- "fmt"
- "strings"
- "sync"
- "time"
-
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/lnd/channeldb"
- "github.com/pkt-cash/pktd/lnd/htlcswitch/hop"
- "github.com/pkt-cash/pktd/lnd/lnwire"
- "github.com/pkt-cash/pktd/lnd/subscribe"
- "github.com/pkt-cash/pktd/pktlog/log"
-)
-
-// HtlcNotifier notifies clients of htlc forwards, failures and settles for
-// htlcs that the switch handles. It takes subscriptions for its events and
-// notifies them when htlc events occur. These are served on a best-effort
-// basis; events are not persisted, delivery is not guaranteed (in the event
-// of a crash in the switch, forward events may be lost) and some events may
-// be replayed upon restart. Events consumed from this package should be
-// de-duplicated by the htlc's unique combination of incoming+outgoing circuit
-// and not relied upon for critical operations.
-//
-// The htlc notifier sends the following kinds of events:
-// Forwarding Event:
-// - Represents a htlc which is forwarded onward from our node.
-// - Present for htlc forwards through our node and local sends.
-//
-// Link Failure Event:
-// - Indicates that a htlc has failed on our incoming or outgoing link,
-// with an incoming boolean which indicates where the failure occurred.
-// - Incoming link failures are present for failed attempts to pay one of
-// our invoices (insufficient amount or mpp timeout, for example) and for
-// forwards that we cannot decode to forward onwards.
-// - Outgoing link failures are present for forwards or local payments that
-// do not meet our outgoing link's policy (insufficient fees, for example)
-// and when we fail to forward the payment on (insufficient outgoing
-// capacity, or an unknown outgoing link).
-//
-// Forwarding Failure Event:
-// - Forwarding failures indicate that a htlc we forwarded has failed at
-// another node down the route.
-// - Present for local sends and htlc forwards which fail after they left
-// our node.
-//
-// Settle event:
-// - Settle events are present when a htlc which we added is settled through
-// the release of a preimage.
-// - Present for local receives, and successful local sends or forwards.
-//
-// Each htlc is identified by its incoming and outgoing circuit key. Htlcs,
-// and their subsequent settles or fails, can be identified by the combination
-// of incoming and outgoing circuits. Note that receives to our node will
-// have a zero outgoing circuit key because the htlc terminates at our
-// node, and sends from our node will have a zero incoming circuit key because
-// the send originates at our node.
-type HtlcNotifier struct {
- started sync.Once
- stopped sync.Once
-
- // now returns the current time, it is set in the htlcnotifier to allow
- // for timestamp mocking in tests.
- now func() time.Time
-
- ntfnServer *subscribe.Server
-}
-
-// NewHtlcNotifier creates a new HtlcNotifier which gets htlc forwarded,
-// failed and settled events from links our node has established with peers
-// and sends notifications to subscribing clients.
-func NewHtlcNotifier(now func() time.Time) *HtlcNotifier {
- return &HtlcNotifier{
- now: now,
- ntfnServer: subscribe.NewServer(),
- }
-}
-
-// Start starts the HtlcNotifier and all goroutines it needs to consume
-// events and provide subscriptions to clients.
-func (h *HtlcNotifier) Start() er.R {
- var err er.R
- h.started.Do(func() {
- log.Trace("HtlcNotifier starting")
- err = h.ntfnServer.Start()
- })
- return err
-}
-
-// Stop signals the notifier for a graceful shutdown.
-func (h *HtlcNotifier) Stop() {
- h.stopped.Do(func() {
- if err := h.ntfnServer.Stop(); err != nil {
- log.Warnf("error stopping htlc notifier: %v", err)
- }
- })
-}
-
-// SubscribeHtlcEvents returns a subscribe.Client that will receive updates
-// any time the server is made aware of a new event.
-func (h *HtlcNotifier) SubscribeHtlcEvents() (*subscribe.Client, er.R) {
- return h.ntfnServer.Subscribe()
-}
-
-// HtlcKey uniquely identifies the htlc.
-type HtlcKey struct {
- // IncomingCircuit is the channel an htlc id of the incoming htlc.
- IncomingCircuit channeldb.CircuitKey
-
- // OutgoingCircuit is the channel and htlc id of the outgoing htlc.
- OutgoingCircuit channeldb.CircuitKey
-}
-
-// String returns a string representation of a htlc key.
-func (k HtlcKey) String() string {
- switch {
- case k.IncomingCircuit.ChanID == hop.Source:
- return k.OutgoingCircuit.String()
-
- case k.OutgoingCircuit.ChanID == hop.Exit:
- return k.IncomingCircuit.String()
-
- default:
- return fmt.Sprintf("%v -> %v", k.IncomingCircuit,
- k.OutgoingCircuit)
- }
-}
-
-// HtlcInfo provides the details of a htlc that our node has processed. For
-// forwards, incoming and outgoing values are set, whereas sends and receives
-// will only have outgoing or incoming details set.
-type HtlcInfo struct {
- // IncomingTimelock is the time lock of the htlc on our incoming
- // channel.
- IncomingTimeLock uint32
-
- // OutgoingTimelock is the time lock the htlc on our outgoing channel.
- OutgoingTimeLock uint32
-
- // IncomingAmt is the amount of the htlc on our incoming channel.
- IncomingAmt lnwire.MilliSatoshi
-
- // OutgoingAmt is the amount of the htlc on our outgoing channel.
- OutgoingAmt lnwire.MilliSatoshi
-}
-
-// String returns a string representation of a htlc.
-func (h HtlcInfo) String() string {
- var details []string
-
- // If the incoming information is not zero, as is the case for a send,
- // we include the incoming amount and timelock.
- if h.IncomingAmt != 0 || h.IncomingTimeLock != 0 {
- str := fmt.Sprintf("incoming amount: %v, "+
- "incoming timelock: %v", h.IncomingAmt,
- h.IncomingTimeLock)
-
- details = append(details, str)
- }
-
- // If the outgoing information is not zero, as is the case for a
- // receive, we include the outgoing amount and timelock.
- if h.OutgoingAmt != 0 || h.OutgoingTimeLock != 0 {
- str := fmt.Sprintf("outgoing amount: %v, "+
- "outgoing timelock: %v", h.OutgoingAmt,
- h.OutgoingTimeLock)
-
- details = append(details, str)
- }
-
- return strings.Join(details, ", ")
-}
-
-// HtlcEventType represents the type of event that a htlc was part of.
-type HtlcEventType int
-
-const (
- // HtlcEventTypeSend represents a htlc that was part of a send from
- // our node.
- HtlcEventTypeSend HtlcEventType = iota
-
- // HtlcEventTypeReceive represents a htlc that was part of a receive
- // to our node.
- HtlcEventTypeReceive
-
- // HtlcEventTypeForward represents a htlc that was forwarded through
- // our node.
- HtlcEventTypeForward
-)
-
-// String returns a string representation of a htlc event type.
-func (h HtlcEventType) String() string {
- switch h {
- case HtlcEventTypeSend:
- return "send"
-
- case HtlcEventTypeReceive:
- return "receive"
-
- case HtlcEventTypeForward:
- return "forward"
-
- default:
- return "unknown"
- }
-}
-
-// ForwardingEvent represents a htlc that was forwarded onwards from our node.
-// Sends which originate from our node will report forward events with zero
-// incoming circuits in their htlc key.
-type ForwardingEvent struct {
- // HtlcKey uniquely identifies the htlc, and can be used to match the
- // forwarding event with subsequent settle/fail events.
- HtlcKey
-
- // HtlcInfo contains details about the htlc.
- HtlcInfo
-
- // HtlcEventType classifies the event as part of a local send or
- // receive, or as part of a forward.
- HtlcEventType
-
- // Timestamp is the time when this htlc was forwarded.
- Timestamp time.Time
-}
-
-// LinkFailEvent describes a htlc that failed on our incoming or outgoing
-// link. The incoming bool is true for failures on incoming links, and false
-// for failures on outgoing links. The failure reason is provided by a lnwire
-// failure message which is enriched with a failure detail in the cases where
-// the wire failure message does not contain full information about the
-// failure.
-type LinkFailEvent struct {
- // HtlcKey uniquely identifies the htlc.
- HtlcKey
-
- // HtlcInfo contains details about the htlc.
- HtlcInfo
-
- // HtlcEventType classifies the event as part of a local send or
- // receive, or as part of a forward.
- HtlcEventType
-
- // LinkError is the reason that we failed the htlc.
- LinkError *LinkError
-
- // Incoming is true if the htlc was failed on an incoming link.
- // If it failed on the outgoing link, it is false.
- Incoming bool
-
- // Timestamp is the time when the link failure occurred.
- Timestamp time.Time
-}
-
-// ForwardingFailEvent represents a htlc failure which occurred down the line
-// after we forwarded a htlc onwards. An error is not included in this event
-// because errors returned down the route are encrypted. HtlcInfo is not
-// reliably available for forwarding failures, so it is omitted. These events
-// should be matched with their corresponding forward event to obtain this
-// information.
-type ForwardingFailEvent struct {
- // HtlcKey uniquely identifies the htlc, and can be used to match the
- // htlc with its corresponding forwarding event.
- HtlcKey
-
- // HtlcEventType classifies the event as part of a local send or
- // receive, or as part of a forward.
- HtlcEventType
-
- // Timestamp is the time when the forwarding failure was received.
- Timestamp time.Time
-}
-
-// SettleEvent represents a htlc that was settled. HtlcInfo is not reliably
-// available for forwarding failures, so it is omitted. These events should
-// be matched with corresponding forward events or invoices (for receives)
-// to obtain additional information about the htlc.
-type SettleEvent struct {
- // HtlcKey uniquely identifies the htlc, and can be used to match
- // forwards with their corresponding forwarding event.
- HtlcKey
-
- // HtlcEventType classifies the event as part of a local send or
- // receive, or as part of a forward.
- HtlcEventType
-
- // Timestamp is the time when this htlc was settled.
- Timestamp time.Time
-}
-
-// NotifyForwardingEvent notifies the HtlcNotifier than a htlc has been
-// forwarded.
-//
-// Note this is part of the htlcNotifier interface.
-func (h *HtlcNotifier) NotifyForwardingEvent(key HtlcKey, info HtlcInfo,
- eventType HtlcEventType) {
-
- event := &ForwardingEvent{
- HtlcKey: key,
- HtlcInfo: info,
- HtlcEventType: eventType,
- Timestamp: h.now(),
- }
-
- log.Tracef("Notifying forward event: %v over %v, %v", eventType, key,
- info)
-
- if err := h.ntfnServer.SendUpdate(event); err != nil {
- log.Warnf("Unable to send forwarding event: %v", err)
- }
-}
-
-// NotifyLinkFailEvent notifies that a htlc has failed on our incoming
-// or outgoing link.
-//
-// Note this is part of the htlcNotifier interface.
-func (h *HtlcNotifier) NotifyLinkFailEvent(key HtlcKey, info HtlcInfo,
- eventType HtlcEventType, linkErr *LinkError, incoming bool) {
-
- event := &LinkFailEvent{
- HtlcKey: key,
- HtlcInfo: info,
- HtlcEventType: eventType,
- LinkError: linkErr,
- Incoming: incoming,
- Timestamp: h.now(),
- }
-
- log.Tracef("Notifying link failure event: %v over %v, %v", eventType,
- key, info)
-
- if err := h.ntfnServer.SendUpdate(event); err != nil {
- log.Warnf("Unable to send link fail event: %v", err)
- }
-}
-
-// NotifyForwardingFailEvent notifies the HtlcNotifier that a htlc we
-// forwarded has failed down the line.
-//
-// Note this is part of the htlcNotifier interface.
-func (h *HtlcNotifier) NotifyForwardingFailEvent(key HtlcKey,
- eventType HtlcEventType) {
-
- event := &ForwardingFailEvent{
- HtlcKey: key,
- HtlcEventType: eventType,
- Timestamp: h.now(),
- }
-
- log.Tracef("Notifying forwarding failure event: %v over %v", eventType,
- key)
-
- if err := h.ntfnServer.SendUpdate(event); err != nil {
- log.Warnf("Unable to send forwarding fail event: %v", err)
- }
-}
-
-// NotifySettleEvent notifies the HtlcNotifier that a htlc that we committed
-// to as part of a forward or a receive to our node has been settled.
-//
-// Note this is part of the htlcNotifier interface.
-func (h *HtlcNotifier) NotifySettleEvent(key HtlcKey, eventType HtlcEventType) {
- event := &SettleEvent{
- HtlcKey: key,
- HtlcEventType: eventType,
- Timestamp: h.now(),
- }
-
- log.Tracef("Notifying settle event: %v over %v", eventType, key)
-
- if err := h.ntfnServer.SendUpdate(event); err != nil {
- log.Warnf("Unable to send settle event: %v", err)
- }
-}
-
-// newHtlc key returns a htlc key for the packet provided. If the packet
-// has a zero incoming channel ID, the packet is for one of our own sends,
-// which has the payment id stashed in the incoming htlc id. If this is the
-// case, we replace the incoming htlc id with zero so that the notifier
-// consistently reports zero circuit keys for events that terminate or
-// originate at our node.
-func newHtlcKey(pkt *htlcPacket) HtlcKey {
- htlcKey := HtlcKey{
- IncomingCircuit: channeldb.CircuitKey{
- ChanID: pkt.incomingChanID,
- HtlcID: pkt.incomingHTLCID,
- },
- OutgoingCircuit: CircuitKey{
- ChanID: pkt.outgoingChanID,
- HtlcID: pkt.outgoingHTLCID,
- },
- }
-
- // If the packet has a zero incoming channel ID, it is a send that was
- // initiated at our node. If this is the case, our internal pid is in
- // the incoming htlc ID, so we overwrite it with 0 for notification
- // purposes.
- if pkt.incomingChanID == hop.Source {
- htlcKey.IncomingCircuit.HtlcID = 0
- }
-
- return htlcKey
-}
-
-// newHtlcInfo returns HtlcInfo for the packet provided.
-func newHtlcInfo(pkt *htlcPacket) HtlcInfo {
- return HtlcInfo{
- IncomingTimeLock: pkt.incomingTimeout,
- OutgoingTimeLock: pkt.outgoingTimeout,
- IncomingAmt: pkt.incomingAmount,
- OutgoingAmt: pkt.amount,
- }
-}
-
-// getEventType returns the htlc type based on the fields set in the htlc
-// packet. Sends that originate at our node have the source (zero) incoming
-// channel ID. Receives to our node have the exit (zero) outgoing channel ID
-// and forwards have both fields set.
-func getEventType(pkt *htlcPacket) HtlcEventType {
- switch {
- case pkt.incomingChanID == hop.Source:
- return HtlcEventTypeSend
-
- case pkt.outgoingChanID == hop.Exit:
- return HtlcEventTypeReceive
-
- default:
- return HtlcEventTypeForward
- }
-}
diff --git a/lnd/htlcswitch/interceptable_switch.go b/lnd/htlcswitch/interceptable_switch.go
deleted file mode 100644
index ce5553dd..00000000
--- a/lnd/htlcswitch/interceptable_switch.go
+++ /dev/null
@@ -1,174 +0,0 @@
-package htlcswitch
-
-import (
- "sync"
-
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/lnd/channeldb"
- "github.com/pkt-cash/pktd/lnd/htlcswitch/hop"
- "github.com/pkt-cash/pktd/lnd/lntypes"
- "github.com/pkt-cash/pktd/lnd/lnwire"
-)
-
-var (
- // ErrFwdNotExists is an error returned when the caller tries to resolve
- // a forward that doesn't exist anymore.
- ErrFwdNotExists = Err.CodeWithDetail("ErrFwdNotExists", "forward does not exist")
-)
-
-// InterceptableSwitch is an implementation of ForwardingSwitch interface.
-// This implementation is used like a proxy that wraps the switch and
-// intercepts forward requests. A reference to the Switch is held in order
-// to communicate back the interception result where the options are:
-// Resume - forwards the original request to the switch as is.
-// Settle - routes UpdateFulfillHTLC to the originating link.
-// Fail - routes UpdateFailHTLC to the originating link.
-type InterceptableSwitch struct {
- sync.RWMutex
-
- // htlcSwitch is the underline switch
- htlcSwitch *Switch
-
- // fwdInterceptor is the callback that is called for each forward of
- // an incoming htlc. It should return true if it is interested in handling
- // it.
- fwdInterceptor ForwardInterceptor
-}
-
-// NewInterceptableSwitch returns an instance of InterceptableSwitch.
-func NewInterceptableSwitch(s *Switch) *InterceptableSwitch {
- return &InterceptableSwitch{htlcSwitch: s}
-}
-
-// SetInterceptor sets the ForwardInterceptor to be used.
-func (s *InterceptableSwitch) SetInterceptor(
- interceptor ForwardInterceptor) {
-
- s.Lock()
- defer s.Unlock()
- s.fwdInterceptor = interceptor
-}
-
-// ForwardPackets attempts to forward the batch of htlcs through the
-// switch, any failed packets will be returned to the provided
-// ChannelLink. The link's quit signal should be provided to allow
-// cancellation of forwarding during link shutdown.
-func (s *InterceptableSwitch) ForwardPackets(linkQuit chan struct{},
- packets ...*htlcPacket) er.R {
-
- var interceptor ForwardInterceptor
- s.Lock()
- interceptor = s.fwdInterceptor
- s.Unlock()
-
- // Optimize for the case we don't have an interceptor.
- if interceptor == nil {
- return s.htlcSwitch.ForwardPackets(linkQuit, packets...)
- }
-
- var notIntercepted []*htlcPacket
- for _, p := range packets {
- if !s.interceptForward(p, interceptor, linkQuit) {
- notIntercepted = append(notIntercepted, p)
- }
- }
- return s.htlcSwitch.ForwardPackets(linkQuit, notIntercepted...)
-}
-
-// interceptForward checks if there is any external interceptor interested in
-// this packet. Currently only htlc type of UpdateAddHTLC that are forwarded
-// are being checked for interception. It can be extended in the future given
-// the right use case.
-func (s *InterceptableSwitch) interceptForward(packet *htlcPacket,
- interceptor ForwardInterceptor, linkQuit chan struct{}) bool {
-
- switch htlc := packet.htlc.(type) {
- case *lnwire.UpdateAddHTLC:
- // We are not interested in intercepting initated payments.
- if packet.incomingChanID == hop.Source {
- return false
- }
-
- intercepted := &interceptedForward{
- linkQuit: linkQuit,
- htlc: htlc,
- packet: packet,
- htlcSwitch: s.htlcSwitch,
- }
-
- // If this htlc was intercepted, don't handle the forward.
- return interceptor(intercepted)
- default:
- return false
- }
-}
-
-// interceptedForward implements the InterceptedForward interface.
-// It is passed from the switch to external interceptors that are interested
-// in holding forwards and resolve them manually.
-type interceptedForward struct {
- linkQuit chan struct{}
- htlc *lnwire.UpdateAddHTLC
- packet *htlcPacket
- htlcSwitch *Switch
-}
-
-// Packet returns the intercepted htlc packet.
-func (f *interceptedForward) Packet() InterceptedPacket {
- return InterceptedPacket{
- IncomingCircuit: channeldb.CircuitKey{
- ChanID: f.packet.incomingChanID,
- HtlcID: f.packet.incomingHTLCID,
- },
- OutgoingChanID: f.packet.outgoingChanID,
- Hash: f.htlc.PaymentHash,
- OutgoingExpiry: f.htlc.Expiry,
- OutgoingAmount: f.htlc.Amount,
- IncomingAmount: f.packet.incomingAmount,
- IncomingExpiry: f.packet.incomingTimeout,
- CustomRecords: f.packet.customRecords,
- OnionBlob: f.htlc.OnionBlob,
- }
-}
-
-// Resume resumes the default behavior as if the packet was not intercepted.
-func (f *interceptedForward) Resume() er.R {
- return f.htlcSwitch.ForwardPackets(f.linkQuit, f.packet)
-}
-
-// Fail forward a failed packet to the switch.
-func (f *interceptedForward) Fail() er.R {
- reason, err := f.packet.obfuscator.EncryptFirstHop(lnwire.NewTemporaryChannelFailure(nil))
- if err != nil {
- return er.Errorf("failed to encrypt failure reason %v", err)
- }
- return f.resolve(&lnwire.UpdateFailHTLC{
- Reason: reason,
- })
-}
-
-// Settle forwards a settled packet to the switch.
-func (f *interceptedForward) Settle(preimage lntypes.Preimage) er.R {
- if !preimage.Matches(f.htlc.PaymentHash) {
- return er.New("preimage does not match hash")
- }
- return f.resolve(&lnwire.UpdateFulfillHTLC{
- PaymentPreimage: preimage,
- })
-}
-
-// resolve is used for both Settle and Fail and forwards the message to the
-// switch.
-func (f *interceptedForward) resolve(message lnwire.Message) er.R {
- pkt := &htlcPacket{
- incomingChanID: f.packet.incomingChanID,
- incomingHTLCID: f.packet.incomingHTLCID,
- outgoingChanID: f.packet.outgoingChanID,
- outgoingHTLCID: f.packet.outgoingHTLCID,
- isResolution: true,
- circuit: f.packet.circuit,
- htlc: message,
- obfuscator: f.packet.obfuscator,
- }
- return f.htlcSwitch.mailOrchestrator.Deliver(pkt.incomingChanID, pkt)
-}
diff --git a/lnd/htlcswitch/interfaces.go b/lnd/htlcswitch/interfaces.go
deleted file mode 100644
index 5c0a382b..00000000
--- a/lnd/htlcswitch/interfaces.go
+++ /dev/null
@@ -1,286 +0,0 @@
-package htlcswitch
-
-import (
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/lnd/channeldb"
- "github.com/pkt-cash/pktd/lnd/invoices"
- "github.com/pkt-cash/pktd/lnd/lnpeer"
- "github.com/pkt-cash/pktd/lnd/lntypes"
- "github.com/pkt-cash/pktd/lnd/lnwallet"
- "github.com/pkt-cash/pktd/lnd/lnwire"
- "github.com/pkt-cash/pktd/lnd/record"
- "github.com/pkt-cash/pktd/wire"
-)
-
-// InvoiceDatabase is an interface which represents the persistent subsystem
-// which may search, lookup and settle invoices.
-type InvoiceDatabase interface {
- // LookupInvoice attempts to look up an invoice according to its 32
- // byte payment hash.
- LookupInvoice(lntypes.Hash) (channeldb.Invoice, er.R)
-
- // NotifyExitHopHtlc attempts to mark an invoice as settled. If the
- // invoice is a debug invoice, then this method is a noop as debug
- // invoices are never fully settled. The return value describes how the
- // htlc should be resolved. If the htlc cannot be resolved immediately,
- // the resolution is sent on the passed in hodlChan later. The eob
- // field passes the entire onion hop payload into the invoice registry
- // for decoding purposes.
- NotifyExitHopHtlc(payHash lntypes.Hash, paidAmount lnwire.MilliSatoshi,
- expiry uint32, currentHeight int32,
- circuitKey channeldb.CircuitKey, hodlChan chan<- interface{},
- payload invoices.Payload) (invoices.HtlcResolution, er.R)
-
- // CancelInvoice attempts to cancel the invoice corresponding to the
- // passed payment hash.
- CancelInvoice(payHash lntypes.Hash) er.R
-
- // SettleHodlInvoice settles a hold invoice.
- SettleHodlInvoice(preimage lntypes.Preimage) er.R
-
- // HodlUnsubscribeAll unsubscribes from all htlc resolutions.
- HodlUnsubscribeAll(subscriber chan<- interface{})
-}
-
-// ChannelLink is an interface which represents the subsystem for managing the
-// incoming htlc requests, applying the changes to the channel, and also
-// propagating/forwarding it to htlc switch.
-//
-// abstraction level
-// ^
-// |
-// | - - - - - - - - - - - - Lightning - - - - - - - - - - - - -
-// |
-// | (Switch) (Switch) (Switch)
-// | Alice <-- channel link --> Bob <-- channel link --> Carol
-// |
-// | - - - - - - - - - - - - - TCP - - - - - - - - - - - - - - -
-// |
-// | (Peer) (Peer) (Peer)
-// | Alice <----- tcp conn --> Bob <---- tcp conn -----> Carol
-// |
-//
-type ChannelLink interface {
- // TODO(roasbeef): modify interface to embed mail boxes?
-
- // HandleSwitchPacket handles the switch packets. This packets might be
- // forwarded to us from another channel link in case the htlc update
- // came from another peer or if the update was created by user
- // initially.
- //
- // NOTE: This function MUST be non-blocking (or block as little as
- // possible).
- HandleSwitchPacket(*htlcPacket) er.R
-
- // HandleLocalAddPacket handles a locally-initiated UpdateAddHTLC
- // packet. It will be processed synchronously.
- HandleLocalAddPacket(*htlcPacket) er.R
-
- // HandleChannelUpdate handles the htlc requests as settle/add/fail
- // which sent to us from remote peer we have a channel with.
- //
- // NOTE: This function MUST be non-blocking (or block as little as
- // possible).
- HandleChannelUpdate(lnwire.Message)
-
- // ChannelPoint returns the channel outpoint for the channel link.
- ChannelPoint() *wire.OutPoint
-
- // ChanID returns the channel ID for the channel link. The channel ID
- // is a more compact representation of a channel's full outpoint.
- ChanID() lnwire.ChannelID
-
- // ShortChanID returns the short channel ID for the channel link. The
- // short channel ID encodes the exact location in the main chain that
- // the original funding output can be found.
- ShortChanID() lnwire.ShortChannelID
-
- // UpdateShortChanID updates the short channel ID for a link. This may
- // be required in the event that a link is created before the short
- // chan ID for it is known, or a re-org occurs, and the funding
- // transaction changes location within the chain.
- UpdateShortChanID() (lnwire.ShortChannelID, er.R)
-
- // UpdateForwardingPolicy updates the forwarding policy for the target
- // ChannelLink. Once updated, the link will use the new forwarding
- // policy to govern if it an incoming HTLC should be forwarded or not.
- UpdateForwardingPolicy(ForwardingPolicy)
-
- // CheckHtlcForward should return a nil error if the passed HTLC details
- // satisfy the current forwarding policy fo the target link. Otherwise,
- // a LinkError with a valid protocol failure message should be returned
- // in order to signal to the source of the HTLC, the policy consistency
- // issue.
- CheckHtlcForward(payHash [32]byte, incomingAmt lnwire.MilliSatoshi,
- amtToForward lnwire.MilliSatoshi,
- incomingTimeout, outgoingTimeout uint32,
- heightNow uint32) *LinkError
-
- // CheckHtlcTransit should return a nil error if the passed HTLC details
- // satisfy the current channel policy. Otherwise, a LinkError with a
- // valid protocol failure message should be returned in order to signal
- // the violation. This call is intended to be used for locally initiated
- // payments for which there is no corresponding incoming htlc.
- CheckHtlcTransit(payHash [32]byte, amt lnwire.MilliSatoshi,
- timeout uint32, heightNow uint32) *LinkError
-
- // Bandwidth returns the amount of milli-satoshis which current link
- // might pass through channel link. The value returned from this method
- // represents the up to date available flow through the channel. This
- // takes into account any forwarded but un-cleared HTLC's, and any
- // HTLC's which have been set to the over flow queue.
- Bandwidth() lnwire.MilliSatoshi
-
- // Stats return the statistics of channel link. Number of updates,
- // total sent/received milli-satoshis.
- Stats() (uint64, lnwire.MilliSatoshi, lnwire.MilliSatoshi)
-
- // Peer returns the representation of remote peer with which we have
- // the channel link opened.
- Peer() lnpeer.Peer
-
- // EligibleToForward returns a bool indicating if the channel is able
- // to actively accept requests to forward HTLC's. A channel may be
- // active, but not able to forward HTLC's if it hasn't yet finalized
- // the pre-channel operation protocol with the remote peer. The switch
- // will use this function in forwarding decisions accordingly.
- EligibleToForward() bool
-
- // AttachMailBox delivers an active MailBox to the link. The MailBox may
- // have buffered messages.
- AttachMailBox(MailBox)
-
- // Start/Stop are used to initiate the start/stop of the channel link
- // functioning.
- Start() er.R
- Stop()
-}
-
-// ForwardingLog is an interface that represents a time series database which
-// keep track of all successfully completed payment circuits. Every few
-// seconds, the switch will collate and flush out all the successful payment
-// circuits during the last interval.
-type ForwardingLog interface {
- // AddForwardingEvents is a method that should write out the set of
- // forwarding events in a batch to persistent storage. Outside
- // sub-systems can then query the contents of the log for analysis,
- // visualizations, etc.
- AddForwardingEvents([]channeldb.ForwardingEvent) er.R
-}
-
-// TowerClient is the primary interface used by the daemon to backup pre-signed
-// justice transactions to watchtowers.
-type TowerClient interface {
- // RegisterChannel persistently initializes any channel-dependent
- // parameters within the client. This should be called during link
- // startup to ensure that the client is able to support the link during
- // operation.
- RegisterChannel(lnwire.ChannelID) er.R
-
- // BackupState initiates a request to back up a particular revoked
- // state. If the method returns nil, the backup is guaranteed to be
- // successful unless the tower is unavailable and client is force quit,
- // or the justice transaction would create dust outputs when trying to
- // abide by the negotiated policy. If the channel we're trying to back
- // up doesn't have a tweak for the remote party's output, then
- // isTweakless should be true.
- BackupState(*lnwire.ChannelID, *lnwallet.BreachRetribution, bool) er.R
-}
-
-// InterceptableHtlcForwarder is the interface to set the interceptor
-// implementation that intercepts htlc forwards.
-type InterceptableHtlcForwarder interface {
- // SetInterceptor sets a ForwardInterceptor.
- SetInterceptor(interceptor ForwardInterceptor)
-}
-
-// ForwardInterceptor is a function that is invoked from the switch for every
-// incoming htlc that is intended to be forwarded. It is passed with the
-// InterceptedForward that contains the information about the packet and a way
-// to resolve it manually later in case it is held.
-// The return value indicates if this handler will take control of this forward
-// and resolve it later or let the switch execute its default behavior.
-type ForwardInterceptor func(InterceptedForward) bool
-
-// InterceptedPacket contains the relevant information for the interceptor about
-// an htlc.
-type InterceptedPacket struct {
- // IncomingCircuit contains the incoming channel and htlc id of the
- // packet.
- IncomingCircuit channeldb.CircuitKey
-
- // OutgoingChanID is the destination channel for this packet.
- OutgoingChanID lnwire.ShortChannelID
-
- // Hash is the payment hash of the htlc.
- Hash lntypes.Hash
-
- // OutgoingExpiry is the absolute block height at which the outgoing
- // htlc expires.
- OutgoingExpiry uint32
-
- // OutgoingAmount is the amount to forward.
- OutgoingAmount lnwire.MilliSatoshi
-
- // IncomingExpiry is the absolute block height at which the incoming
- // htlc expires.
- IncomingExpiry uint32
-
- // IncomingAmount is the amount of the accepted htlc.
- IncomingAmount lnwire.MilliSatoshi
-
- // CustomRecords are user-defined records in the custom type range that
- // were included in the payload.
- CustomRecords record.CustomSet
-
- // OnionBlob is the onion packet for the next hop
- OnionBlob [lnwire.OnionPacketSize]byte
-}
-
-// InterceptedForward is passed to the ForwardInterceptor for every forwarded
-// htlc. It contains all the information about the packet which accordingly
-// the interceptor decides if to hold or not.
-// In addition this interface allows a later resolution by calling either
-// Resume, Settle or Fail.
-type InterceptedForward interface {
- // Packet returns the intercepted packet.
- Packet() InterceptedPacket
-
- // Resume notifies the intention to resume an existing hold forward. This
- // basically means the caller wants to resume with the default behavior for
- // this htlc which usually means forward it.
- Resume() er.R
-
- // Settle notifies the intention to settle an existing hold
- // forward with a given preimage.
- Settle(lntypes.Preimage) er.R
-
- // Fails notifies the intention to fail an existing hold forward
- Fail() er.R
-}
-
-// htlcNotifier is an interface which represents the input side of the
-// HtlcNotifier which htlc events are piped through. This interface is intended
-// to allow for mocking of the htlcNotifier in tests, so is unexported because
-// it is not needed outside of the htlcSwitch package.
-type htlcNotifier interface {
- // NotifyForwardingEvent notifies the HtlcNotifier than a htlc has been
- // forwarded.
- NotifyForwardingEvent(key HtlcKey, info HtlcInfo,
- eventType HtlcEventType)
-
- // NotifyIncomingLinkFailEvent notifies that a htlc has failed on our
- // incoming link. It takes an isReceive bool to differentiate between
- // our node's receives and forwards.
- NotifyLinkFailEvent(key HtlcKey, info HtlcInfo,
- eventType HtlcEventType, linkErr *LinkError, incoming bool)
-
- // NotifyForwardingFailEvent notifies the HtlcNotifier that a htlc we
- // forwarded has failed down the line.
- NotifyForwardingFailEvent(key HtlcKey, eventType HtlcEventType)
-
- // NotifySettleEvent notifies the HtlcNotifier that a htlc that we
- // committed to as part of a forward or a receive to our node has been
- // settled.
- NotifySettleEvent(key HtlcKey, eventType HtlcEventType)
-}
diff --git a/lnd/htlcswitch/link.go b/lnd/htlcswitch/link.go
deleted file mode 100644
index 274e52d2..00000000
--- a/lnd/htlcswitch/link.go
+++ /dev/null
@@ -1,3100 +0,0 @@
-package htlcswitch
-
-import (
- "bytes"
- "crypto/sha256"
- "fmt"
- "math"
- prand "math/rand"
- "sync"
- "sync/atomic"
- "time"
-
- "github.com/davecgh/go-spew/spew"
- "github.com/go-errors/errors"
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/lnd/channeldb"
- "github.com/pkt-cash/pktd/lnd/contractcourt"
- "github.com/pkt-cash/pktd/lnd/htlcswitch/hodl"
- "github.com/pkt-cash/pktd/lnd/htlcswitch/hop"
- "github.com/pkt-cash/pktd/lnd/invoices"
- "github.com/pkt-cash/pktd/lnd/lnpeer"
- "github.com/pkt-cash/pktd/lnd/lntypes"
- "github.com/pkt-cash/pktd/lnd/lnwallet"
- "github.com/pkt-cash/pktd/lnd/lnwallet/chainfee"
- "github.com/pkt-cash/pktd/lnd/lnwire"
- "github.com/pkt-cash/pktd/lnd/queue"
- "github.com/pkt-cash/pktd/lnd/ticker"
- "github.com/pkt-cash/pktd/pktlog/log"
- "github.com/pkt-cash/pktd/wire"
-)
-
-func init() {
- prand.Seed(time.Now().UnixNano())
-}
-
-const (
- // DefaultMaxOutgoingCltvExpiry is the maximum outgoing time lock that
- // the node accepts for forwarded payments. The value is relative to the
- // current block height. The reason to have a maximum is to prevent
- // funds getting locked up unreasonably long. Otherwise, an attacker
- // willing to lock its own funds too, could force the funds of this node
- // to be locked up for an indefinite (max int32) number of blocks.
- //
- // The value 2016 corresponds to on average two weeks worth of blocks
- // and is based on the maximum number of hops (20), the default CLTV
- // delta (40), and some extra margin to account for the other lightning
- // implementations and past lnd versions which used to have a default
- // CLTV delta of 144.
- DefaultMaxOutgoingCltvExpiry = 2016
-
- // DefaultMinLinkFeeUpdateTimeout represents the minimum interval in
- // which a link should propose to update its commitment fee rate.
- DefaultMinLinkFeeUpdateTimeout = 10 * time.Minute
-
- // DefaultMaxLinkFeeUpdateTimeout represents the maximum interval in
- // which a link should propose to update its commitment fee rate.
- DefaultMaxLinkFeeUpdateTimeout = 60 * time.Minute
-
- // DefaultMaxLinkFeeAllocation is the highest allocation we'll allow
- // a channel's commitment fee to be of its balance. This only applies to
- // the initiator of the channel.
- DefaultMaxLinkFeeAllocation float64 = 0.5
-)
-
-// ForwardingPolicy describes the set of constraints that a given ChannelLink
-// is to adhere to when forwarding HTLC's. For each incoming HTLC, this set of
-// constraints will be consulted in order to ensure that adequate fees are
-// paid, and our time-lock parameters are respected. In the event that an
-// incoming HTLC violates any of these constraints, it is to be _rejected_ with
-// the error possibly carrying along a ChannelUpdate message that includes the
-// latest policy.
-type ForwardingPolicy struct {
- // MinHTLC is the smallest HTLC that is to be forwarded.
- MinHTLCOut lnwire.MilliSatoshi
-
- // MaxHTLC is the largest HTLC that is to be forwarded.
- MaxHTLC lnwire.MilliSatoshi
-
- // BaseFee is the base fee, expressed in milli-satoshi that must be
- // paid for each incoming HTLC. This field, combined with FeeRate is
- // used to compute the required fee for a given HTLC.
- BaseFee lnwire.MilliSatoshi
-
- // FeeRate is the fee rate, expressed in milli-satoshi that must be
- // paid for each incoming HTLC. This field combined with BaseFee is
- // used to compute the required fee for a given HTLC.
- FeeRate lnwire.MilliSatoshi
-
- // TimeLockDelta is the absolute time-lock value, expressed in blocks,
- // that will be subtracted from an incoming HTLC's timelock value to
- // create the time-lock value for the forwarded outgoing HTLC. The
- // following constraint MUST hold for an HTLC to be forwarded:
- //
- // * incomingHtlc.timeLock - timeLockDelta = fwdInfo.OutgoingCTLV
- //
- // where fwdInfo is the forwarding information extracted from the
- // per-hop payload of the incoming HTLC's onion packet.
- TimeLockDelta uint32
-
- // TODO(roasbeef): add fee module inside of switch
-}
-
-// ExpectedFee computes the expected fee for a given htlc amount. The value
-// returned from this function is to be used as a sanity check when forwarding
-// HTLC's to ensure that an incoming HTLC properly adheres to our propagated
-// forwarding policy.
-//
-// TODO(roasbeef): also add in current available channel bandwidth, inverse
-// func
-func ExpectedFee(f ForwardingPolicy,
- htlcAmt lnwire.MilliSatoshi) lnwire.MilliSatoshi {
-
- return f.BaseFee + (htlcAmt*f.FeeRate)/1000000
-}
-
-// ChannelLinkConfig defines the configuration for the channel link. ALL
-// elements within the configuration MUST be non-nil for channel link to carry
-// out its duties.
-type ChannelLinkConfig struct {
- // FwrdingPolicy is the initial forwarding policy to be used when
- // deciding whether to forwarding incoming HTLC's or not. This value
- // can be updated with subsequent calls to UpdateForwardingPolicy
- // targeted at a given ChannelLink concrete interface implementation.
- FwrdingPolicy ForwardingPolicy
-
- // Circuits provides restricted access to the switch's circuit map,
- // allowing the link to open and close circuits.
- Circuits CircuitModifier
-
- // Switch provides a reference to the HTLC switch, we only use this in
- // testing to access circuit operations not typically exposed by the
- // CircuitModifier.
- //
- // TODO(conner): remove after refactoring htlcswitch testing framework.
- Switch *Switch
-
- // ForwardPackets attempts to forward the batch of htlcs through the
- // switch. The function returns and error in case it fails to send one or
- // more packets. The link's quit signal should be provided to allow
- // cancellation of forwarding during link shutdown.
- ForwardPackets func(chan struct{}, ...*htlcPacket) er.R
-
- // DecodeHopIterators facilitates batched decoding of HTLC Sphinx onion
- // blobs, which are then used to inform how to forward an HTLC.
- //
- // NOTE: This function assumes the same set of readers and preimages
- // are always presented for the same identifier.
- DecodeHopIterators func([]byte, []hop.DecodeHopIteratorRequest) (
- []hop.DecodeHopIteratorResponse, er.R)
-
- // ExtractErrorEncrypter function is responsible for decoding HTLC
- // Sphinx onion blob, and creating onion failure obfuscator.
- ExtractErrorEncrypter hop.ErrorEncrypterExtracter
-
- // FetchLastChannelUpdate retrieves the latest routing policy for a
- // target channel. This channel will typically be the outgoing channel
- // specified when we receive an incoming HTLC. This will be used to
- // provide payment senders our latest policy when sending encrypted
- // error messages.
- FetchLastChannelUpdate func(lnwire.ShortChannelID) (*lnwire.ChannelUpdate, er.R)
-
- // Peer is a lightning network node with which we have the channel link
- // opened.
- Peer lnpeer.Peer
-
- // Registry is a sub-system which responsible for managing the invoices
- // in thread-safe manner.
- Registry InvoiceDatabase
-
- // PreimageCache is a global witness beacon that houses any new
- // preimages discovered by other links. We'll use this to add new
- // witnesses that we discover which will notify any sub-systems
- // subscribed to new events.
- PreimageCache contractcourt.WitnessBeacon
-
- // OnChannelFailure is a function closure that we'll call if the
- // channel failed for some reason. Depending on the severity of the
- // error, the closure potentially must force close this channel and
- // disconnect the peer.
- //
- // NOTE: The method must return in order for the ChannelLink to be able
- // to shut down properly.
- OnChannelFailure func(lnwire.ChannelID, lnwire.ShortChannelID,
- LinkFailureError)
-
- // UpdateContractSignals is a function closure that we'll use to update
- // outside sub-systems with the latest signals for our inner Lightning
- // channel. These signals will notify the caller when the channel has
- // been closed, or when the set of active HTLC's is updated.
- UpdateContractSignals func(*contractcourt.ContractSignals) er.R
-
- // ChainEvents is an active subscription to the chain watcher for this
- // channel to be notified of any on-chain activity related to this
- // channel.
- ChainEvents *contractcourt.ChainEventSubscription
-
- // FeeEstimator is an instance of a live fee estimator which will be
- // used to dynamically regulate the current fee of the commitment
- // transaction to ensure timely confirmation.
- FeeEstimator chainfee.Estimator
-
- // hodl.Mask is a bitvector composed of hodl.Flags, specifying breakpoints
- // for HTLC forwarding internal to the switch.
- //
- // NOTE: This should only be used for testing.
- HodlMask hodl.Mask
-
- // SyncStates is used to indicate that we need send the channel
- // reestablishment message to the remote peer. It should be done if our
- // clients have been restarted, or remote peer have been reconnected.
- SyncStates bool
-
- // BatchTicker is the ticker that determines the interval that we'll
- // use to check the batch to see if there're any updates we should
- // flush out. By batching updates into a single commit, we attempt to
- // increase throughput by maximizing the number of updates coalesced
- // into a single commit.
- BatchTicker ticker.Ticker
-
- // FwdPkgGCTicker is the ticker determining the frequency at which
- // garbage collection of forwarding packages occurs. We use a
- // time-based approach, as opposed to block epochs, as to not hinder
- // syncing.
- FwdPkgGCTicker ticker.Ticker
-
- // PendingCommitTicker is a ticker that allows the link to determine if
- // a locally initiated commitment dance gets stuck waiting for the
- // remote party to revoke.
- PendingCommitTicker ticker.Ticker
-
- // BatchSize is the max size of a batch of updates done to the link
- // before we do a state update.
- BatchSize uint32
-
- // UnsafeReplay will cause a link to replay the adds in its latest
- // commitment txn after the link is restarted. This should only be used
- // in testing, it is here to ensure the sphinx replay detection on the
- // receiving node is persistent.
- UnsafeReplay bool
-
- // MinFeeUpdateTimeout represents the minimum interval in which a link
- // will propose to update its commitment fee rate. A random timeout will
- // be selected between this and MaxFeeUpdateTimeout.
- MinFeeUpdateTimeout time.Duration
-
- // MaxFeeUpdateTimeout represents the maximum interval in which a link
- // will propose to update its commitment fee rate. A random timeout will
- // be selected between this and MinFeeUpdateTimeout.
- MaxFeeUpdateTimeout time.Duration
-
- // OutgoingCltvRejectDelta defines the number of blocks before expiry of
- // an htlc where we don't offer an htlc anymore. This should be at least
- // the outgoing broadcast delta, because in any case we don't want to
- // risk offering an htlc that triggers channel closure.
- OutgoingCltvRejectDelta uint32
-
- // TowerClient is an optional engine that manages the signing,
- // encrypting, and uploading of justice transactions to the daemon's
- // configured set of watchtowers.
- TowerClient TowerClient
-
- // MaxOutgoingCltvExpiry is the maximum outgoing timelock that the link
- // should accept for a forwarded HTLC. The value is relative to the
- // current block height.
- MaxOutgoingCltvExpiry uint32
-
- // MaxFeeAllocation is the highest allocation we'll allow a channel's
- // commitment fee to be of its balance. This only applies to the
- // initiator of the channel.
- MaxFeeAllocation float64
-
- // NotifyActiveLink allows the link to tell the ChannelNotifier when a
- // link is first started.
- NotifyActiveLink func(wire.OutPoint)
-
- // NotifyActiveChannel allows the link to tell the ChannelNotifier when
- // channels becomes active.
- NotifyActiveChannel func(wire.OutPoint)
-
- // NotifyInactiveChannel allows the switch to tell the ChannelNotifier
- // when channels become inactive.
- NotifyInactiveChannel func(wire.OutPoint)
-
- // HtlcNotifier is an instance of a htlcNotifier which we will pipe htlc
- // events through.
- HtlcNotifier htlcNotifier
-}
-
-// localUpdateAddMsg contains a locally initiated htlc and a channel that will
-// receive the outcome of the link processing. This channel must be buffered to
-// prevent the link from blocking.
-type localUpdateAddMsg struct {
- pkt *htlcPacket
- err chan er.R
-}
-
-// channelLink is the service which drives a channel's commitment update
-// state-machine. In the event that an HTLC needs to be propagated to another
-// link, the forward handler from config is used which sends HTLC to the
-// switch. Additionally, the link encapsulate logic of commitment protocol
-// message ordering and updates.
-type channelLink struct {
- // The following fields are only meant to be used *atomically*
- started int32
- reestablished int32
- shutdown int32
-
- // failed should be set to true in case a link error happens, making
- // sure we don't process any more updates.
- failed bool
-
- // keystoneBatch represents a volatile list of keystones that must be
- // written before attempting to sign the next commitment txn. These
- // represent all the HTLC's forwarded to the link from the switch. Once
- // we lock them into our outgoing commitment, then the circuit has a
- // keystone, and is fully opened.
- keystoneBatch []Keystone
-
- // openedCircuits is the set of all payment circuits that will be open
- // once we make our next commitment. After making the commitment we'll
- // ACK all these from our mailbox to ensure that they don't get
- // re-delivered if we reconnect.
- openedCircuits []CircuitKey
-
- // closedCircuits is the set of all payment circuits that will be
- // closed once we make our next commitment. After taking the commitment
- // we'll ACK all these to ensure that they don't get re-delivered if we
- // reconnect.
- closedCircuits []CircuitKey
-
- // channel is a lightning network channel to which we apply htlc
- // updates.
- channel *lnwallet.LightningChannel
-
- // shortChanID is the most up to date short channel ID for the link.
- shortChanID lnwire.ShortChannelID
-
- // cfg is a structure which carries all dependable fields/handlers
- // which may affect behaviour of the service.
- cfg ChannelLinkConfig
-
- // mailBox is the main interface between the outside world and the
- // link. All incoming messages will be sent over this mailBox. Messages
- // include new updates from our connected peer, and new packets to be
- // forwarded sent by the switch.
- mailBox MailBox
-
- // upstream is a channel that new messages sent from the remote peer to
- // the local peer will be sent across.
- upstream chan lnwire.Message
-
- // downstream is a channel in which new multi-hop HTLC's to be
- // forwarded will be sent across. Messages from this channel are sent
- // by the HTLC switch.
- downstream chan *htlcPacket
-
- // localUpdateAdd is a channel to which locally initiated HTLCs are
- // sent across.
- localUpdateAdd chan *localUpdateAddMsg
-
- // htlcUpdates is a channel that we'll use to update outside
- // sub-systems with the latest set of active HTLC's on our channel.
- htlcUpdates chan *contractcourt.ContractUpdate
-
- // updateFeeTimer is the timer responsible for updating the link's
- // commitment fee every time it fires.
- updateFeeTimer *time.Timer
-
- // uncommittedPreimages stores a list of all preimages that have been
- // learned since receiving the last CommitSig from the remote peer. The
- // batch will be flushed just before accepting the subsequent CommitSig
- // or on shutdown to avoid doing a write for each preimage received.
- uncommittedPreimages []lntypes.Preimage
-
- sync.RWMutex
-
- // hodlQueue is used to receive exit hop htlc resolutions from invoice
- // registry.
- hodlQueue *queue.ConcurrentQueue
-
- // hodlMap stores related htlc data for a circuit key. It allows
- // resolving those htlcs when we receive a message on hodlQueue.
- hodlMap map[channeldb.CircuitKey]hodlHtlc
-
- wg sync.WaitGroup
- quit chan struct{}
-}
-
-// hodlHtlc contains htlc data that is required for resolution.
-type hodlHtlc struct {
- pd *lnwallet.PaymentDescriptor
- obfuscator hop.ErrorEncrypter
-}
-
-// NewChannelLink creates a new instance of a ChannelLink given a configuration
-// and active channel that will be used to verify/apply updates to.
-func NewChannelLink(cfg ChannelLinkConfig,
- channel *lnwallet.LightningChannel) ChannelLink {
-
- return &channelLink{
- cfg: cfg,
- channel: channel,
- shortChanID: channel.ShortChanID(),
- // TODO(roasbeef): just do reserve here?
- htlcUpdates: make(chan *contractcourt.ContractUpdate),
- hodlMap: make(map[channeldb.CircuitKey]hodlHtlc),
- hodlQueue: queue.NewConcurrentQueue(10),
- quit: make(chan struct{}),
- localUpdateAdd: make(chan *localUpdateAddMsg),
- }
-}
-
-// A compile time check to ensure channelLink implements the ChannelLink
-// interface.
-var _ ChannelLink = (*channelLink)(nil)
-
-// Start starts all helper goroutines required for the operation of the channel
-// link.
-//
-// NOTE: Part of the ChannelLink interface.
-func (l *channelLink) Start() er.R {
- if !atomic.CompareAndSwapInt32(&l.started, 0, 1) {
- err := er.Errorf("channel link(%v): already started", l)
- log.Warn("already started")
- return err
- }
-
- log.Info("starting")
-
- // If the config supplied watchtower client, ensure the channel is
- // registered before trying to use it during operation.
- // TODO(halseth): support anchor types for watchtower.
- state := l.channel.State()
- if l.cfg.TowerClient != nil && state.ChanType.HasAnchors() {
- log.Warnf("Skipping tower registration for anchor " +
- "channel type")
- } else if l.cfg.TowerClient != nil && !state.ChanType.HasAnchors() {
- err := l.cfg.TowerClient.RegisterChannel(l.ChanID())
- if err != nil {
- return err
- }
- }
-
- l.mailBox.ResetMessages()
- l.hodlQueue.Start()
-
- // Before launching the htlcManager messages, revert any circuits that
- // were marked open in the switch's circuit map, but did not make it
- // into a commitment txn. We use the next local htlc index as the cut
- // off point, since all indexes below that are committed. This action
- // is only performed if the link's final short channel ID has been
- // assigned, otherwise we would try to trim the htlcs belonging to the
- // all-zero, hop.Source ID.
- if l.ShortChanID() != hop.Source {
- localHtlcIndex, err := l.channel.NextLocalHtlcIndex()
- if err != nil {
- return er.Errorf("unable to retrieve next local "+
- "htlc index: %v", err)
- }
-
- // NOTE: This is automatically done by the switch when it
- // starts up, but is necessary to prevent inconsistencies in
- // the case that the link flaps. This is a result of a link's
- // life-cycle being shorter than that of the switch.
- chanID := l.ShortChanID()
- err = l.cfg.Circuits.TrimOpenCircuits(chanID, localHtlcIndex)
- if err != nil {
- return er.Errorf("unable to trim circuits above "+
- "local htlc index %d: %v", localHtlcIndex, err)
- }
-
- // Since the link is live, before we start the link we'll update
- // the ChainArbitrator with the set of new channel signals for
- // this channel.
- //
- // TODO(roasbeef): split goroutines within channel arb to avoid
- go func() {
- signals := &contractcourt.ContractSignals{
- HtlcUpdates: l.htlcUpdates,
- ShortChanID: l.channel.ShortChanID(),
- }
-
- err := l.cfg.UpdateContractSignals(signals)
- if err != nil {
- log.Errorf("unable to update signals")
- }
- }()
- }
-
- l.updateFeeTimer = time.NewTimer(l.randomFeeUpdateTimeout())
-
- l.wg.Add(1)
- go l.htlcManager()
-
- return nil
-}
-
-// Stop gracefully stops all active helper goroutines, then waits until they've
-// exited.
-//
-// NOTE: Part of the ChannelLink interface.
-func (l *channelLink) Stop() {
- if !atomic.CompareAndSwapInt32(&l.shutdown, 0, 1) {
- log.Warn("already stopped")
- return
- }
-
- log.Info("stopping")
-
- // As the link is stopping, we are no longer interested in htlc
- // resolutions coming from the invoice registry.
- l.cfg.Registry.HodlUnsubscribeAll(l.hodlQueue.ChanIn())
-
- if l.cfg.ChainEvents.Cancel != nil {
- l.cfg.ChainEvents.Cancel()
- }
-
- l.updateFeeTimer.Stop()
- l.hodlQueue.Stop()
-
- close(l.quit)
- l.wg.Wait()
-
- // Now that the htlcManager has completely exited, reset the packet
- // courier. This allows the mailbox to revaluate any lingering Adds that
- // were delivered but didn't make it on a commitment to be failed back
- // if the link is offline for an extended period of time. The error is
- // ignored since it can only fail when the daemon is exiting.
- _ = l.mailBox.ResetPackets()
-
- // As a final precaution, we will attempt to flush any uncommitted
- // preimages to the preimage cache. The preimages should be re-delivered
- // after channel reestablishment, however this adds an extra layer of
- // protection in case the peer never returns. Without this, we will be
- // unable to settle any contracts depending on the preimages even though
- // we had learned them at some point.
- err := l.cfg.PreimageCache.AddPreimages(l.uncommittedPreimages...)
- if err != nil {
- log.Errorf("unable to add preimages=%v to cache: %v",
- l.uncommittedPreimages, err)
- }
-}
-
-// WaitForShutdown blocks until the link finishes shutting down, which includes
-// termination of all dependent goroutines.
-func (l *channelLink) WaitForShutdown() {
- l.wg.Wait()
-}
-
-// EligibleToForward returns a bool indicating if the channel is able to
-// actively accept requests to forward HTLC's. We're able to forward HTLC's if
-// we know the remote party's next revocation point. Otherwise, we can't
-// initiate new channel state. We also require that the short channel ID not be
-// the all-zero source ID, meaning that the channel has had its ID finalized.
-func (l *channelLink) EligibleToForward() bool {
- return l.channel.RemoteNextRevocation() != nil &&
- l.ShortChanID() != hop.Source &&
- l.isReestablished()
-}
-
-// isReestablished returns true if the link has successfully completed the
-// channel reestablishment dance.
-func (l *channelLink) isReestablished() bool {
- return atomic.LoadInt32(&l.reestablished) == 1
-}
-
-// markReestablished signals that the remote peer has successfully exchanged
-// channel reestablish messages and that the channel is ready to process
-// subsequent messages.
-func (l *channelLink) markReestablished() {
- atomic.StoreInt32(&l.reestablished, 1)
-}
-
-// sampleNetworkFee samples the current fee rate on the network to get into the
-// chain in a timely manner. The returned value is expressed in fee-per-kw, as
-// this is the native rate used when computing the fee for commitment
-// transactions, and the second-level HTLC transactions.
-func (l *channelLink) sampleNetworkFee() (chainfee.SatPerKWeight, er.R) {
- // We'll first query for the sat/kw recommended to be confirmed within 3
- // blocks.
- feePerKw, err := l.cfg.FeeEstimator.EstimateFeePerKW(3)
- if err != nil {
- return 0, err
- }
-
- log.Debugf("sampled fee rate for 3 block conf: %v sat/kw",
- int64(feePerKw))
-
- return feePerKw, nil
-}
-
-// shouldAdjustCommitFee returns true if we should update our commitment fee to
-// match that of the network fee. We'll only update our commitment fee if the
-// network fee is +/- 10% to our network fee.
-func shouldAdjustCommitFee(netFee, chanFee chainfee.SatPerKWeight) bool {
- switch {
- // If the network fee is greater than the commitment fee, then we'll
- // switch to it if it's at least 10% greater than the commit fee.
- case netFee > chanFee && netFee >= (chanFee+(chanFee*10)/100):
- return true
-
- // If the network fee is less than our commitment fee, then we'll
- // switch to it if it's at least 10% less than the commitment fee.
- case netFee < chanFee && netFee <= (chanFee-(chanFee*10)/100):
- return true
-
- // Otherwise, we won't modify our fee.
- default:
- return false
- }
-}
-
-// createFailureWithUpdate retrieves this link's last channel update message and
-// passes it into the callback. It expects a fully populated failure message.
-func (l *channelLink) createFailureWithUpdate(
- cb func(update *lnwire.ChannelUpdate) lnwire.FailureMessage) lnwire.FailureMessage {
-
- update, err := l.cfg.FetchLastChannelUpdate(l.ShortChanID())
- if err != nil {
- return &lnwire.FailTemporaryNodeFailure{}
- }
-
- return cb(update)
-}
-
-// syncChanState attempts to synchronize channel states with the remote party.
-// This method is to be called upon reconnection after the initial funding
-// flow. We'll compare out commitment chains with the remote party, and re-send
-// either a danging commit signature, a revocation, or both.
-func (l *channelLink) syncChanStates() er.R {
- log.Info("attempting to re-resynchronize")
-
- // First, we'll generate our ChanSync message to send to the other
- // side. Based on this message, the remote party will decide if they
- // need to retransmit any data or not.
- chanState := l.channel.State()
- localChanSyncMsg, err := chanState.ChanSyncMsg()
- if err != nil {
- return er.Errorf("unable to generate chan sync message for "+
- "ChannelPoint(%v)", l.channel.ChannelPoint())
- }
-
- if err := l.cfg.Peer.SendMessage(true, localChanSyncMsg); err != nil {
- return er.Errorf("unable to send chan sync message for "+
- "ChannelPoint(%v): %v", l.channel.ChannelPoint(), err)
- }
-
- var msgsToReSend []lnwire.Message
-
- // Next, we'll wait indefinitely to receive the ChanSync message. The
- // first message sent MUST be the ChanSync message.
- select {
- case msg := <-l.upstream:
- remoteChanSyncMsg, ok := msg.(*lnwire.ChannelReestablish)
- if !ok {
- return er.Errorf("first message sent to sync "+
- "should be ChannelReestablish, instead "+
- "received: %T", msg)
- }
-
- // If the remote party indicates that they think we haven't
- // done any state updates yet, then we'll retransmit the
- // funding locked message first. We do this, as at this point
- // we can't be sure if they've really received the
- // FundingLocked message.
- if remoteChanSyncMsg.NextLocalCommitHeight == 1 &&
- localChanSyncMsg.NextLocalCommitHeight == 1 &&
- !l.channel.IsPending() {
-
- log.Infof("resending FundingLocked message to peer")
-
- nextRevocation, err := l.channel.NextRevocationKey()
- if err != nil {
- return er.Errorf("unable to create next "+
- "revocation: %v", err)
- }
-
- fundingLockedMsg := lnwire.NewFundingLocked(
- l.ChanID(), nextRevocation,
- )
- err = l.cfg.Peer.SendMessage(false, fundingLockedMsg)
- if err != nil {
- return er.Errorf("unable to re-send "+
- "FundingLocked: %v", err)
- }
- }
-
- // In any case, we'll then process their ChanSync message.
- log.Info("received re-establishment message from remote side")
-
- var (
- openedCircuits []CircuitKey
- closedCircuits []CircuitKey
- )
-
- // We've just received a ChanSync message from the remote
- // party, so we'll process the message in order to determine
- // if we need to re-transmit any messages to the remote party.
- msgsToReSend, openedCircuits, closedCircuits, err =
- l.channel.ProcessChanSyncMsg(remoteChanSyncMsg)
- if err != nil {
- return err
- }
-
- // Repopulate any identifiers for circuits that may have been
- // opened or unclosed. This may happen if we needed to
- // retransmit a commitment signature message.
- l.openedCircuits = openedCircuits
- l.closedCircuits = closedCircuits
-
- // Ensure that all packets have been have been removed from the
- // link's mailbox.
- if err := l.ackDownStreamPackets(); err != nil {
- return err
- }
-
- if len(msgsToReSend) > 0 {
- log.Infof("sending %v updates to synchronize the "+
- "state", len(msgsToReSend))
- }
-
- // If we have any messages to retransmit, we'll do so
- // immediately so we return to a synchronized state as soon as
- // possible.
- for _, msg := range msgsToReSend {
- l.cfg.Peer.SendMessage(false, msg)
- }
-
- case <-l.quit:
- return ErrLinkShuttingDown.Default()
- }
-
- return nil
-}
-
-// resolveFwdPkgs loads any forwarding packages for this link from disk, and
-// reprocesses them in order. The primary goal is to make sure that any HTLCs
-// we previously received are reinstated in memory, and forwarded to the switch
-// if necessary. After a restart, this will also delete any previously
-// completed packages.
-func (l *channelLink) resolveFwdPkgs() er.R {
- fwdPkgs, err := l.channel.LoadFwdPkgs()
- if err != nil {
- return err
- }
-
- log.Debugf("loaded %d fwd pks", len(fwdPkgs))
-
- for _, fwdPkg := range fwdPkgs {
- if err := l.resolveFwdPkg(fwdPkg); err != nil {
- return err
- }
- }
-
- // If any of our reprocessing steps require an update to the commitment
- // txn, we initiate a state transition to capture all relevant changes.
- if l.channel.PendingLocalUpdateCount() > 0 {
- return l.updateCommitTx()
- }
-
- return nil
-}
-
-// resolveFwdPkg interprets the FwdState of the provided package, either
-// reprocesses any outstanding htlcs in the package, or performs garbage
-// collection on the package.
-func (l *channelLink) resolveFwdPkg(fwdPkg *channeldb.FwdPkg) er.R {
- // Remove any completed packages to clear up space.
- if fwdPkg.State == channeldb.FwdStateCompleted {
- log.Debugf("removing completed fwd pkg for height=%d",
- fwdPkg.Height)
-
- err := l.channel.RemoveFwdPkgs(fwdPkg.Height)
- if err != nil {
- log.Errorf("unable to remove fwd pkg for height=%d: "+
- "%v", fwdPkg.Height, err)
- return err
- }
- }
-
- // Otherwise this is either a new package or one has gone through
- // processing, but contains htlcs that need to be restored in memory.
- // We replay this forwarding package to make sure our local mem state
- // is resurrected, we mimic any original responses back to the remote
- // party, and re-forward the relevant HTLCs to the switch.
-
- // If the package is fully acked but not completed, it must still have
- // settles and fails to propagate.
- if !fwdPkg.SettleFailFilter.IsFull() {
- settleFails, err := lnwallet.PayDescsFromRemoteLogUpdates(
- fwdPkg.Source, fwdPkg.Height, fwdPkg.SettleFails,
- )
- if err != nil {
- log.Errorf("unable to process remote log updates: %v",
- err)
- return err
- }
- l.processRemoteSettleFails(fwdPkg, settleFails)
- }
-
- // Finally, replay *ALL ADDS* in this forwarding package. The
- // downstream logic is able to filter out any duplicates, but we must
- // shove the entire, original set of adds down the pipeline so that the
- // batch of adds presented to the sphinx router does not ever change.
- if !fwdPkg.AckFilter.IsFull() {
- adds, err := lnwallet.PayDescsFromRemoteLogUpdates(
- fwdPkg.Source, fwdPkg.Height, fwdPkg.Adds,
- )
- if err != nil {
- log.Errorf("unable to process remote log updates: %v",
- err)
- return err
- }
- l.processRemoteAdds(fwdPkg, adds)
-
- // If the link failed during processing the adds, we must
- // return to ensure we won't attempted to update the state
- // further.
- if l.failed {
- return er.Errorf("link failed while " +
- "processing remote adds")
- }
- }
-
- return nil
-}
-
-// fwdPkgGarbager periodically reads all forwarding packages from disk and
-// removes those that can be discarded. It is safe to do this entirely in the
-// background, since all state is coordinated on disk. This also ensures the
-// link can continue to process messages and interleave database accesses.
-//
-// NOTE: This MUST be run as a goroutine.
-func (l *channelLink) fwdPkgGarbager() {
- defer l.wg.Done()
-
- l.cfg.FwdPkgGCTicker.Resume()
- defer l.cfg.FwdPkgGCTicker.Stop()
-
- if err := l.loadAndRemove(); err != nil {
- log.Warnf("unable to run initial fwd pkgs gc: %v", err)
- }
-
- for {
- select {
- case <-l.cfg.FwdPkgGCTicker.Ticks():
- if err := l.loadAndRemove(); err != nil {
- log.Warnf("unable to remove fwd pkgs: %v",
- err)
- continue
- }
- case <-l.quit:
- return
- }
- }
-}
-
-// loadAndRemove loads all the channels forwarding packages and determines if
-// they can be removed. It is called once before the FwdPkgGCTicker ticks so that
-// a longer tick interval can be used.
-func (l *channelLink) loadAndRemove() er.R {
- fwdPkgs, err := l.channel.LoadFwdPkgs()
- if err != nil {
- return err
- }
-
- var removeHeights []uint64
- for _, fwdPkg := range fwdPkgs {
- if fwdPkg.State != channeldb.FwdStateCompleted {
- continue
- }
-
- removeHeights = append(removeHeights, fwdPkg.Height)
- }
-
- // If removeHeights is empty, return early so we don't use a db
- // transaction.
- if len(removeHeights) == 0 {
- return nil
- }
-
- return l.channel.RemoveFwdPkgs(removeHeights...)
-}
-
-// htlcManager is the primary goroutine which drives a channel's commitment
-// update state-machine in response to messages received via several channels.
-// This goroutine reads messages from the upstream (remote) peer, and also from
-// downstream channel managed by the channel link. In the event that an htlc
-// needs to be forwarded, then send-only forward handler is used which sends
-// htlc packets to the switch. Additionally, the this goroutine handles acting
-// upon all timeouts for any active HTLCs, manages the channel's revocation
-// window, and also the htlc trickle queue+timer for this active channels.
-//
-// NOTE: This MUST be run as a goroutine.
-func (l *channelLink) htlcManager() {
- defer func() {
- l.cfg.BatchTicker.Stop()
- l.wg.Done()
- log.Infof("exited")
- }()
-
- log.Infof("HTLC manager started, bandwidth=%v", l.Bandwidth())
-
- // Notify any clients that the link is now in the switch via an
- // ActiveLinkEvent.
- l.cfg.NotifyActiveLink(*l.ChannelPoint())
-
- // TODO(roasbeef): need to call wipe chan whenever D/C?
-
- // If this isn't the first time that this channel link has been
- // created, then we'll need to check to see if we need to
- // re-synchronize state with the remote peer. settledHtlcs is a map of
- // HTLC's that we re-settled as part of the channel state sync.
- if l.cfg.SyncStates {
- err := l.syncChanStates()
- if err != nil {
- log.Warnf("error when syncing channel states: %v", err)
-
- errr := er.Wrapped(err)
- errDataLoss, localDataLoss :=
- errr.(*lnwallet.ErrCommitSyncLocalDataLoss)
-
- switch {
- case ErrLinkShuttingDown.Is(err):
- log.Debugf("unable to sync channel states, " +
- "link is shutting down")
- return
-
- // We failed syncing the commit chains, probably
- // because the remote has lost state. We should force
- // close the channel.
- case lnwallet.ErrCommitSyncRemoteDataLoss.Is(err):
- fallthrough
-
- // The remote sent us an invalid last commit secret, we
- // should force close the channel.
- // TODO(halseth): and permanently ban the peer?
- case lnwallet.ErrInvalidLastCommitSecret.Is(err):
- fallthrough
-
- // The remote sent us a commit point different from
- // what they sent us before.
- // TODO(halseth): ban peer?
- case lnwallet.ErrInvalidLocalUnrevokedCommitPoint.Is(err):
- // We'll fail the link and tell the peer to
- // force close the channel. Note that the
- // database state is not updated here, but will
- // be updated when the close transaction is
- // ready to avoid that we go down before
- // storing the transaction in the db.
- l.fail(
- LinkFailureError{
- code: ErrSyncError,
- ForceClose: true,
- },
- "unable to synchronize channel "+
- "states: %v", err,
- )
- return
-
- // We have lost state and cannot safely force close the
- // channel. Fail the channel and wait for the remote to
- // hopefully force close it. The remote has sent us its
- // latest unrevoked commitment point, and we'll store
- // it in the database, such that we can attempt to
- // recover the funds if the remote force closes the
- // channel.
- case localDataLoss:
- err := l.channel.MarkDataLoss(
- errDataLoss.CommitPoint,
- )
- if err != nil {
- log.Errorf("unable to mark channel "+
- "data loss: %v", err)
- }
-
- // We determined the commit chains were not possible to
- // sync. We cautiously fail the channel, but don't
- // force close.
- // TODO(halseth): can we safely force close in any
- // cases where this error is returned?
- case lnwallet.ErrCannotSyncCommitChains.Is(err):
- if err := l.channel.MarkBorked(); err != nil {
- log.Errorf("unable to mark channel "+
- "borked: %v", err)
- }
-
- // Other, unspecified error.
- default:
- }
-
- l.fail(
- LinkFailureError{
- code: ErrRecoveryError,
- ForceClose: false,
- },
- "unable to synchronize channel "+
- "states: %v", err,
- )
- return
- }
- }
-
- // We've successfully reestablished the channel, mark it as such to
- // allow the switch to forward HTLCs in the outbound direction.
- l.markReestablished()
-
- // Now that we've received both funding locked and channel reestablish,
- // we can go ahead and send the active channel notification. We'll also
- // defer the inactive notification for when the link exits to ensure
- // that every active notification is matched by an inactive one.
- l.cfg.NotifyActiveChannel(*l.ChannelPoint())
- defer l.cfg.NotifyInactiveChannel(*l.ChannelPoint())
-
- // With the channel states synced, we now reset the mailbox to ensure
- // we start processing all unacked packets in order. This is done here
- // to ensure that all acknowledgments that occur during channel
- // resynchronization have taken affect, causing us only to pull unacked
- // packets after starting to read from the downstream mailbox.
- l.mailBox.ResetPackets()
-
- // After cleaning up any memory pertaining to incoming packets, we now
- // replay our forwarding packages to handle any htlcs that can be
- // processed locally, or need to be forwarded out to the switch. We will
- // only attempt to resolve packages if our short chan id indicates that
- // the channel is not pending, otherwise we should have no htlcs to
- // reforward.
- if l.ShortChanID() != hop.Source {
- if err := l.resolveFwdPkgs(); err != nil {
- l.fail(LinkFailureError{code: ErrInternalError},
- "unable to resolve fwd pkgs: %v", err)
- return
- }
-
- // With our link's in-memory state fully reconstructed, spawn a
- // goroutine to manage the reclamation of disk space occupied by
- // completed forwarding packages.
- l.wg.Add(1)
- go l.fwdPkgGarbager()
- }
-
- for {
- // We must always check if we failed at some point processing
- // the last update before processing the next.
- if l.failed {
- log.Errorf("link failed, exiting htlcManager")
- return
- }
-
- // If the previous event resulted in a non-empty batch, resume
- // the batch ticker so that it can be cleared. Otherwise pause
- // the ticker to prevent waking up the htlcManager while the
- // batch is empty.
- if l.channel.PendingLocalUpdateCount() > 0 {
- l.cfg.BatchTicker.Resume()
- } else {
- l.cfg.BatchTicker.Pause()
- }
-
- select {
- // Our update fee timer has fired, so we'll check the network
- // fee to see if we should adjust our commitment fee.
- case <-l.updateFeeTimer.C:
- l.updateFeeTimer.Reset(l.randomFeeUpdateTimeout())
-
- // If we're not the initiator of the channel, don't we
- // don't control the fees, so we can ignore this.
- if !l.channel.IsInitiator() {
- continue
- }
-
- // If we are the initiator, then we'll sample the
- // current fee rate to get into the chain within 3
- // blocks.
- netFee, err := l.sampleNetworkFee()
- if err != nil {
- log.Errorf("unable to sample network fee: %v",
- err)
- continue
- }
-
- // We'll check to see if we should update the fee rate
- // based on our current set fee rate. We'll cap the new
- // fee rate to our max fee allocation.
- commitFee := l.channel.CommitFeeRate()
- maxFee := l.channel.MaxFeeRate(l.cfg.MaxFeeAllocation)
- newCommitFee := chainfee.SatPerKWeight(
- math.Min(float64(netFee), float64(maxFee)),
- )
- if !shouldAdjustCommitFee(newCommitFee, commitFee) {
- continue
- }
-
- // If we do, then we'll send a new UpdateFee message to
- // the remote party, to be locked in with a new update.
- if err := l.updateChannelFee(newCommitFee); err != nil {
- log.Errorf("unable to update fee rate: %v",
- err)
- continue
- }
-
- // The underlying channel has notified us of a unilateral close
- // carried out by the remote peer. In the case of such an
- // event, we'll wipe the channel state from the peer, and mark
- // the contract as fully settled. Afterwards we can exit.
- //
- // TODO(roasbeef): add force closure? also breach?
- case <-l.cfg.ChainEvents.RemoteUnilateralClosure:
- log.Warnf("remote peer has closed on-chain")
-
- // TODO(roasbeef): remove all together
- go func() {
- chanPoint := l.channel.ChannelPoint()
- l.cfg.Peer.WipeChannel(chanPoint)
- }()
-
- return
-
- case <-l.cfg.BatchTicker.Ticks():
- // Attempt to extend the remote commitment chain
- // including all the currently pending entries. If the
- // send was unsuccessful, then abandon the update,
- // waiting for the revocation window to open up.
- if !l.updateCommitTxOrFail() {
- return
- }
-
- case <-l.cfg.PendingCommitTicker.Ticks():
- l.fail(LinkFailureError{code: ErrRemoteUnresponsive},
- "unable to complete dance")
- return
-
- // A message from the switch was just received. This indicates
- // that the link is an intermediate hop in a multi-hop HTLC
- // circuit.
- case pkt := <-l.downstream:
- l.handleDownstreamPkt(pkt)
-
- // A message containing a locally initiated add was received.
- case msg := <-l.localUpdateAdd:
- msg.err <- l.handleDownstreamUpdateAdd(msg.pkt)
-
- // A message from the connected peer was just received. This
- // indicates that we have a new incoming HTLC, either directly
- // for us, or part of a multi-hop HTLC circuit.
- case msg := <-l.upstream:
- l.handleUpstreamMsg(msg)
-
- // A htlc resolution is received. This means that we now have a
- // resolution for a previously accepted htlc.
- case hodlItem := <-l.hodlQueue.ChanOut():
- htlcResolution := hodlItem.(invoices.HtlcResolution)
- err := l.processHodlQueue(htlcResolution)
- if err != nil {
- l.fail(LinkFailureError{code: ErrInternalError},
- fmt.Sprintf("process hodl queue: %v",
- err.String()),
- )
- return
- }
-
- case <-l.quit:
- return
- }
- }
-}
-
-// processHodlQueue processes a received htlc resolution and continues reading
-// from the hodl queue until no more resolutions remain. When this function
-// returns without an error, the commit tx should be updated.
-func (l *channelLink) processHodlQueue(
- firstResolution invoices.HtlcResolution) er.R {
-
- // Try to read all waiting resolution messages, so that they can all be
- // processed in a single commitment tx update.
- htlcResolution := firstResolution
-loop:
- for {
- // Lookup all hodl htlcs that can be failed or settled with this event.
- // The hodl htlc must be present in the map.
- circuitKey := htlcResolution.CircuitKey()
- hodlHtlc, ok := l.hodlMap[circuitKey]
- if !ok {
- return er.Errorf("hodl htlc not found: %v", circuitKey)
- }
-
- if err := l.processHtlcResolution(htlcResolution, hodlHtlc); err != nil {
- return err
- }
-
- // Clean up hodl map.
- delete(l.hodlMap, circuitKey)
-
- select {
- case item := <-l.hodlQueue.ChanOut():
- htlcResolution = item.(invoices.HtlcResolution)
- default:
- break loop
- }
- }
-
- // Update the commitment tx.
- if err := l.updateCommitTx(); err != nil {
- return er.Errorf("unable to update commitment: %v", err)
- }
-
- return nil
-}
-
-// processHtlcResolution applies a received htlc resolution to the provided
-// htlc. When this function returns without an error, the commit tx should be
-// updated.
-func (l *channelLink) processHtlcResolution(resolution invoices.HtlcResolution,
- htlc hodlHtlc) er.R {
-
- circuitKey := resolution.CircuitKey()
-
- // Determine required action for the resolution based on the type of
- // resolution we have received.
- switch res := resolution.(type) {
- // Settle htlcs that returned a settle resolution using the preimage
- // in the resolution.
- case *invoices.HtlcSettleResolution:
- log.Debugf("received settle resolution for %v "+
- "with outcome: %v", circuitKey, res.Outcome)
-
- return l.settleHTLC(res.Preimage, htlc.pd)
-
- // For htlc failures, we get the relevant failure message based
- // on the failure resolution and then fail the htlc.
- case *invoices.HtlcFailResolution:
- log.Debugf("received cancel resolution for "+
- "%v with outcome: %v", circuitKey, res.Outcome)
-
- // Get the lnwire failure message based on the resolution
- // result.
- failure := getResolutionFailure(res, htlc.pd.Amount)
-
- l.sendHTLCError(
- htlc.pd, failure, htlc.obfuscator, true,
- )
- return nil
-
- // Fail if we do not get a settle of fail resolution, since we
- // are only expecting to handle settles and fails.
- default:
- return er.Errorf("unknown htlc resolution type: %T",
- resolution)
- }
-}
-
-// getResolutionFailure returns the wire message that a htlc resolution should
-// be failed with.
-func getResolutionFailure(resolution *invoices.HtlcFailResolution,
- amount lnwire.MilliSatoshi) *LinkError {
-
- // If the resolution has been resolved as part of a MPP timeout,
- // we need to fail the htlc with lnwire.FailMppTimeout.
- if resolution.Outcome == invoices.ResultMppTimeout {
- return NewDetailedLinkError(
- &lnwire.FailMPPTimeout{}, resolution.Outcome,
- )
- }
-
- // If the htlc is not a MPP timeout, we fail it with
- // FailIncorrectDetails. This error is sent for invoice payment
- // failures such as underpayment/ expiry too soon and hodl invoices
- // (which return FailIncorrectDetails to avoid leaking information).
- incorrectDetails := lnwire.NewFailIncorrectDetails(
- amount, uint32(resolution.AcceptHeight),
- )
-
- return NewDetailedLinkError(incorrectDetails, resolution.Outcome)
-}
-
-// randomFeeUpdateTimeout returns a random timeout between the bounds defined
-// within the link's configuration that will be used to determine when the link
-// should propose an update to its commitment fee rate.
-func (l *channelLink) randomFeeUpdateTimeout() time.Duration {
- lower := int64(l.cfg.MinFeeUpdateTimeout)
- upper := int64(l.cfg.MaxFeeUpdateTimeout)
- return time.Duration(prand.Int63n(upper-lower) + lower)
-}
-
-// handleDownstreamUpdateAdd processes an UpdateAddHTLC packet sent from the
-// downstream HTLC Switch.
-func (l *channelLink) handleDownstreamUpdateAdd(pkt *htlcPacket) er.R {
- htlc, ok := pkt.htlc.(*lnwire.UpdateAddHTLC)
- if !ok {
- return er.New("not an UpdateAddHTLC packet")
- }
-
- // If hodl.AddOutgoing mode is active, we exit early to simulate
- // arbitrary delays between the switch adding an ADD to the
- // mailbox, and the HTLC being added to the commitment state.
- if l.cfg.HodlMask.Active(hodl.AddOutgoing) {
- log.Warnf(hodl.AddOutgoing.Warning())
- l.mailBox.AckPacket(pkt.inKey())
- return nil
- }
-
- // A new payment has been initiated via the downstream channel,
- // so we add the new HTLC to our local log, then update the
- // commitment chains.
- htlc.ChanID = l.ChanID()
- openCircuitRef := pkt.inKey()
- index, err := l.channel.AddHTLC(htlc, &openCircuitRef)
- if err != nil {
- // The HTLC was unable to be added to the state machine,
- // as a result, we'll signal the switch to cancel the
- // pending payment.
- log.Warnf("Unable to handle downstream add HTLC: %v",
- err)
-
- // Remove this packet from the link's mailbox, this
- // prevents it from being reprocessed if the link
- // restarts and resets it mailbox. If this response
- // doesn't make it back to the originating link, it will
- // be rejected upon attempting to reforward the Add to
- // the switch, since the circuit was never fully opened,
- // and the forwarding package shows it as
- // unacknowledged.
- l.mailBox.FailAdd(pkt)
-
- return er.E(NewDetailedLinkError(
- lnwire.NewTemporaryChannelFailure(nil),
- OutgoingFailureDownstreamHtlcAdd,
- ))
- }
-
- log.Tracef("received downstream htlc: payment_hash=%x, "+
- "local_log_index=%v, pend_updates=%v",
- htlc.PaymentHash[:], index,
- l.channel.PendingLocalUpdateCount())
-
- pkt.outgoingChanID = l.ShortChanID()
- pkt.outgoingHTLCID = index
- htlc.ID = index
-
- log.Debugf("queueing keystone of ADD open circuit: %s->%s",
- pkt.inKey(), pkt.outKey())
-
- l.openedCircuits = append(l.openedCircuits, pkt.inKey())
- l.keystoneBatch = append(l.keystoneBatch, pkt.keystone())
-
- _ = l.cfg.Peer.SendMessage(false, htlc)
-
- // Send a forward event notification to htlcNotifier.
- l.cfg.HtlcNotifier.NotifyForwardingEvent(
- newHtlcKey(pkt),
- HtlcInfo{
- IncomingTimeLock: pkt.incomingTimeout,
- IncomingAmt: pkt.incomingAmount,
- OutgoingTimeLock: htlc.Expiry,
- OutgoingAmt: htlc.Amount,
- },
- getEventType(pkt),
- )
-
- l.tryBatchUpdateCommitTx()
-
- return nil
-}
-
-// handleDownstreamPkt processes an HTLC packet sent from the downstream HTLC
-// Switch. Possible messages sent by the switch include requests to forward new
-// HTLCs, timeout previously cleared HTLCs, and finally to settle currently
-// cleared HTLCs with the upstream peer.
-//
-// TODO(roasbeef): add sync ntfn to ensure switch always has consistent view?
-func (l *channelLink) handleDownstreamPkt(pkt *htlcPacket) {
- switch htlc := pkt.htlc.(type) {
- case *lnwire.UpdateAddHTLC:
- // Handle add message. The returned error can be ignored,
- // because it is also sent through the mailbox.
- _ = l.handleDownstreamUpdateAdd(pkt)
-
- case *lnwire.UpdateFulfillHTLC:
- // If hodl.SettleOutgoing mode is active, we exit early to
- // simulate arbitrary delays between the switch adding the
- // SETTLE to the mailbox, and the HTLC being added to the
- // commitment state.
- if l.cfg.HodlMask.Active(hodl.SettleOutgoing) {
- log.Warnf(hodl.SettleOutgoing.Warning())
- l.mailBox.AckPacket(pkt.inKey())
- return
- }
-
- // An HTLC we forward to the switch has just settled somewhere
- // upstream. Therefore we settle the HTLC within the our local
- // state machine.
- inKey := pkt.inKey()
- err := l.channel.SettleHTLC(
- htlc.PaymentPreimage,
- pkt.incomingHTLCID,
- pkt.sourceRef,
- pkt.destRef,
- &inKey,
- )
- if err != nil {
- log.Errorf("unable to settle incoming HTLC for "+
- "circuit-key=%v: %v", inKey, err)
-
- // If the HTLC index for Settle response was not known
- // to our commitment state, it has already been
- // cleaned up by a prior response. We'll thus try to
- // clean up any lingering state to ensure we don't
- // continue reforwarding.
- errr := er.Wrapped(err)
- if _, ok := errr.(lnwallet.ErrUnknownHtlcIndex); ok {
- l.cleanupSpuriousResponse(pkt)
- }
-
- // Remove the packet from the link's mailbox to ensure
- // it doesn't get replayed after a reconnection.
- l.mailBox.AckPacket(inKey)
-
- return
- }
-
- log.Debugf("queueing removal of SETTLE closed circuit: "+
- "%s->%s", pkt.inKey(), pkt.outKey())
-
- l.closedCircuits = append(l.closedCircuits, pkt.inKey())
-
- // With the HTLC settled, we'll need to populate the wire
- // message to target the specific channel and HTLC to be
- // canceled.
- htlc.ChanID = l.ChanID()
- htlc.ID = pkt.incomingHTLCID
-
- // Then we send the HTLC settle message to the connected peer
- // so we can continue the propagation of the settle message.
- l.cfg.Peer.SendMessage(false, htlc)
-
- // Send a settle event notification to htlcNotifier.
- l.cfg.HtlcNotifier.NotifySettleEvent(
- newHtlcKey(pkt),
- getEventType(pkt),
- )
-
- // Immediately update the commitment tx to minimize latency.
- l.updateCommitTxOrFail()
-
- case *lnwire.UpdateFailHTLC:
- // If hodl.FailOutgoing mode is active, we exit early to
- // simulate arbitrary delays between the switch adding a FAIL to
- // the mailbox, and the HTLC being added to the commitment
- // state.
- if l.cfg.HodlMask.Active(hodl.FailOutgoing) {
- log.Warnf(hodl.FailOutgoing.Warning())
- l.mailBox.AckPacket(pkt.inKey())
- return
- }
-
- // An HTLC cancellation has been triggered somewhere upstream,
- // we'll remove then HTLC from our local state machine.
- inKey := pkt.inKey()
- err := l.channel.FailHTLC(
- pkt.incomingHTLCID,
- htlc.Reason,
- pkt.sourceRef,
- pkt.destRef,
- &inKey,
- )
- if err != nil {
- log.Errorf("unable to cancel incoming HTLC for "+
- "circuit-key=%v: %v", inKey, err)
-
- // If the HTLC index for Fail response was not known to
- // our commitment state, it has already been cleaned up
- // by a prior response. We'll thus try to clean up any
- // lingering state to ensure we don't continue
- // reforwarding.
- errr := er.Wrapped(err)
- if _, ok := errr.(lnwallet.ErrUnknownHtlcIndex); ok {
- l.cleanupSpuriousResponse(pkt)
- }
-
- // Remove the packet from the link's mailbox to ensure
- // it doesn't get replayed after a reconnection.
- l.mailBox.AckPacket(inKey)
-
- return
- }
-
- log.Debugf("queueing removal of FAIL closed circuit: %s->%s",
- pkt.inKey(), pkt.outKey())
-
- l.closedCircuits = append(l.closedCircuits, pkt.inKey())
-
- // With the HTLC removed, we'll need to populate the wire
- // message to target the specific channel and HTLC to be
- // canceled. The "Reason" field will have already been set
- // within the switch.
- htlc.ChanID = l.ChanID()
- htlc.ID = pkt.incomingHTLCID
-
- // We send the HTLC message to the peer which initially created
- // the HTLC.
- l.cfg.Peer.SendMessage(false, htlc)
-
- // If the packet does not have a link failure set, it failed
- // further down the route so we notify a forwarding failure.
- // Otherwise, we notify a link failure because it failed at our
- // node.
- if pkt.linkFailure != nil {
- l.cfg.HtlcNotifier.NotifyLinkFailEvent(
- newHtlcKey(pkt),
- newHtlcInfo(pkt),
- getEventType(pkt),
- pkt.linkFailure,
- false,
- )
- } else {
- l.cfg.HtlcNotifier.NotifyForwardingFailEvent(
- newHtlcKey(pkt), getEventType(pkt),
- )
- }
-
- // Immediately update the commitment tx to minimize latency.
- l.updateCommitTxOrFail()
- }
-}
-
-// tryBatchUpdateCommitTx updates the commitment transaction if the batch is
-// full.
-func (l *channelLink) tryBatchUpdateCommitTx() {
- if l.channel.PendingLocalUpdateCount() < uint64(l.cfg.BatchSize) {
- return
- }
-
- l.updateCommitTxOrFail()
-}
-
-// cleanupSpuriousResponse attempts to ack any AddRef or SettleFailRef
-// associated with this packet. If successful in doing so, it will also purge
-// the open circuit from the circuit map and remove the packet from the link's
-// mailbox.
-func (l *channelLink) cleanupSpuriousResponse(pkt *htlcPacket) {
- inKey := pkt.inKey()
-
- log.Debugf("cleaning up spurious response for incoming "+
- "circuit-key=%v", inKey)
-
- // If the htlc packet doesn't have a source reference, it is unsafe to
- // proceed, as skipping this ack may cause the htlc to be reforwarded.
- if pkt.sourceRef == nil {
- log.Errorf("uanble to cleanup response for incoming "+
- "circuit-key=%v, does not contain source reference",
- inKey)
- return
- }
-
- // If the source reference is present, we will try to prevent this link
- // from resending the packet to the switch. To do so, we ack the AddRef
- // of the incoming HTLC belonging to this link.
- err := l.channel.AckAddHtlcs(*pkt.sourceRef)
- if err != nil {
- log.Errorf("unable to ack AddRef for incoming "+
- "circuit-key=%v: %v", inKey, err)
-
- // If this operation failed, it is unsafe to attempt removal of
- // the destination reference or circuit, so we exit early. The
- // cleanup may proceed with a different packet in the future
- // that succeeds on this step.
- return
- }
-
- // Now that we know this link will stop retransmitting Adds to the
- // switch, we can begin to teardown the response reference and circuit
- // map.
- //
- // If the packet includes a destination reference, then a response for
- // this HTLC was locked into the outgoing channel. Attempt to remove
- // this reference, so we stop retransmitting the response internally.
- // Even if this fails, we will proceed in trying to delete the circuit.
- // When retransmitting responses, the destination references will be
- // cleaned up if an open circuit is not found in the circuit map.
- if pkt.destRef != nil {
- err := l.channel.AckSettleFails(*pkt.destRef)
- if err != nil {
- log.Errorf("unable to ack SettleFailRef "+
- "for incoming circuit-key=%v: %v",
- inKey, err)
- }
- }
-
- log.Debugf("deleting circuit for incoming circuit-key=%x", inKey)
-
- // With all known references acked, we can now safely delete the circuit
- // from the switch's circuit map, as the state is no longer needed.
- err = l.cfg.Circuits.DeleteCircuits(inKey)
- if err != nil {
- log.Errorf("unable to delete circuit for "+
- "circuit-key=%v: %v", inKey, err)
- }
-}
-
-// handleUpstreamMsg processes wire messages related to commitment state
-// updates from the upstream peer. The upstream peer is the peer whom we have a
-// direct channel with, updating our respective commitment chains.
-func (l *channelLink) handleUpstreamMsg(msg lnwire.Message) {
- switch msg := msg.(type) {
-
- case *lnwire.UpdateAddHTLC:
- // We just received an add request from an upstream peer, so we
- // add it to our state machine, then add the HTLC to our
- // "settle" list in the event that we know the preimage.
- index, err := l.channel.ReceiveHTLC(msg)
- if err != nil {
- l.fail(LinkFailureError{code: ErrInvalidUpdate},
- "unable to handle upstream add HTLC: %v", err)
- return
- }
-
- log.Tracef("receive upstream htlc with payment hash(%x), "+
- "assigning index: %v", msg.PaymentHash[:], index)
-
- case *lnwire.UpdateFulfillHTLC:
- pre := msg.PaymentPreimage
- idx := msg.ID
- if err := l.channel.ReceiveHTLCSettle(pre, idx); err != nil {
- l.fail(
- LinkFailureError{
- code: ErrInvalidUpdate,
- ForceClose: true,
- },
- "unable to handle upstream settle HTLC: %v", err,
- )
- return
- }
-
- settlePacket := &htlcPacket{
- outgoingChanID: l.ShortChanID(),
- outgoingHTLCID: idx,
- htlc: &lnwire.UpdateFulfillHTLC{
- PaymentPreimage: pre,
- },
- }
-
- // Add the newly discovered preimage to our growing list of
- // uncommitted preimage. These will be written to the witness
- // cache just before accepting the next commitment signature
- // from the remote peer.
- l.uncommittedPreimages = append(l.uncommittedPreimages, pre)
-
- // Pipeline this settle, send it to the switch.
- go l.forwardBatch(settlePacket)
-
- case *lnwire.UpdateFailMalformedHTLC:
- // Convert the failure type encoded within the HTLC fail
- // message to the proper generic lnwire error code.
- var failure lnwire.FailureMessage
- switch msg.FailureCode {
- case lnwire.CodeInvalidOnionVersion:
- failure = &lnwire.FailInvalidOnionVersion{
- OnionSHA256: msg.ShaOnionBlob,
- }
- case lnwire.CodeInvalidOnionHmac:
- failure = &lnwire.FailInvalidOnionHmac{
- OnionSHA256: msg.ShaOnionBlob,
- }
-
- case lnwire.CodeInvalidOnionKey:
- failure = &lnwire.FailInvalidOnionKey{
- OnionSHA256: msg.ShaOnionBlob,
- }
- default:
- log.Warnf("unexpected failure code received in "+
- "UpdateFailMailformedHTLC: %v", msg.FailureCode)
-
- // We don't just pass back the error we received from
- // our successor. Otherwise we might report a failure
- // that penalizes us more than needed. If the onion that
- // we forwarded was correct, the node should have been
- // able to send back its own failure. The node did not
- // send back its own failure, so we assume there was a
- // problem with the onion and report that back. We reuse
- // the invalid onion key failure because there is no
- // specific error for this case.
- failure = &lnwire.FailInvalidOnionKey{
- OnionSHA256: msg.ShaOnionBlob,
- }
- }
-
- // With the error parsed, we'll convert the into it's opaque
- // form.
- var b bytes.Buffer
- if err := lnwire.EncodeFailure(&b, failure, 0); err != nil {
- log.Errorf("unable to encode malformed error: %v", err)
- return
- }
-
- // If remote side have been unable to parse the onion blob we
- // have sent to it, than we should transform the malformed HTLC
- // message to the usual HTLC fail message.
- err := l.channel.ReceiveFailHTLC(msg.ID, b.Bytes())
- if err != nil {
- l.fail(LinkFailureError{code: ErrInvalidUpdate},
- "unable to handle upstream fail HTLC: %v", err)
- return
- }
-
- case *lnwire.UpdateFailHTLC:
- idx := msg.ID
- err := l.channel.ReceiveFailHTLC(idx, msg.Reason[:])
- if err != nil {
- l.fail(LinkFailureError{code: ErrInvalidUpdate},
- "unable to handle upstream fail HTLC: %v", err)
- return
- }
-
- case *lnwire.CommitSig:
- // Since we may have learned new preimages for the first time,
- // we'll add them to our preimage cache. By doing this, we
- // ensure any contested contracts watched by any on-chain
- // arbitrators can now sweep this HTLC on-chain. We delay
- // committing the preimages until just before accepting the new
- // remote commitment, as afterwards the peer won't resend the
- // Settle messages on the next channel reestablishment. Doing so
- // allows us to more effectively batch this operation, instead
- // of doing a single write per preimage.
- err := l.cfg.PreimageCache.AddPreimages(
- l.uncommittedPreimages...,
- )
- if err != nil {
- l.fail(
- LinkFailureError{code: ErrInternalError},
- "unable to add preimages=%v to cache: %v",
- l.uncommittedPreimages, err,
- )
- return
- }
-
- // Instead of truncating the slice to conserve memory
- // allocations, we simply set the uncommitted preimage slice to
- // nil so that a new one will be initialized if any more
- // witnesses are discovered. We do this maximum size of the
- // slice can occupy 15KB, and want to ensure we release that
- // memory back to the runtime.
- l.uncommittedPreimages = nil
-
- // We just received a new updates to our local commitment
- // chain, validate this new commitment, closing the link if
- // invalid.
- err = l.channel.ReceiveNewCommitment(msg.CommitSig, msg.HtlcSigs)
- if err != nil {
- // If we were unable to reconstruct their proposed
- // commitment, then we'll examine the type of error. If
- // it's an InvalidCommitSigError, then we'll send a
- // direct error.
- var sendData []byte
- errr := er.Wrapped(err)
- switch errr.(type) {
- case *lnwallet.InvalidCommitSigError:
- sendData = []byte(err.String())
- case *lnwallet.InvalidHtlcSigError:
- sendData = []byte(err.String())
- }
- l.fail(
- LinkFailureError{
- code: ErrInvalidCommitment,
- ForceClose: true,
- SendData: sendData,
- },
- "ChannelPoint(%v): unable to accept new "+
- "commitment: %v",
- l.channel.ChannelPoint(), err,
- )
- return
- }
-
- // As we've just accepted a new state, we'll now
- // immediately send the remote peer a revocation for our prior
- // state.
- nextRevocation, currentHtlcs, err := l.channel.RevokeCurrentCommitment()
- if err != nil {
- log.Errorf("unable to revoke commitment: %v", err)
- return
- }
- l.cfg.Peer.SendMessage(false, nextRevocation)
-
- // Since we just revoked our commitment, we may have a new set
- // of HTLC's on our commitment, so we'll send them over our
- // HTLC update channel so any callers can be notified.
- select {
- case l.htlcUpdates <- &contractcourt.ContractUpdate{
- HtlcKey: contractcourt.LocalHtlcSet,
- Htlcs: currentHtlcs,
- }:
- case <-l.quit:
- return
- }
-
- // If both commitment chains are fully synced from our PoV,
- // then we don't need to reply with a signature as both sides
- // already have a commitment with the latest accepted.
- if !l.channel.OweCommitment(true) {
- return
- }
-
- // Otherwise, the remote party initiated the state transition,
- // so we'll reply with a signature to provide them with their
- // version of the latest commitment.
- if !l.updateCommitTxOrFail() {
- return
- }
-
- case *lnwire.RevokeAndAck:
- // We've received a revocation from the remote chain, if valid,
- // this moves the remote chain forward, and expands our
- // revocation window.
- fwdPkg, adds, settleFails, remoteHTLCs, err := l.channel.ReceiveRevocation(
- msg,
- )
- if err != nil {
- // TODO(halseth): force close?
- l.fail(LinkFailureError{code: ErrInvalidRevocation},
- "unable to accept revocation: %v", err)
- return
- }
-
- // The remote party now has a new primary commitment, so we'll
- // update the contract court to be aware of this new set (the
- // prior old remote pending).
- select {
- case l.htlcUpdates <- &contractcourt.ContractUpdate{
- HtlcKey: contractcourt.RemoteHtlcSet,
- Htlcs: remoteHTLCs,
- }:
- case <-l.quit:
- return
- }
-
- // If we have a tower client, we'll proceed in backing up the
- // state that was just revoked.
- // TODO(halseth): support anchor types for watchtower.
- state := l.channel.State()
- if l.cfg.TowerClient != nil && state.ChanType.HasAnchors() {
- log.Warnf("Skipping tower backup for anchor " +
- "channel type")
- } else if l.cfg.TowerClient != nil && !state.ChanType.HasAnchors() {
- breachInfo, err := lnwallet.NewBreachRetribution(
- state, state.RemoteCommitment.CommitHeight-1, 0,
- )
- if err != nil {
- l.fail(LinkFailureError{code: ErrInternalError},
- "failed to load breach info: %v", err)
- return
- }
-
- chanType := l.channel.State().ChanType
- chanID := l.ChanID()
- err = l.cfg.TowerClient.BackupState(
- &chanID, breachInfo, chanType.IsTweakless(),
- )
- if err != nil {
- l.fail(LinkFailureError{code: ErrInternalError},
- "unable to queue breach backup: %v", err)
- return
- }
- }
-
- l.processRemoteSettleFails(fwdPkg, settleFails)
- l.processRemoteAdds(fwdPkg, adds)
-
- // If the link failed during processing the adds, we must
- // return to ensure we won't attempted to update the state
- // further.
- if l.failed {
- return
- }
-
- // The revocation window opened up. If there are pending local
- // updates, try to update the commit tx. Pending updates could
- // already have been present because of a previously failed
- // update to the commit tx or freshly added in by
- // processRemoteAdds. Also in case there are no local updates,
- // but there are still remote updates that are not in the remote
- // commit tx yet, send out an update.
- if l.channel.OweCommitment(true) {
- if !l.updateCommitTxOrFail() {
- return
- }
- }
-
- case *lnwire.UpdateFee:
- // We received fee update from peer. If we are the initiator we
- // will fail the channel, if not we will apply the update.
- fee := chainfee.SatPerKWeight(msg.FeePerKw)
- if err := l.channel.ReceiveUpdateFee(fee); err != nil {
- l.fail(LinkFailureError{code: ErrInvalidUpdate},
- "error receiving fee update: %v", err)
- return
- }
- case *lnwire.Error:
- // Error received from remote, MUST fail channel, but should
- // only print the contents of the error message if all
- // characters are printable ASCII.
- l.fail(
- LinkFailureError{
- code: ErrRemoteError,
-
- // TODO(halseth): we currently don't fail the
- // channel permanently, as there are some sync
- // issues with other implementations that will
- // lead to them sending an error message, but
- // we can recover from on next connection. See
- // https://github.com/ElementsProject/lightning/issues/4212
- PermanentFailure: false,
- },
- "ChannelPoint(%v): received error from peer: %v",
- l.channel.ChannelPoint(), msg.Error(),
- )
- default:
- log.Warnf("received unknown message of type %T", msg)
- }
-
-}
-
-// ackDownStreamPackets is responsible for removing htlcs from a link's mailbox
-// for packets delivered from server, and cleaning up any circuits closed by
-// signing a previous commitment txn. This method ensures that the circuits are
-// removed from the circuit map before removing them from the link's mailbox,
-// otherwise it could be possible for some circuit to be missed if this link
-// flaps.
-func (l *channelLink) ackDownStreamPackets() er.R {
- // First, remove the downstream Add packets that were included in the
- // previous commitment signature. This will prevent the Adds from being
- // replayed if this link disconnects.
- for _, inKey := range l.openedCircuits {
- // In order to test the sphinx replay logic of the remote
- // party, unsafe replay does not acknowledge the packets from
- // the mailbox. We can then force a replay of any Add packets
- // held in memory by disconnecting and reconnecting the link.
- if l.cfg.UnsafeReplay {
- continue
- }
-
- log.Debugf("removing Add packet %s from mailbox", inKey)
- l.mailBox.AckPacket(inKey)
- }
-
- // Now, we will delete all circuits closed by the previous commitment
- // signature, which is the result of downstream Settle/Fail packets. We
- // batch them here to ensure circuits are closed atomically and for
- // performance.
- err := l.cfg.Circuits.DeleteCircuits(l.closedCircuits...)
- switch err {
- case nil:
- // Successful deletion.
-
- default:
- log.Errorf("unable to delete %d circuits: %v",
- len(l.closedCircuits), err)
- return err
- }
-
- // With the circuits removed from memory and disk, we now ack any
- // Settle/Fails in the mailbox to ensure they do not get redelivered
- // after startup. If forgive is enabled and we've reached this point,
- // the circuits must have been removed at some point, so it is now safe
- // to un-queue the corresponding Settle/Fails.
- for _, inKey := range l.closedCircuits {
- log.Debugf("removing Fail/Settle packet %s from mailbox",
- inKey)
- l.mailBox.AckPacket(inKey)
- }
-
- // Lastly, reset our buffers to be empty while keeping any acquired
- // growth in the backing array.
- l.openedCircuits = l.openedCircuits[:0]
- l.closedCircuits = l.closedCircuits[:0]
-
- return nil
-}
-
-// updateCommitTxOrFail updates the commitment tx and if that fails, it fails
-// the link.
-func (l *channelLink) updateCommitTxOrFail() bool {
- if err := l.updateCommitTx(); err != nil {
- l.fail(LinkFailureError{code: ErrInternalError},
- "unable to update commitment: %v", err)
- return false
- }
-
- return true
-}
-
-// updateCommitTx signs, then sends an update to the remote peer adding a new
-// commitment to their commitment chain which includes all the latest updates
-// we've received+processed up to this point.
-func (l *channelLink) updateCommitTx() er.R {
- // Preemptively write all pending keystones to disk, just in case the
- // HTLCs we have in memory are included in the subsequent attempt to
- // sign a commitment state.
- err := l.cfg.Circuits.OpenCircuits(l.keystoneBatch...)
- if err != nil {
- return err
- }
-
- // Reset the batch, but keep the backing buffer to avoid reallocating.
- l.keystoneBatch = l.keystoneBatch[:0]
-
- // If hodl.Commit mode is active, we will refrain from attempting to
- // commit any in-memory modifications to the channel state. Exiting here
- // permits testing of either the switch or link's ability to trim
- // circuits that have been opened, but unsuccessfully committed.
- if l.cfg.HodlMask.Active(hodl.Commit) {
- log.Warnf(hodl.Commit.Warning())
- return nil
- }
-
- theirCommitSig, htlcSigs, pendingHTLCs, err := l.channel.SignNextCommitment()
- if lnwallet.ErrNoWindow.Is(err) {
- l.cfg.PendingCommitTicker.Resume()
-
- log.Tracef("revocation window exhausted, unable to send: "+
- "%v, pend_updates=%v, dangling_closes%v",
- l.channel.PendingLocalUpdateCount(),
- log.C(func() string {
- return spew.Sdump(l.openedCircuits)
- }),
- log.C(func() string {
- return spew.Sdump(l.closedCircuits)
- }),
- )
- return nil
- } else if err != nil {
- return err
- }
-
- if err := l.ackDownStreamPackets(); err != nil {
- return err
- }
-
- l.cfg.PendingCommitTicker.Pause()
-
- // The remote party now has a new pending commitment, so we'll update
- // the contract court to be aware of this new set (the prior old remote
- // pending).
- select {
- case l.htlcUpdates <- &contractcourt.ContractUpdate{
- HtlcKey: contractcourt.RemotePendingHtlcSet,
- Htlcs: pendingHTLCs,
- }:
- case <-l.quit:
- return ErrLinkShuttingDown.Default()
- }
-
- commitSig := &lnwire.CommitSig{
- ChanID: l.ChanID(),
- CommitSig: theirCommitSig,
- HtlcSigs: htlcSigs,
- }
- l.cfg.Peer.SendMessage(false, commitSig)
-
- return nil
-}
-
-// Peer returns the representation of remote peer with which we have the
-// channel link opened.
-//
-// NOTE: Part of the ChannelLink interface.
-func (l *channelLink) Peer() lnpeer.Peer {
- return l.cfg.Peer
-}
-
-// ChannelPoint returns the channel outpoint for the channel link.
-// NOTE: Part of the ChannelLink interface.
-func (l *channelLink) ChannelPoint() *wire.OutPoint {
- return l.channel.ChannelPoint()
-}
-
-// ShortChanID returns the short channel ID for the channel link. The short
-// channel ID encodes the exact location in the main chain that the original
-// funding output can be found.
-//
-// NOTE: Part of the ChannelLink interface.
-func (l *channelLink) ShortChanID() lnwire.ShortChannelID {
- l.RLock()
- defer l.RUnlock()
-
- return l.shortChanID
-}
-
-// UpdateShortChanID updates the short channel ID for a link. This may be
-// required in the event that a link is created before the short chan ID for it
-// is known, or a re-org occurs, and the funding transaction changes location
-// within the chain.
-//
-// NOTE: Part of the ChannelLink interface.
-func (l *channelLink) UpdateShortChanID() (lnwire.ShortChannelID, er.R) {
- chanID := l.ChanID()
-
- // Refresh the channel state's short channel ID by loading it from disk.
- // This ensures that the channel state accurately reflects the updated
- // short channel ID.
- err := l.channel.State().RefreshShortChanID()
- if err != nil {
- log.Errorf("unable to refresh short_chan_id for chan_id=%v: "+
- "%v", chanID, err)
- return hop.Source, err
- }
-
- sid := l.channel.ShortChanID()
-
- log.Infof("updating to short_chan_id=%v for chan_id=%v", sid, chanID)
-
- l.Lock()
- l.shortChanID = sid
- l.Unlock()
-
- go func() {
- err := l.cfg.UpdateContractSignals(&contractcourt.ContractSignals{
- HtlcUpdates: l.htlcUpdates,
- ShortChanID: sid,
- })
- if err != nil {
- log.Errorf("unable to update signals")
- }
- }()
-
- // Now that the short channel ID has been properly updated, we can begin
- // garbage collecting any forwarding packages we create.
- l.wg.Add(1)
- go l.fwdPkgGarbager()
-
- return sid, nil
-}
-
-// ChanID returns the channel ID for the channel link. The channel ID is a more
-// compact representation of a channel's full outpoint.
-//
-// NOTE: Part of the ChannelLink interface.
-func (l *channelLink) ChanID() lnwire.ChannelID {
- return lnwire.NewChanIDFromOutPoint(l.channel.ChannelPoint())
-}
-
-// Bandwidth returns the total amount that can flow through the channel link at
-// this given instance. The value returned is expressed in millisatoshi and can
-// be used by callers when making forwarding decisions to determine if a link
-// can accept an HTLC.
-//
-// NOTE: Part of the ChannelLink interface.
-func (l *channelLink) Bandwidth() lnwire.MilliSatoshi {
- // Get the balance available on the channel for new HTLCs. This takes
- // the channel reserve into account so HTLCs up to this value won't
- // violate it.
- return l.channel.AvailableBalance()
-}
-
-// AttachMailBox updates the current mailbox used by this link, and hooks up
-// the mailbox's message and packet outboxes to the link's upstream and
-// downstream chans, respectively.
-func (l *channelLink) AttachMailBox(mailbox MailBox) {
- l.Lock()
- l.mailBox = mailbox
- l.upstream = mailbox.MessageOutBox()
- l.downstream = mailbox.PacketOutBox()
- l.Unlock()
-}
-
-// UpdateForwardingPolicy updates the forwarding policy for the target
-// ChannelLink. Once updated, the link will use the new forwarding policy to
-// govern if it an incoming HTLC should be forwarded or not. We assume that
-// fields that are zero are intentionally set to zero, so we'll use newPolicy to
-// update all of the link's FwrdingPolicy's values.
-//
-// NOTE: Part of the ChannelLink interface.
-func (l *channelLink) UpdateForwardingPolicy(newPolicy ForwardingPolicy) {
- l.Lock()
- defer l.Unlock()
-
- l.cfg.FwrdingPolicy = newPolicy
-}
-
-// CheckHtlcForward should return a nil error if the passed HTLC details
-// satisfy the current forwarding policy fo the target link. Otherwise,
-// a LinkError with a valid protocol failure message should be returned
-// in order to signal to the source of the HTLC, the policy consistency
-// issue.
-//
-// NOTE: Part of the ChannelLink interface.
-func (l *channelLink) CheckHtlcForward(payHash [32]byte,
- incomingHtlcAmt, amtToForward lnwire.MilliSatoshi,
- incomingTimeout, outgoingTimeout uint32,
- heightNow uint32) *LinkError {
-
- l.RLock()
- policy := l.cfg.FwrdingPolicy
- l.RUnlock()
-
- // First check whether the outgoing htlc satisfies the channel policy.
- err := l.canSendHtlc(
- policy, payHash, amtToForward, outgoingTimeout, heightNow,
- )
- if err != nil {
- return err
- }
-
- // Next, using the amount of the incoming HTLC, we'll calculate the
- // expected fee this incoming HTLC must carry in order to satisfy the
- // constraints of the outgoing link.
- expectedFee := ExpectedFee(policy, amtToForward)
-
- // If the actual fee is less than our expected fee, then we'll reject
- // this HTLC as it didn't provide a sufficient amount of fees, or the
- // values have been tampered with, or the send used incorrect/dated
- // information to construct the forwarding information for this hop. In
- // any case, we'll cancel this HTLC.
- actualFee := incomingHtlcAmt - amtToForward
- if incomingHtlcAmt < amtToForward || actualFee < expectedFee {
- log.Errorf("outgoing htlc(%x) has insufficient fee: "+
- "expected %v, got %v",
- payHash[:], int64(expectedFee), int64(actualFee))
-
- // As part of the returned error, we'll send our latest routing
- // policy so the sending node obtains the most up to date data.
- failure := l.createFailureWithUpdate(
- func(upd *lnwire.ChannelUpdate) lnwire.FailureMessage {
- return lnwire.NewFeeInsufficient(
- amtToForward, *upd,
- )
- },
- )
- return NewLinkError(failure)
- }
-
- // Finally, we'll ensure that the time-lock on the outgoing HTLC meets
- // the following constraint: the incoming time-lock minus our time-lock
- // delta should equal the outgoing time lock. Otherwise, whether the
- // sender messed up, or an intermediate node tampered with the HTLC.
- timeDelta := policy.TimeLockDelta
- if incomingTimeout < outgoingTimeout+timeDelta {
- log.Errorf("incoming htlc(%x) has incorrect time-lock value: "+
- "expected at least %v block delta, got %v block delta",
- payHash[:], timeDelta, incomingTimeout-outgoingTimeout)
-
- // Grab the latest routing policy so the sending node is up to
- // date with our current policy.
- failure := l.createFailureWithUpdate(
- func(upd *lnwire.ChannelUpdate) lnwire.FailureMessage {
- return lnwire.NewIncorrectCltvExpiry(
- incomingTimeout, *upd,
- )
- },
- )
- return NewLinkError(failure)
- }
-
- return nil
-}
-
-// CheckHtlcTransit should return a nil error if the passed HTLC details
-// satisfy the current channel policy. Otherwise, a LinkError with a
-// valid protocol failure message should be returned in order to signal
-// the violation. This call is intended to be used for locally initiated
-// payments for which there is no corresponding incoming htlc.
-func (l *channelLink) CheckHtlcTransit(payHash [32]byte,
- amt lnwire.MilliSatoshi, timeout uint32,
- heightNow uint32) *LinkError {
-
- l.RLock()
- policy := l.cfg.FwrdingPolicy
- l.RUnlock()
-
- return l.canSendHtlc(
- policy, payHash, amt, timeout, heightNow,
- )
-}
-
-// htlcSatifiesPolicyOutgoing checks whether the given htlc parameters satisfy
-// the channel's amount and time lock constraints.
-func (l *channelLink) canSendHtlc(policy ForwardingPolicy,
- payHash [32]byte, amt lnwire.MilliSatoshi, timeout uint32,
- heightNow uint32) *LinkError {
-
- // As our first sanity check, we'll ensure that the passed HTLC isn't
- // too small for the next hop. If so, then we'll cancel the HTLC
- // directly.
- if amt < policy.MinHTLCOut {
- log.Errorf("outgoing htlc(%x) is too small: min_htlc=%v, "+
- "htlc_value=%v", payHash[:], policy.MinHTLCOut,
- amt)
-
- // As part of the returned error, we'll send our latest routing
- // policy so the sending node obtains the most up to date data.
- failure := l.createFailureWithUpdate(
- func(upd *lnwire.ChannelUpdate) lnwire.FailureMessage {
- return lnwire.NewAmountBelowMinimum(
- amt, *upd,
- )
- },
- )
- return NewLinkError(failure)
- }
-
- // Next, ensure that the passed HTLC isn't too large. If so, we'll
- // cancel the HTLC directly.
- if policy.MaxHTLC != 0 && amt > policy.MaxHTLC {
- log.Errorf("outgoing htlc(%x) is too large: max_htlc=%v, "+
- "htlc_value=%v", payHash[:], policy.MaxHTLC, amt)
-
- // As part of the returned error, we'll send our latest routing
- // policy so the sending node obtains the most up-to-date data.
- failure := l.createFailureWithUpdate(
- func(upd *lnwire.ChannelUpdate) lnwire.FailureMessage {
- return lnwire.NewTemporaryChannelFailure(upd)
- },
- )
- return NewDetailedLinkError(failure, OutgoingFailureHTLCExceedsMax)
- }
-
- // We want to avoid offering an HTLC which will expire in the near
- // future, so we'll reject an HTLC if the outgoing expiration time is
- // too close to the current height.
- if timeout <= heightNow+l.cfg.OutgoingCltvRejectDelta {
- log.Errorf("htlc(%x) has an expiry that's too soon: "+
- "outgoing_expiry=%v, best_height=%v", payHash[:],
- timeout, heightNow)
- failure := l.createFailureWithUpdate(
- func(upd *lnwire.ChannelUpdate) lnwire.FailureMessage {
- return lnwire.NewExpiryTooSoon(*upd)
- },
- )
- return NewLinkError(failure)
- }
-
- // Check absolute max delta.
- if timeout > l.cfg.MaxOutgoingCltvExpiry+heightNow {
- log.Errorf("outgoing htlc(%x) has a time lock too far in "+
- "the future: got %v, but maximum is %v", payHash[:],
- timeout-heightNow, l.cfg.MaxOutgoingCltvExpiry)
-
- return NewLinkError(&lnwire.FailExpiryTooFar{})
- }
-
- // Check to see if there is enough balance in this channel.
- if amt > l.Bandwidth() {
- failure := l.createFailureWithUpdate(
- func(upd *lnwire.ChannelUpdate) lnwire.FailureMessage {
- return lnwire.NewTemporaryChannelFailure(upd)
- },
- )
- return NewDetailedLinkError(
- failure, OutgoingFailureInsufficientBalance,
- )
- }
-
- return nil
-}
-
-// Stats returns the statistics of channel link.
-//
-// NOTE: Part of the ChannelLink interface.
-func (l *channelLink) Stats() (uint64, lnwire.MilliSatoshi, lnwire.MilliSatoshi) {
- snapshot := l.channel.StateSnapshot()
-
- return snapshot.ChannelCommitment.CommitHeight,
- snapshot.TotalMSatSent,
- snapshot.TotalMSatReceived
-}
-
-// String returns the string representation of channel link.
-//
-// NOTE: Part of the ChannelLink interface.
-func (l *channelLink) String() string {
- return l.channel.ChannelPoint().String()
-}
-
-// HandleSwitchPacket handles the switch packets. This packets which might be
-// forwarded to us from another channel link in case the htlc update came from
-// another peer or if the update was created by user
-//
-// NOTE: Part of the ChannelLink interface.
-func (l *channelLink) HandleSwitchPacket(pkt *htlcPacket) er.R {
- log.Tracef("received switch packet inkey=%v, outkey=%v",
- pkt.inKey(), pkt.outKey())
-
- return l.mailBox.AddPacket(pkt)
-}
-
-// HandleLocalAddPacket handles a locally-initiated UpdateAddHTLC packet. It
-// will be processed synchronously.
-//
-// NOTE: Part of the ChannelLink interface.
-func (l *channelLink) HandleLocalAddPacket(pkt *htlcPacket) er.R {
- log.Tracef("received switch packet outkey=%v", pkt.outKey())
-
- // Create a buffered result channel to prevent the link from blocking.
- errChan := make(chan er.R, 1)
-
- select {
- case l.localUpdateAdd <- &localUpdateAddMsg{
- pkt: pkt,
- err: errChan,
- }:
- case <-l.quit:
- return ErrLinkShuttingDown.Default()
- }
-
- select {
- case err := <-errChan:
- return err
- case <-l.quit:
- return ErrLinkShuttingDown.Default()
- }
-}
-
-// HandleChannelUpdate handles the htlc requests as settle/add/fail which sent
-// to us from remote peer we have a channel with.
-//
-// NOTE: Part of the ChannelLink interface.
-func (l *channelLink) HandleChannelUpdate(message lnwire.Message) {
- l.mailBox.AddMessage(message)
-}
-
-// updateChannelFee updates the commitment fee-per-kw on this channel by
-// committing to an update_fee message.
-func (l *channelLink) updateChannelFee(feePerKw chainfee.SatPerKWeight) er.R {
-
- log.Infof("updating commit fee to %v sat/kw", feePerKw)
-
- // We skip sending the UpdateFee message if the channel is not
- // currently eligible to forward messages.
- if !l.EligibleToForward() {
- log.Debugf("skipping fee update for inactive channel")
- return nil
- }
-
- // First, we'll update the local fee on our commitment.
- if err := l.channel.UpdateFee(feePerKw); err != nil {
- return err
- }
-
- // We'll then attempt to send a new UpdateFee message, and also lock it
- // in immediately by triggering a commitment update.
- msg := lnwire.NewUpdateFee(l.ChanID(), uint32(feePerKw))
- if err := l.cfg.Peer.SendMessage(false, msg); err != nil {
- return err
- }
- return l.updateCommitTx()
-}
-
-// processRemoteSettleFails accepts a batch of settle/fail payment descriptors
-// after receiving a revocation from the remote party, and reprocesses them in
-// the context of the provided forwarding package. Any settles or fails that
-// have already been acknowledged in the forwarding package will not be sent to
-// the switch.
-func (l *channelLink) processRemoteSettleFails(fwdPkg *channeldb.FwdPkg,
- settleFails []*lnwallet.PaymentDescriptor) {
-
- if len(settleFails) == 0 {
- return
- }
-
- log.Debugf("settle-fail-filter %v", fwdPkg.SettleFailFilter)
-
- var switchPackets []*htlcPacket
- for i, pd := range settleFails {
- // Skip any settles or fails that have already been
- // acknowledged by the incoming link that originated the
- // forwarded Add.
- if fwdPkg.SettleFailFilter.Contains(uint16(i)) {
- continue
- }
-
- // TODO(roasbeef): rework log entries to a shared
- // interface.
-
- switch pd.EntryType {
-
- // A settle for an HTLC we previously forwarded HTLC has been
- // received. So we'll forward the HTLC to the switch which will
- // handle propagating the settle to the prior hop.
- case lnwallet.Settle:
- // If hodl.SettleIncoming is requested, we will not
- // forward the SETTLE to the switch and will not signal
- // a free slot on the commitment transaction.
- if l.cfg.HodlMask.Active(hodl.SettleIncoming) {
- log.Warnf(hodl.SettleIncoming.Warning())
- continue
- }
-
- settlePacket := &htlcPacket{
- outgoingChanID: l.ShortChanID(),
- outgoingHTLCID: pd.ParentIndex,
- destRef: pd.DestRef,
- htlc: &lnwire.UpdateFulfillHTLC{
- PaymentPreimage: pd.RPreimage,
- },
- }
-
- // Add the packet to the batch to be forwarded, and
- // notify the overflow queue that a spare spot has been
- // freed up within the commitment state.
- switchPackets = append(switchPackets, settlePacket)
-
- // A failureCode message for a previously forwarded HTLC has
- // been received. As a result a new slot will be freed up in
- // our commitment state, so we'll forward this to the switch so
- // the backwards undo can continue.
- case lnwallet.Fail:
- // If hodl.SettleIncoming is requested, we will not
- // forward the FAIL to the switch and will not signal a
- // free slot on the commitment transaction.
- if l.cfg.HodlMask.Active(hodl.FailIncoming) {
- log.Warnf(hodl.FailIncoming.Warning())
- continue
- }
-
- // Fetch the reason the HTLC was canceled so we can
- // continue to propagate it. This failure originated
- // from another node, so the linkFailure field is not
- // set on the packet.
- failPacket := &htlcPacket{
- outgoingChanID: l.ShortChanID(),
- outgoingHTLCID: pd.ParentIndex,
- destRef: pd.DestRef,
- htlc: &lnwire.UpdateFailHTLC{
- Reason: lnwire.OpaqueReason(
- pd.FailReason,
- ),
- },
- }
-
- // If the failure message lacks an HMAC (but includes
- // the 4 bytes for encoding the message and padding
- // lengths, then this means that we received it as an
- // UpdateFailMalformedHTLC. As a result, we'll signal
- // that we need to convert this error within the switch
- // to an actual error, by encrypting it as if we were
- // the originating hop.
- convertedErrorSize := lnwire.FailureMessageLength + 4
- if len(pd.FailReason) == convertedErrorSize {
- failPacket.convertedError = true
- }
-
- // Add the packet to the batch to be forwarded, and
- // notify the overflow queue that a spare spot has been
- // freed up within the commitment state.
- switchPackets = append(switchPackets, failPacket)
- }
- }
-
- // Only spawn the task forward packets we have a non-zero number.
- if len(switchPackets) > 0 {
- go l.forwardBatch(switchPackets...)
- }
-}
-
-// processRemoteAdds serially processes each of the Add payment descriptors
-// which have been "locked-in" by receiving a revocation from the remote party.
-// The forwarding package provided instructs how to process this batch,
-// indicating whether this is the first time these Adds are being processed, or
-// whether we are reprocessing as a result of a failure or restart. Adds that
-// have already been acknowledged in the forwarding package will be ignored.
-func (l *channelLink) processRemoteAdds(fwdPkg *channeldb.FwdPkg,
- lockedInHtlcs []*lnwallet.PaymentDescriptor) {
-
- log.Tracef("processing %d remote adds for height %d",
- len(lockedInHtlcs), fwdPkg.Height)
-
- decodeReqs := make(
- []hop.DecodeHopIteratorRequest, 0, len(lockedInHtlcs),
- )
- for _, pd := range lockedInHtlcs {
- switch pd.EntryType {
-
- // TODO(conner): remove type switch?
- case lnwallet.Add:
- // Before adding the new htlc to the state machine,
- // parse the onion object in order to obtain the
- // routing information with DecodeHopIterator function
- // which process the Sphinx packet.
- onionReader := bytes.NewReader(pd.OnionBlob)
-
- req := hop.DecodeHopIteratorRequest{
- OnionReader: onionReader,
- RHash: pd.RHash[:],
- IncomingCltv: pd.Timeout,
- }
-
- decodeReqs = append(decodeReqs, req)
- }
- }
-
- // Atomically decode the incoming htlcs, simultaneously checking for
- // replay attempts. A particular index in the returned, spare list of
- // channel iterators should only be used if the failure code at the
- // same index is lnwire.FailCodeNone.
- decodeResps, sphinxErr := l.cfg.DecodeHopIterators(
- fwdPkg.ID(), decodeReqs,
- )
- if sphinxErr != nil {
- l.fail(LinkFailureError{code: ErrInternalError},
- "unable to decode hop iterators: %v", sphinxErr)
- return
- }
-
- var switchPackets []*htlcPacket
-
- for i, pd := range lockedInHtlcs {
- idx := uint16(i)
-
- if fwdPkg.State == channeldb.FwdStateProcessed &&
- fwdPkg.AckFilter.Contains(idx) {
-
- // If this index is already found in the ack filter,
- // the response to this forwarding decision has already
- // been committed by one of our commitment txns. ADDs
- // in this state are waiting for the rest of the fwding
- // package to get acked before being garbage collected.
- continue
- }
-
- // An incoming HTLC add has been full-locked in. As a result we
- // can now examine the forwarding details of the HTLC, and the
- // HTLC itself to decide if: we should forward it, cancel it,
- // or are able to settle it (and it adheres to our fee related
- // constraints).
-
- // Fetch the onion blob that was included within this processed
- // payment descriptor.
- var onionBlob [lnwire.OnionPacketSize]byte
- copy(onionBlob[:], pd.OnionBlob)
-
- // Before adding the new htlc to the state machine, parse the
- // onion object in order to obtain the routing information with
- // DecodeHopIterator function which process the Sphinx packet.
- chanIterator, failureCode := decodeResps[i].Result()
- if failureCode != lnwire.CodeNone {
- // If we're unable to process the onion blob than we
- // should send the malformed htlc error to payment
- // sender.
- l.sendMalformedHTLCError(pd.HtlcIndex, failureCode,
- onionBlob[:], pd.SourceRef)
-
- log.Errorf("unable to decode onion hop "+
- "iterator: %v", failureCode)
- continue
- }
-
- // Retrieve onion obfuscator from onion blob in order to
- // produce initial obfuscation of the onion failureCode.
- obfuscator, failureCode := chanIterator.ExtractErrorEncrypter(
- l.cfg.ExtractErrorEncrypter,
- )
- if failureCode != lnwire.CodeNone {
- // If we're unable to process the onion blob than we
- // should send the malformed htlc error to payment
- // sender.
- l.sendMalformedHTLCError(
- pd.HtlcIndex, failureCode, onionBlob[:], pd.SourceRef,
- )
-
- log.Errorf("unable to decode onion "+
- "obfuscator: %v", failureCode)
- continue
- }
-
- heightNow := l.cfg.Switch.BestHeight()
-
- pld, err := chanIterator.HopPayload()
- if err != nil {
- // If we're unable to process the onion payload, or we
- // received invalid onion payload failure, then we
- // should send an error back to the caller so the HTLC
- // can be canceled.
- var failedType uint64
- if e, ok := er.Wrapped(err).(hop.ErrInvalidPayload); ok {
- failedType = uint64(e.Type)
- }
-
- // TODO: currently none of the test unit infrastructure
- // is setup to handle TLV payloads, so testing this
- // would require implementing a separate mock iterator
- // for TLV payloads that also supports injecting invalid
- // payloads. Deferring this non-trival effort till a
- // later date
- failure := lnwire.NewInvalidOnionPayload(failedType, 0)
- l.sendHTLCError(
- pd, NewLinkError(failure), obfuscator, false,
- )
-
- log.Errorf("unable to decode forwarding "+
- "instructions: %v", err)
- continue
- }
-
- fwdInfo := pld.ForwardingInfo()
-
- switch fwdInfo.NextHop {
- case hop.Exit:
- err := l.processExitHop(
- pd, obfuscator, fwdInfo, heightNow, pld,
- )
- if err != nil {
- l.fail(LinkFailureError{code: ErrInternalError},
- err.String(),
- )
-
- return
- }
-
- // There are additional channels left within this route. So
- // we'll simply do some forwarding package book-keeping.
- default:
- // If hodl.AddIncoming is requested, we will not
- // validate the forwarded ADD, nor will we send the
- // packet to the htlc switch.
- if l.cfg.HodlMask.Active(hodl.AddIncoming) {
- log.Warnf(hodl.AddIncoming.Warning())
- continue
- }
-
- switch fwdPkg.State {
- case channeldb.FwdStateProcessed:
- // This add was not forwarded on the previous
- // processing phase, run it through our
- // validation pipeline to reproduce an error.
- // This may trigger a different error due to
- // expiring timelocks, but we expect that an
- // error will be reproduced.
- if !fwdPkg.FwdFilter.Contains(idx) {
- break
- }
-
- // Otherwise, it was already processed, we can
- // can collect it and continue.
- addMsg := &lnwire.UpdateAddHTLC{
- Expiry: fwdInfo.OutgoingCTLV,
- Amount: fwdInfo.AmountToForward,
- PaymentHash: pd.RHash,
- }
-
- // Finally, we'll encode the onion packet for
- // the _next_ hop using the hop iterator
- // decoded for the current hop.
- buf := bytes.NewBuffer(addMsg.OnionBlob[0:0])
-
- // We know this cannot fail, as this ADD
- // was marked forwarded in a previous
- // round of processing.
- chanIterator.EncodeNextHop(buf)
-
- updatePacket := &htlcPacket{
- incomingChanID: l.ShortChanID(),
- incomingHTLCID: pd.HtlcIndex,
- outgoingChanID: fwdInfo.NextHop,
- sourceRef: pd.SourceRef,
- incomingAmount: pd.Amount,
- amount: addMsg.Amount,
- htlc: addMsg,
- obfuscator: obfuscator,
- incomingTimeout: pd.Timeout,
- outgoingTimeout: fwdInfo.OutgoingCTLV,
- customRecords: pld.CustomRecords(),
- }
- switchPackets = append(
- switchPackets, updatePacket,
- )
-
- continue
- }
-
- // TODO(roasbeef): ensure don't accept outrageous
- // timeout for htlc
-
- // With all our forwarding constraints met, we'll
- // create the outgoing HTLC using the parameters as
- // specified in the forwarding info.
- addMsg := &lnwire.UpdateAddHTLC{
- Expiry: fwdInfo.OutgoingCTLV,
- Amount: fwdInfo.AmountToForward,
- PaymentHash: pd.RHash,
- }
-
- // Finally, we'll encode the onion packet for the
- // _next_ hop using the hop iterator decoded for the
- // current hop.
- buf := bytes.NewBuffer(addMsg.OnionBlob[0:0])
- err := chanIterator.EncodeNextHop(buf)
- if err != nil {
- log.Errorf("unable to encode the "+
- "remaining route %v", err)
-
- failure := l.createFailureWithUpdate(
- func(upd *lnwire.ChannelUpdate) lnwire.FailureMessage {
- return lnwire.NewTemporaryChannelFailure(
- upd,
- )
- },
- )
-
- l.sendHTLCError(
- pd, NewLinkError(failure), obfuscator, false,
- )
- continue
- }
-
- // Now that this add has been reprocessed, only append
- // it to our list of packets to forward to the switch
- // this is the first time processing the add. If the
- // fwd pkg has already been processed, then we entered
- // the above section to recreate a previous error. If
- // the packet had previously been forwarded, it would
- // have been added to switchPackets at the top of this
- // section.
- if fwdPkg.State == channeldb.FwdStateLockedIn {
- updatePacket := &htlcPacket{
- incomingChanID: l.ShortChanID(),
- incomingHTLCID: pd.HtlcIndex,
- outgoingChanID: fwdInfo.NextHop,
- sourceRef: pd.SourceRef,
- incomingAmount: pd.Amount,
- amount: addMsg.Amount,
- htlc: addMsg,
- obfuscator: obfuscator,
- incomingTimeout: pd.Timeout,
- outgoingTimeout: fwdInfo.OutgoingCTLV,
- customRecords: pld.CustomRecords(),
- }
-
- fwdPkg.FwdFilter.Set(idx)
- switchPackets = append(switchPackets,
- updatePacket)
- }
- }
- }
-
- // Commit the htlcs we are intending to forward if this package has not
- // been fully processed.
- if fwdPkg.State == channeldb.FwdStateLockedIn {
- err := l.channel.SetFwdFilter(fwdPkg.Height, fwdPkg.FwdFilter)
- if err != nil {
- l.fail(LinkFailureError{code: ErrInternalError},
- "unable to set fwd filter: %v", err)
- return
- }
- }
-
- if len(switchPackets) == 0 {
- return
- }
-
- log.Debugf("forwarding %d packets to switch", len(switchPackets))
-
- // NOTE: This call is made synchronous so that we ensure all circuits
- // are committed in the exact order that they are processed in the link.
- // Failing to do this could cause reorderings/gaps in the range of
- // opened circuits, which violates assumptions made by the circuit
- // trimming.
- l.forwardBatch(switchPackets...)
-}
-
-// processExitHop handles an htlc for which this link is the exit hop. It
-// returns a boolean indicating whether the commitment tx needs an update.
-func (l *channelLink) processExitHop(pd *lnwallet.PaymentDescriptor,
- obfuscator hop.ErrorEncrypter, fwdInfo hop.ForwardingInfo,
- heightNow uint32, payload invoices.Payload) er.R {
-
- // If hodl.ExitSettle is requested, we will not validate the final hop's
- // ADD, nor will we settle the corresponding invoice or respond with the
- // preimage.
- if l.cfg.HodlMask.Active(hodl.ExitSettle) {
- log.Warnf(hodl.ExitSettle.Warning())
-
- return nil
- }
-
- // As we're the exit hop, we'll double check the hop-payload included in
- // the HTLC to ensure that it was crafted correctly by the sender and
- // matches the HTLC we were extended.
- if pd.Amount != fwdInfo.AmountToForward {
-
- log.Errorf("onion payload of incoming htlc(%x) has incorrect "+
- "value: expected %v, got %v", pd.RHash,
- pd.Amount, fwdInfo.AmountToForward)
-
- failure := NewLinkError(
- lnwire.NewFinalIncorrectHtlcAmount(pd.Amount),
- )
- l.sendHTLCError(pd, failure, obfuscator, true)
-
- return nil
- }
-
- // We'll also ensure that our time-lock value has been computed
- // correctly.
- if pd.Timeout != fwdInfo.OutgoingCTLV {
- log.Errorf("onion payload of incoming htlc(%x) has incorrect "+
- "time-lock: expected %v, got %v",
- pd.RHash[:], pd.Timeout, fwdInfo.OutgoingCTLV)
-
- failure := NewLinkError(
- lnwire.NewFinalIncorrectCltvExpiry(pd.Timeout),
- )
- l.sendHTLCError(pd, failure, obfuscator, true)
-
- return nil
- }
-
- // Notify the invoiceRegistry of the exit hop htlc. If we crash right
- // after this, this code will be re-executed after restart. We will
- // receive back a resolution event.
- invoiceHash := lntypes.Hash(pd.RHash)
-
- circuitKey := channeldb.CircuitKey{
- ChanID: l.ShortChanID(),
- HtlcID: pd.HtlcIndex,
- }
-
- event, err := l.cfg.Registry.NotifyExitHopHtlc(
- invoiceHash, pd.Amount, pd.Timeout, int32(heightNow),
- circuitKey, l.hodlQueue.ChanIn(), payload,
- )
- if err != nil {
- return err
- }
-
- // Create a hodlHtlc struct and decide either resolved now or later.
- htlc := hodlHtlc{
- pd: pd,
- obfuscator: obfuscator,
- }
-
- // If the event is nil, the invoice is being held, so we save payment
- // descriptor for future reference.
- if event == nil {
- l.hodlMap[circuitKey] = htlc
- return nil
- }
-
- // Process the received resolution.
- return l.processHtlcResolution(event, htlc)
-}
-
-// settleHTLC settles the HTLC on the channel.
-func (l *channelLink) settleHTLC(preimage lntypes.Preimage,
- pd *lnwallet.PaymentDescriptor) er.R {
-
- hash := preimage.Hash()
-
- log.Infof("settling htlc %v as exit hop", hash)
-
- err := l.channel.SettleHTLC(
- preimage, pd.HtlcIndex, pd.SourceRef, nil, nil,
- )
- if err != nil {
- return er.Errorf("unable to settle htlc: %v", err)
- }
-
- // If the link is in hodl.BogusSettle mode, replace the preimage with a
- // fake one before sending it to the peer.
- if l.cfg.HodlMask.Active(hodl.BogusSettle) {
- log.Warnf(hodl.BogusSettle.Warning())
- preimage = [32]byte{}
- copy(preimage[:], bytes.Repeat([]byte{2}, 32))
- }
-
- // HTLC was successfully settled locally send notification about it
- // remote peer.
- l.cfg.Peer.SendMessage(false, &lnwire.UpdateFulfillHTLC{
- ChanID: l.ChanID(),
- ID: pd.HtlcIndex,
- PaymentPreimage: preimage,
- })
-
- // Once we have successfully settled the htlc, notify a settle event.
- l.cfg.HtlcNotifier.NotifySettleEvent(
- HtlcKey{
- IncomingCircuit: channeldb.CircuitKey{
- ChanID: l.ShortChanID(),
- HtlcID: pd.HtlcIndex,
- },
- },
- HtlcEventTypeReceive,
- )
-
- return nil
-}
-
-// forwardBatch forwards the given htlcPackets to the switch, and waits on the
-// err chan for the individual responses. This method is intended to be spawned
-// as a goroutine so the responses can be handled in the background.
-func (l *channelLink) forwardBatch(packets ...*htlcPacket) {
- // Don't forward packets for which we already have a response in our
- // mailbox. This could happen if a packet fails and is buffered in the
- // mailbox, and the incoming link flaps.
- var filteredPkts = make([]*htlcPacket, 0, len(packets))
- for _, pkt := range packets {
- if l.mailBox.HasPacket(pkt.inKey()) {
- continue
- }
-
- filteredPkts = append(filteredPkts, pkt)
- }
-
- if err := l.cfg.ForwardPackets(l.quit, filteredPkts...); err != nil {
- log.Errorf("Unhandled error while reforwarding htlc "+
- "settle/fail over htlcswitch: %v", err)
- }
-}
-
-// sendHTLCError functions cancels HTLC and send cancel message back to the
-// peer from which HTLC was received.
-func (l *channelLink) sendHTLCError(pd *lnwallet.PaymentDescriptor,
- failure *LinkError, e hop.ErrorEncrypter, isReceive bool) {
-
- reason, err := e.EncryptFirstHop(failure.WireMessage())
- if err != nil {
- log.Errorf("unable to obfuscate error: %v", err)
- return
- }
-
- err = l.channel.FailHTLC(pd.HtlcIndex, reason, pd.SourceRef, nil, nil)
- if err != nil {
- log.Errorf("unable cancel htlc: %v", err)
- return
- }
-
- l.cfg.Peer.SendMessage(false, &lnwire.UpdateFailHTLC{
- ChanID: l.ChanID(),
- ID: pd.HtlcIndex,
- Reason: reason,
- })
-
- // Notify a link failure on our incoming link. Outgoing htlc information
- // is not available at this point, because we have not decrypted the
- // onion, so it is excluded.
- var eventType HtlcEventType
- if isReceive {
- eventType = HtlcEventTypeReceive
- } else {
- eventType = HtlcEventTypeForward
- }
-
- l.cfg.HtlcNotifier.NotifyLinkFailEvent(
- HtlcKey{
- IncomingCircuit: channeldb.CircuitKey{
- ChanID: l.ShortChanID(),
- HtlcID: pd.HtlcIndex,
- },
- },
- HtlcInfo{
- IncomingTimeLock: pd.Timeout,
- IncomingAmt: pd.Amount,
- },
- eventType,
- failure,
- true,
- )
-}
-
-// sendMalformedHTLCError helper function which sends the malformed HTLC update
-// to the payment sender.
-func (l *channelLink) sendMalformedHTLCError(htlcIndex uint64,
- code lnwire.FailCode, onionBlob []byte, sourceRef *channeldb.AddRef) {
-
- shaOnionBlob := sha256.Sum256(onionBlob)
- err := l.channel.MalformedFailHTLC(htlcIndex, code, shaOnionBlob, sourceRef)
- if err != nil {
- log.Errorf("unable cancel htlc: %v", err)
- return
- }
-
- l.cfg.Peer.SendMessage(false, &lnwire.UpdateFailMalformedHTLC{
- ChanID: l.ChanID(),
- ID: htlcIndex,
- ShaOnionBlob: shaOnionBlob,
- FailureCode: code,
- })
-}
-
-// fail is a function which is used to encapsulate the action necessary for
-// properly failing the link. It takes a LinkFailureError, which will be passed
-// to the OnChannelFailure closure, in order for it to determine if we should
-// force close the channel, and if we should send an error message to the
-// remote peer.
-func (l *channelLink) fail(linkErr LinkFailureError,
- format string, a ...interface{}) {
- reason := errors.Errorf(format, a...)
-
- // Return if we have already notified about a failure.
- if l.failed {
- log.Warnf("ignoring link failure (%v), as link already "+
- "failed", reason)
- return
- }
-
- log.Errorf("failing link: %s with error: %v", reason, linkErr)
-
- // Set failed, such that we won't process any more updates, and notify
- // the peer about the failure.
- l.failed = true
- l.cfg.OnChannelFailure(l.ChanID(), l.ShortChanID(), linkErr)
-}
diff --git a/lnd/htlcswitch/link_isolated_test.go b/lnd/htlcswitch/link_isolated_test.go
deleted file mode 100644
index 81dd85da..00000000
--- a/lnd/htlcswitch/link_isolated_test.go
+++ /dev/null
@@ -1,268 +0,0 @@
-package htlcswitch
-
-import (
- "crypto/sha256"
- "testing"
- "time"
-
- "github.com/pkt-cash/pktd/lnd/lntypes"
- "github.com/pkt-cash/pktd/lnd/lnwallet"
- "github.com/pkt-cash/pktd/lnd/lnwire"
-)
-
-type linkTestContext struct {
- t *testing.T
-
- aliceLink ChannelLink
- bobChannel *lnwallet.LightningChannel
- aliceMsgs <-chan lnwire.Message
-}
-
-// sendHtlcBobToAlice sends an HTLC from Bob to Alice, that pays to a preimage
-// already in Alice's registry.
-func (l *linkTestContext) sendHtlcBobToAlice(htlc *lnwire.UpdateAddHTLC) {
- l.t.Helper()
-
- _, err := l.bobChannel.AddHTLC(htlc, nil)
- if err != nil {
- l.t.Fatalf("bob failed adding htlc: %v", err)
- }
-
- l.aliceLink.HandleChannelUpdate(htlc)
-}
-
-// sendHtlcAliceToBob sends an HTLC from Alice to Bob, by first committing the
-// HTLC in the circuit map, then delivering the outgoing packet to Alice's link.
-// The HTLC will be sent to Bob via Alice's message stream.
-func (l *linkTestContext) sendHtlcAliceToBob(htlcID int,
- htlc *lnwire.UpdateAddHTLC) {
-
- l.t.Helper()
-
- circuitMap := l.aliceLink.(*channelLink).cfg.Switch.circuits
- fwdActions, err := circuitMap.CommitCircuits(
- &PaymentCircuit{
- Incoming: CircuitKey{
- HtlcID: uint64(htlcID),
- },
- PaymentHash: htlc.PaymentHash,
- },
- )
- if err != nil {
- l.t.Fatalf("unable to commit circuit: %v", err)
- }
-
- if len(fwdActions.Adds) != 1 {
- l.t.Fatalf("expected 1 adds, found %d", len(fwdActions.Adds))
- }
-
- err = l.aliceLink.HandleSwitchPacket(&htlcPacket{
- incomingHTLCID: uint64(htlcID),
- htlc: htlc,
- })
- if err != nil {
- l.t.Fatal(err)
- }
-}
-
-// receiveHtlcAliceToBob pulls the next message from Alice's message stream,
-// asserts that it is an UpdateAddHTLC, then applies it to Bob's state machine.
-func (l *linkTestContext) receiveHtlcAliceToBob() {
- l.t.Helper()
-
- var msg lnwire.Message
- select {
- case msg = <-l.aliceMsgs:
- case <-time.After(15 * time.Second):
- l.t.Fatalf("did not received htlc from alice")
- }
-
- htlcAdd, ok := msg.(*lnwire.UpdateAddHTLC)
- if !ok {
- l.t.Fatalf("expected UpdateAddHTLC, got %T", msg)
- }
-
- _, err := l.bobChannel.ReceiveHTLC(htlcAdd)
- if err != nil {
- l.t.Fatalf("bob failed receiving htlc: %v", err)
- }
-}
-
-// sendCommitSigBobToAlice makes Bob sign a new commitment and send it to
-// Alice, asserting that it signs expHtlcs number of HTLCs.
-func (l *linkTestContext) sendCommitSigBobToAlice(expHtlcs int) {
- l.t.Helper()
-
- sig, htlcSigs, _, err := l.bobChannel.SignNextCommitment()
- if err != nil {
- l.t.Fatalf("error signing commitment: %v", err)
- }
-
- commitSig := &lnwire.CommitSig{
- CommitSig: sig,
- HtlcSigs: htlcSigs,
- }
-
- if len(commitSig.HtlcSigs) != expHtlcs {
- l.t.Fatalf("Expected %d htlc sigs, got %d", expHtlcs,
- len(commitSig.HtlcSigs))
- }
-
- l.aliceLink.HandleChannelUpdate(commitSig)
-}
-
-// receiveRevAndAckAliceToBob waits for Alice to send a RevAndAck to Bob, then
-// hands this to Bob.
-func (l *linkTestContext) receiveRevAndAckAliceToBob() {
- l.t.Helper()
-
- var msg lnwire.Message
- select {
- case msg = <-l.aliceMsgs:
- case <-time.After(15 * time.Second):
- l.t.Fatalf("did not receive message")
- }
-
- rev, ok := msg.(*lnwire.RevokeAndAck)
- if !ok {
- l.t.Fatalf("expected RevokeAndAck, got %T", msg)
- }
-
- _, _, _, _, err := l.bobChannel.ReceiveRevocation(rev)
- if err != nil {
- l.t.Fatalf("bob failed receiving revocation: %v", err)
- }
-}
-
-// receiveCommitSigAliceToBob waits for Alice to send a CommitSig to Bob,
-// signing expHtlcs numbers of HTLCs, then hands this to Bob.
-func (l *linkTestContext) receiveCommitSigAliceToBob(expHtlcs int) {
- l.t.Helper()
-
- comSig := l.receiveCommitSigAlice(expHtlcs)
-
- err := l.bobChannel.ReceiveNewCommitment(
- comSig.CommitSig, comSig.HtlcSigs,
- )
- if err != nil {
- l.t.Fatalf("bob failed receiving commitment: %v", err)
- }
-}
-
-// receiveCommitSigAlice waits for Alice to send a CommitSig, signing expHtlcs
-// numbers of HTLCs.
-func (l *linkTestContext) receiveCommitSigAlice(expHtlcs int) *lnwire.CommitSig {
- l.t.Helper()
-
- var msg lnwire.Message
- select {
- case msg = <-l.aliceMsgs:
- case <-time.After(15 * time.Second):
- l.t.Fatalf("did not receive message")
- }
-
- comSig, ok := msg.(*lnwire.CommitSig)
- if !ok {
- l.t.Fatalf("expected CommitSig, got %T", msg)
- }
-
- if len(comSig.HtlcSigs) != expHtlcs {
- l.t.Fatalf("expected %d htlc sigs, got %d", expHtlcs,
- len(comSig.HtlcSigs))
- }
-
- return comSig
-}
-
-// sendRevAndAckBobToAlice make Bob revoke his current commitment, then hand
-// the RevokeAndAck to Alice.
-func (l *linkTestContext) sendRevAndAckBobToAlice() {
- l.t.Helper()
-
- rev, _, err := l.bobChannel.RevokeCurrentCommitment()
- if err != nil {
- l.t.Fatalf("unable to revoke commitment: %v", err)
- }
-
- l.aliceLink.HandleChannelUpdate(rev)
-}
-
-// receiveSettleAliceToBob waits for Alice to send a HTLC settle message to
-// Bob, then hands this to Bob.
-func (l *linkTestContext) receiveSettleAliceToBob() {
- l.t.Helper()
-
- var msg lnwire.Message
- select {
- case msg = <-l.aliceMsgs:
- case <-time.After(15 * time.Second):
- l.t.Fatalf("did not receive message")
- }
-
- settleMsg, ok := msg.(*lnwire.UpdateFulfillHTLC)
- if !ok {
- l.t.Fatalf("expected UpdateFulfillHTLC, got %T", msg)
- }
-
- err := l.bobChannel.ReceiveHTLCSettle(settleMsg.PaymentPreimage,
- settleMsg.ID)
- if err != nil {
- l.t.Fatalf("failed settling htlc: %v", err)
- }
-}
-
-// sendSettleBobToAlice settles an HTLC on Bob's state machine, then sends an
-// UpdateFulfillHTLC message to Alice's upstream inbox.
-func (l *linkTestContext) sendSettleBobToAlice(htlcID uint64,
- preimage lntypes.Preimage) {
-
- l.t.Helper()
-
- err := l.bobChannel.SettleHTLC(preimage, htlcID, nil, nil, nil)
- if err != nil {
- l.t.Fatalf("alice failed settling htlc id=%d hash=%x",
- htlcID, sha256.Sum256(preimage[:]))
- }
-
- settle := &lnwire.UpdateFulfillHTLC{
- ID: htlcID,
- PaymentPreimage: preimage,
- }
-
- l.aliceLink.HandleChannelUpdate(settle)
-}
-
-// receiveSettleAliceToBob waits for Alice to send a HTLC settle message to
-// Bob, then hands this to Bob.
-func (l *linkTestContext) receiveFailAliceToBob() {
- l.t.Helper()
-
- var msg lnwire.Message
- select {
- case msg = <-l.aliceMsgs:
- case <-time.After(15 * time.Second):
- l.t.Fatalf("did not receive message")
- }
-
- failMsg, ok := msg.(*lnwire.UpdateFailHTLC)
- if !ok {
- l.t.Fatalf("expected UpdateFailHTLC, got %T", msg)
- }
-
- err := l.bobChannel.ReceiveFailHTLC(failMsg.ID, failMsg.Reason)
- if err != nil {
- l.t.Fatalf("unable to apply received fail htlc: %v", err)
- }
-}
-
-// assertNoMsgFromAlice asserts that Alice hasn't sent a message. Before
-// calling, make sure that Alice has had the opportunity to send the message.
-func (l *linkTestContext) assertNoMsgFromAlice(timeout time.Duration) {
- l.t.Helper()
-
- select {
- case msg := <-l.aliceMsgs:
- l.t.Fatalf("unexpected message from Alice: %v", msg)
- case <-time.After(timeout):
- }
-}
diff --git a/lnd/htlcswitch/link_test.go b/lnd/htlcswitch/link_test.go
deleted file mode 100644
index ee13f74e..00000000
--- a/lnd/htlcswitch/link_test.go
+++ /dev/null
@@ -1,6293 +0,0 @@
-package htlcswitch
-
-import (
- "bytes"
- "crypto/rand"
- "crypto/sha256"
- "encoding/binary"
- "fmt"
- "net"
- "os"
- "reflect"
- "runtime"
- "sync"
- "testing"
- "time"
-
- "github.com/davecgh/go-spew/spew"
- "github.com/pkt-cash/pktd/btcec"
- "github.com/pkt-cash/pktd/btcutil"
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/btcutil/util"
- "github.com/pkt-cash/pktd/chaincfg/chainhash"
- "github.com/pkt-cash/pktd/chaincfg/globalcfg"
- sphinx "github.com/pkt-cash/pktd/lightning-onion"
- "github.com/pkt-cash/pktd/lnd/build"
- "github.com/pkt-cash/pktd/lnd/channeldb"
- "github.com/pkt-cash/pktd/lnd/channeldb/kvdb"
- "github.com/pkt-cash/pktd/lnd/contractcourt"
- "github.com/pkt-cash/pktd/lnd/htlcswitch/hodl"
- "github.com/pkt-cash/pktd/lnd/htlcswitch/hop"
- "github.com/pkt-cash/pktd/lnd/input"
- "github.com/pkt-cash/pktd/lnd/lnpeer"
- "github.com/pkt-cash/pktd/lnd/lntest/wait"
- "github.com/pkt-cash/pktd/lnd/lntypes"
- "github.com/pkt-cash/pktd/lnd/lnwallet"
- "github.com/pkt-cash/pktd/lnd/lnwallet/chainfee"
- "github.com/pkt-cash/pktd/lnd/lnwire"
- "github.com/pkt-cash/pktd/lnd/ticker"
- "github.com/pkt-cash/pktd/wire"
-)
-
-const (
- testStartingHeight = 100
- testDefaultDelta = 6
-)
-
-// concurrentTester is a thread-safe wrapper around the Fatalf method of a
-// *testing.T instance. With this wrapper multiple goroutines can safely
-// attempt to fail a test concurrently.
-type concurrentTester struct {
- mtx sync.Mutex
- *testing.T
-}
-
-func newConcurrentTester(t *testing.T) *concurrentTester {
- return &concurrentTester{
- T: t,
- }
-}
-
-func (c *concurrentTester) Fatalf(format string, args ...interface{}) {
- c.T.Helper()
-
- c.mtx.Lock()
- defer c.mtx.Unlock()
-
- c.T.Fatalf(format, args...)
-}
-
-// messageToString is used to produce less spammy log messages in trace mode by
-// setting the 'Curve" parameter to nil. Doing this avoids printing out each of
-// the field elements in the curve parameters for secp256k1.
-func messageToString(msg lnwire.Message) string {
- switch m := msg.(type) {
- case *lnwire.RevokeAndAck:
- m.NextRevocationKey.Curve = nil
- case *lnwire.AcceptChannel:
- m.FundingKey.Curve = nil
- m.RevocationPoint.Curve = nil
- m.PaymentPoint.Curve = nil
- m.DelayedPaymentPoint.Curve = nil
- m.FirstCommitmentPoint.Curve = nil
- case *lnwire.OpenChannel:
- m.FundingKey.Curve = nil
- m.RevocationPoint.Curve = nil
- m.PaymentPoint.Curve = nil
- m.DelayedPaymentPoint.Curve = nil
- m.FirstCommitmentPoint.Curve = nil
- case *lnwire.FundingLocked:
- m.NextPerCommitmentPoint.Curve = nil
- }
-
- return spew.Sdump(msg)
-}
-
-// expectedMessage struct holds the message which travels from one peer to
-// another, and additional information like, should this message we skipped for
-// handling.
-type expectedMessage struct {
- from string
- to string
- message lnwire.Message
- skip bool
-}
-
-// createLogFunc is a helper function which returns the function which will be
-// used for logging message are received from another peer.
-func createLogFunc(name string, channelID lnwire.ChannelID) messageInterceptor {
- return func(m lnwire.Message) (bool, er.R) {
- chanID, err := getChanID(m)
- if err != nil {
- return false, err
- }
-
- if chanID == channelID {
- fmt.Printf("---------------------- \n %v received: "+
- "%v", name, messageToString(m))
- }
- return false, nil
- }
-}
-
-// createInterceptorFunc creates the function by the given set of messages
-// which, checks the order of the messages and skip the ones which were
-// indicated to be intercepted.
-func createInterceptorFunc(prefix, receiver string, messages []expectedMessage,
- chanID lnwire.ChannelID, debug bool) messageInterceptor {
-
- // Filter message which should be received with given peer name.
- var expectToReceive []expectedMessage
- for _, message := range messages {
- if message.to == receiver {
- expectToReceive = append(expectToReceive, message)
- }
- }
-
- // Return function which checks the message order and skip the
- // messages.
- return func(m lnwire.Message) (bool, er.R) {
- messageChanID, err := getChanID(m)
- if err != nil {
- return false, err
- }
-
- if messageChanID == chanID {
- if len(expectToReceive) == 0 {
- return false, er.Errorf("%v received "+
- "unexpected message out of range: %v",
- receiver, m.MsgType())
- }
-
- expectedMessage := expectToReceive[0]
- expectToReceive = expectToReceive[1:]
-
- if expectedMessage.message.MsgType() != m.MsgType() {
- return false, er.Errorf("%v received wrong message: \n"+
- "real: %v\nexpected: %v", receiver, m.MsgType(),
- expectedMessage.message.MsgType())
- }
-
- if debug {
- var postfix string
- if revocation, ok := m.(*lnwire.RevokeAndAck); ok {
- var zeroHash chainhash.Hash
- if bytes.Equal(zeroHash[:], revocation.Revocation[:]) {
- postfix = "- empty revocation"
- }
- }
-
- if expectedMessage.skip {
- fmt.Printf("skipped: %v: %v %v \n", prefix,
- m.MsgType(), postfix)
- } else {
- fmt.Printf("%v: %v %v \n", prefix, m.MsgType(), postfix)
- }
- }
-
- return expectedMessage.skip, nil
- }
- return false, nil
- }
-}
-
-// TestChannelLinkSingleHopPayment in this test we checks the interaction
-// between Alice and Bob within scope of one channel.
-func TestChannelLinkSingleHopPayment(t *testing.T) {
- t.Parallel()
-
- // Setup a alice-bob network.
- alice, bob, cleanUp, err := createTwoClusterChannels(
- btcutil.UnitsPerCoin()*3,
- btcutil.UnitsPerCoin()*5)
- if err != nil {
- t.Fatalf("unable to create channel: %v", err)
- }
- defer cleanUp()
-
- n := newTwoHopNetwork(
- t, alice.channel, bob.channel, testStartingHeight,
- )
- if err := n.start(); err != nil {
- t.Fatal(err)
- }
- defer n.stop()
-
- aliceBandwidthBefore := n.aliceChannelLink.Bandwidth()
- bobBandwidthBefore := n.bobChannelLink.Bandwidth()
-
- debug := false
- if debug {
- // Log message that alice receives.
- n.aliceServer.intersect(createLogFunc("alice",
- n.aliceChannelLink.ChanID()))
-
- // Log message that bob receives.
- n.bobServer.intersect(createLogFunc("bob",
- n.bobChannelLink.ChanID()))
- }
-
- amount := lnwire.NewMSatFromSatoshis(btcutil.UnitsPerCoin())
- htlcAmt, totalTimelock, hops := generateHops(amount, testStartingHeight,
- n.bobChannelLink)
-
- // Wait for:
- // * HTLC add request to be sent to bob.
- // * alice<->bob commitment state to be updated.
- // * settle request to be sent back from bob to alice.
- // * alice<->bob commitment state to be updated.
- // * user notification to be sent.
- receiver := n.bobServer
- firstHop := n.bobChannelLink.ShortChanID()
- rhash, err := makePayment(
- n.aliceServer, receiver, firstHop, hops, amount, htlcAmt,
- totalTimelock,
- ).Wait(30 * time.Second)
- if err != nil {
- t.Fatalf("unable to make the payment: %v", err)
- }
-
- // Wait for Alice to receive the revocation.
- //
- // TODO(roasbeef); replace with select over returned err chan
- time.Sleep(2 * time.Second)
-
- // Check that alice invoice was settled and bandwidth of HTLC
- // links was changed.
- invoice, err := receiver.registry.LookupInvoice(rhash)
- if err != nil {
- t.Fatalf("unable to get invoice: %v", err)
- }
- if invoice.State != channeldb.ContractSettled {
- t.Fatal("alice invoice wasn't settled")
- }
-
- if aliceBandwidthBefore-amount != n.aliceChannelLink.Bandwidth() {
- t.Fatal("alice bandwidth should have decrease on payment " +
- "amount")
- }
-
- if bobBandwidthBefore+amount != n.bobChannelLink.Bandwidth() {
- t.Fatalf("bob bandwidth isn't match: expected %v, got %v",
- bobBandwidthBefore+amount,
- n.bobChannelLink.Bandwidth())
- }
-}
-
-// TestChannelLinkMultiHopPayment checks the ability to send payment over two
-// hops. In this test we send the payment from Carol to Alice over Bob peer.
-// (Carol -> Bob -> Alice) and checking that HTLC was settled properly and
-// balances were changed in two channels.
-//
-// The test is executed with two different OutgoingCltvRejectDelta values for
-// bob. In addition to a normal positive value, we also test the zero case
-// because this is currently the configured value in lnd
-// (defaultOutgoingCltvRejectDelta).
-func TestChannelLinkMultiHopPayment(t *testing.T) {
- t.Run(
- "bobOutgoingCltvRejectDelta 3",
- func(t *testing.T) {
- testChannelLinkMultiHopPayment(t, 3)
- },
- )
- t.Run(
- "bobOutgoingCltvRejectDelta 0",
- func(t *testing.T) {
- testChannelLinkMultiHopPayment(t, 0)
- },
- )
-}
-
-func testChannelLinkMultiHopPayment(t *testing.T,
- bobOutgoingCltvRejectDelta uint32) {
-
- t.Parallel()
-
- channels, cleanUp, _, err := createClusterChannels(
- btcutil.UnitsPerCoin()*3,
- btcutil.UnitsPerCoin()*5)
- if err != nil {
- t.Fatalf("unable to create channel: %v", err)
- }
- defer cleanUp()
-
- n := newThreeHopNetwork(t, channels.aliceToBob, channels.bobToAlice,
- channels.bobToCarol, channels.carolToBob, testStartingHeight)
-
- n.firstBobChannelLink.cfg.OutgoingCltvRejectDelta =
- bobOutgoingCltvRejectDelta
-
- n.secondBobChannelLink.cfg.OutgoingCltvRejectDelta =
- bobOutgoingCltvRejectDelta
-
- if err := n.start(); err != nil {
- t.Fatal(err)
- }
- defer n.stop()
-
- carolBandwidthBefore := n.carolChannelLink.Bandwidth()
- firstBobBandwidthBefore := n.firstBobChannelLink.Bandwidth()
- secondBobBandwidthBefore := n.secondBobChannelLink.Bandwidth()
- aliceBandwidthBefore := n.aliceChannelLink.Bandwidth()
-
- debug := false
- if debug {
- // Log messages that alice receives from bob.
- n.aliceServer.intersect(createLogFunc("[alice]<-bob<-carol: ",
- n.aliceChannelLink.ChanID()))
-
- // Log messages that bob receives from alice.
- n.bobServer.intersect(createLogFunc("alice->[bob]->carol: ",
- n.firstBobChannelLink.ChanID()))
-
- // Log messages that bob receives from carol.
- n.bobServer.intersect(createLogFunc("alice<-[bob]<-carol: ",
- n.secondBobChannelLink.ChanID()))
-
- // Log messages that carol receives from bob.
- n.carolServer.intersect(createLogFunc("alice->bob->[carol]",
- n.carolChannelLink.ChanID()))
- }
-
- amount := lnwire.NewMSatFromSatoshis(btcutil.UnitsPerCoin())
- htlcAmt, totalTimelock, hops := generateHops(amount,
- testStartingHeight,
- n.firstBobChannelLink, n.carolChannelLink)
-
- // Wait for:
- // * HTLC add request to be sent from Alice to Bob.
- // * Alice<->Bob commitment states to be updated.
- // * HTLC add request to be propagated to Carol.
- // * Bob<->Carol commitment state to be updated.
- // * settle request to be sent back from Carol to Bob.
- // * Alice<->Bob commitment state to be updated.
- // * settle request to be sent back from Bob to Alice.
- // * Alice<->Bob commitment states to be updated.
- // * user notification to be sent.
- receiver := n.carolServer
- firstHop := n.firstBobChannelLink.ShortChanID()
- rhash, err := makePayment(
- n.aliceServer, n.carolServer, firstHop, hops, amount, htlcAmt,
- totalTimelock,
- ).Wait(30 * time.Second)
- if err != nil {
- t.Fatalf("unable to send payment: %v", err)
- }
-
- // Wait for Alice and Bob's second link to receive the revocation.
- time.Sleep(2 * time.Second)
-
- // Check that Carol invoice was settled and bandwidth of HTLC
- // links were changed.
- invoice, err := receiver.registry.LookupInvoice(rhash)
- if err != nil {
- t.Fatalf("unable to get invoice: %v", err)
- }
- if invoice.State != channeldb.ContractSettled {
- t.Fatal("carol invoice haven't been settled")
- }
-
- expectedAliceBandwidth := aliceBandwidthBefore - htlcAmt
- if expectedAliceBandwidth != n.aliceChannelLink.Bandwidth() {
- t.Fatalf("channel bandwidth incorrect: expected %v, got %v",
- expectedAliceBandwidth, n.aliceChannelLink.Bandwidth())
- }
-
- expectedBobBandwidth1 := firstBobBandwidthBefore + htlcAmt
- if expectedBobBandwidth1 != n.firstBobChannelLink.Bandwidth() {
- t.Fatalf("channel bandwidth incorrect: expected %v, got %v",
- expectedBobBandwidth1, n.firstBobChannelLink.Bandwidth())
- }
-
- expectedBobBandwidth2 := secondBobBandwidthBefore - amount
- if expectedBobBandwidth2 != n.secondBobChannelLink.Bandwidth() {
- t.Fatalf("channel bandwidth incorrect: expected %v, got %v",
- expectedBobBandwidth2, n.secondBobChannelLink.Bandwidth())
- }
-
- expectedCarolBandwidth := carolBandwidthBefore + amount
- if expectedCarolBandwidth != n.carolChannelLink.Bandwidth() {
- t.Fatalf("channel bandwidth incorrect: expected %v, got %v",
- expectedCarolBandwidth, n.carolChannelLink.Bandwidth())
- }
-}
-
-// TestChannelLinkCancelFullCommitment tests the ability for links to cancel
-// forwarded HTLCs once all of their commitment slots are full.
-func TestChannelLinkCancelFullCommitment(t *testing.T) {
- t.Parallel()
-
- channels, cleanUp, _, err := createClusterChannels(
- btcutil.UnitsPerCoin()*3,
- btcutil.UnitsPerCoin()*5)
- if err != nil {
- t.Fatalf("unable to create channel: %v", err)
- }
- defer cleanUp()
-
- n := newTwoHopNetwork(
- t, channels.aliceToBob, channels.bobToAlice, testStartingHeight,
- )
- if err := n.start(); err != nil {
- t.Fatal(err)
- }
- defer n.stop()
-
- // Fill up the commitment from Alice's side with 20 sat payments.
- count := (input.MaxHTLCNumber / 2)
- amt := lnwire.NewMSatFromSatoshis(20000)
-
- htlcAmt, totalTimelock, hopsForwards := generateHops(amt,
- testStartingHeight, n.bobChannelLink)
-
- firstHop := n.aliceChannelLink.ShortChanID()
-
- // Create channels to buffer the preimage and error channels used in
- // making the preliminary payments.
- preimages := make([]lntypes.Preimage, count)
- aliceErrChan := make(chan chan er.R, count)
-
- var wg sync.WaitGroup
- for i := 0; i < count; i++ {
- // Deterministically generate preimages. Avoid the all-zeroes
- // preimage because that will be rejected by the database.
- preimages[i] = lntypes.Preimage{byte(i >> 8), byte(i), 1}
-
- wg.Add(1)
- go func(i int) {
- defer wg.Done()
-
- errChan := n.makeHoldPayment(
- n.aliceServer, n.bobServer, firstHop,
- hopsForwards, amt, htlcAmt, totalTimelock,
- preimages[i],
- )
- aliceErrChan <- errChan
- }(i)
- }
-
- // Wait for Alice to finish filling her commitment.
- wg.Wait()
- close(aliceErrChan)
-
- // Now make an additional payment from Alice to Bob, this should be
- // canceled because the commitment in this direction is full.
- err = <-makePayment(
- n.aliceServer, n.bobServer, firstHop, hopsForwards, amt,
- htlcAmt, totalTimelock,
- ).err
- if err == nil {
- t.Fatalf("overflow payment should have failed")
- }
- errr := er.Wrapped(err)
- lerr, ok := errr.(*LinkError)
- if !ok {
- t.Fatalf("expected LinkError, got: %T", err)
- }
-
- msg := lerr.WireMessage()
- if _, ok := msg.(*lnwire.FailTemporaryChannelFailure); !ok {
- t.Fatalf("expected TemporaryChannelFailure, got: %T", msg)
- }
-
- // Now, settle all htlcs held by bob and clear the commitment of htlcs.
- for _, preimage := range preimages {
- preimage := preimage
-
- // It's possible that the HTLCs have not been delivered to the
- // invoice registry at this point, so we poll until we are able
- // to settle.
- err = wait.NoError(func() er.R {
- return n.bobServer.registry.SettleHodlInvoice(preimage)
- }, time.Minute)
- if err != nil {
- t.Fatal(err)
- }
- }
-
- // Ensure that all of the payments sent by alice eventually succeed.
- for errChan := range aliceErrChan {
- err := <-errChan
- if err != nil {
- t.Fatalf("alice payment failed: %v", err)
- }
- }
-}
-
-// TestExitNodeTimelockPayloadMismatch tests that when an exit node receives an
-// incoming HTLC, if the time lock encoded in the payload of the forwarded HTLC
-// doesn't match the expected payment value, then the HTLC will be rejected
-// with the appropriate error.
-func TestExitNodeTimelockPayloadMismatch(t *testing.T) {
- t.Parallel()
-
- channels, cleanUp, _, err := createClusterChannels(
- btcutil.UnitsPerCoin()*5,
- btcutil.UnitsPerCoin()*5)
- if err != nil {
- t.Fatalf("unable to create channel: %v", err)
- }
- defer cleanUp()
-
- n := newThreeHopNetwork(t, channels.aliceToBob, channels.bobToAlice,
- channels.bobToCarol, channels.carolToBob, testStartingHeight)
- if err := n.start(); err != nil {
- t.Fatal(err)
- }
- defer n.stop()
-
- amount := lnwire.NewMSatFromSatoshis(btcutil.UnitsPerCoin())
- htlcAmt, htlcExpiry, hops := generateHops(amount,
- testStartingHeight, n.firstBobChannelLink)
-
- // In order to exercise this case, we'll now _manually_ modify the
- // per-hop payload for outgoing time lock to be the incorrect value.
- // The proper value of the outgoing CLTV should be the policy set by
- // the receiving node, instead we set it to be a random value.
- hops[0].FwdInfo.OutgoingCTLV = 500
- firstHop := n.firstBobChannelLink.ShortChanID()
- _, err = makePayment(
- n.aliceServer, n.bobServer, firstHop, hops, amount, htlcAmt,
- htlcExpiry,
- ).Wait(30 * time.Second)
- if err == nil {
- t.Fatalf("payment should have failed but didn't")
- }
-
- errr := er.Wrapped(err)
- rtErr, ok := errr.(ClearTextError)
- if !ok {
- t.Fatalf("expected a ClearTextError, instead got: %T", err)
- }
-
- switch rtErr.WireMessage().(type) {
- case *lnwire.FailFinalIncorrectCltvExpiry:
- default:
- t.Fatalf("incorrect error, expected incorrect cltv expiry, "+
- "instead have: %v", err)
- }
-}
-
-// TestExitNodeAmountPayloadMismatch tests that when an exit node receives an
-// incoming HTLC, if the amount encoded in the onion payload of the forwarded
-// HTLC doesn't match the expected payment value, then the HTLC will be
-// rejected.
-func TestExitNodeAmountPayloadMismatch(t *testing.T) {
- t.Parallel()
-
- channels, cleanUp, _, err := createClusterChannels(
- btcutil.UnitsPerCoin()*5,
- btcutil.UnitsPerCoin()*5)
- if err != nil {
- t.Fatalf("unable to create channel: %v", err)
- }
- defer cleanUp()
-
- n := newThreeHopNetwork(t, channels.aliceToBob, channels.bobToAlice,
- channels.bobToCarol, channels.carolToBob, testStartingHeight)
- if err := n.start(); err != nil {
- t.Fatal(err)
- }
- defer n.stop()
-
- amount := lnwire.NewMSatFromSatoshis(btcutil.UnitsPerCoin())
- htlcAmt, htlcExpiry, hops := generateHops(amount, testStartingHeight,
- n.firstBobChannelLink)
-
- // In order to exercise this case, we'll now _manually_ modify the
- // per-hop payload for amount to be the incorrect value. The proper
- // value of the amount to forward should be the amount that the
- // receiving node expects to receive.
- hops[0].FwdInfo.AmountToForward = 1
- firstHop := n.firstBobChannelLink.ShortChanID()
- _, err = makePayment(
- n.aliceServer, n.bobServer, firstHop, hops, amount, htlcAmt,
- htlcExpiry,
- ).Wait(30 * time.Second)
- if err == nil {
- t.Fatalf("payment should have failed but didn't")
- }
- assertFailureCode(t, err, lnwire.CodeFinalIncorrectHtlcAmount)
-}
-
-// TestLinkForwardTimelockPolicyMismatch tests that if a node is an
-// intermediate node in a multi-hop payment, and receives an HTLC which
-// violates its specified multi-hop policy, then the HTLC is rejected.
-func TestLinkForwardTimelockPolicyMismatch(t *testing.T) {
- t.Parallel()
-
- channels, cleanUp, _, err := createClusterChannels(
- btcutil.UnitsPerCoin()*5,
- btcutil.UnitsPerCoin()*5)
- if err != nil {
- t.Fatalf("unable to create channel: %v", err)
- }
- defer cleanUp()
-
- n := newThreeHopNetwork(t, channels.aliceToBob, channels.bobToAlice,
- channels.bobToCarol, channels.carolToBob, testStartingHeight)
- if err := n.start(); err != nil {
- t.Fatal(err)
- }
- defer n.stop()
-
- // We'll be sending 1 BTC over a 2-hop (3 vertex) route.
- amount := lnwire.NewMSatFromSatoshis(btcutil.UnitsPerCoin())
-
- // Generate the route over two hops, ignoring the total time lock that
- // we'll need to use for the first HTLC in order to have a sufficient
- // time-lock value to account for the decrements over the entire route.
- htlcAmt, htlcExpiry, hops := generateHops(amount, testStartingHeight,
- n.firstBobChannelLink, n.carolChannelLink)
- htlcExpiry -= 2
-
- // Next, we'll make the payment which'll send an HTLC with our
- // specified parameters to the first hop in the route.
- firstHop := n.firstBobChannelLink.ShortChanID()
- _, err = makePayment(
- n.aliceServer, n.carolServer, firstHop, hops, amount, htlcAmt,
- htlcExpiry,
- ).Wait(30 * time.Second)
-
- // We should get an error, and that error should indicate that the HTLC
- // should be rejected due to a policy violation.
- if err == nil {
- t.Fatalf("payment should have failed but didn't")
- }
- errr := er.Wrapped(err)
- rtErr, ok := errr.(ClearTextError)
- if !ok {
- t.Fatalf("expected a ClearTextError, instead got: %T", err)
- }
-
- switch rtErr.WireMessage().(type) {
- case *lnwire.FailIncorrectCltvExpiry:
- default:
- t.Fatalf("incorrect error, expected incorrect cltv expiry, "+
- "instead have: %v", err)
- }
-}
-
-// TestLinkForwardFeePolicyMismatch tests that if a node is an intermediate
-// node in a multi-hop payment and receives an HTLC that violates its current
-// fee policy, then the HTLC is rejected with the proper error.
-func TestLinkForwardFeePolicyMismatch(t *testing.T) {
- t.Parallel()
-
- channels, cleanUp, _, err := createClusterChannels(
- btcutil.UnitsPerCoin()*3,
- btcutil.UnitsPerCoin()*5)
- if err != nil {
- t.Fatalf("unable to create channel: %v", err)
- }
- defer cleanUp()
-
- n := newThreeHopNetwork(t, channels.aliceToBob, channels.bobToAlice,
- channels.bobToCarol, channels.carolToBob, testStartingHeight)
- if err := n.start(); err != nil {
- t.Fatal(err)
- }
- defer n.stop()
-
- // We'll be sending 1 BTC over a 2-hop (3 vertex) route. Given the
- // current default fee of 1 SAT, if we just send a single BTC over in
- // an HTLC, it should be rejected.
- amountNoFee := lnwire.NewMSatFromSatoshis(btcutil.UnitsPerCoin())
-
- // Generate the route over two hops, ignoring the amount we _should_
- // actually send in order to be able to cover fees.
- _, htlcExpiry, hops := generateHops(amountNoFee, testStartingHeight,
- n.firstBobChannelLink, n.carolChannelLink)
-
- // Next, we'll make the payment which'll send an HTLC with our
- // specified parameters to the first hop in the route.
- firstHop := n.firstBobChannelLink.ShortChanID()
- _, err = makePayment(
- n.aliceServer, n.bobServer, firstHop, hops, amountNoFee,
- amountNoFee, htlcExpiry,
- ).Wait(30 * time.Second)
-
- // We should get an error, and that error should indicate that the HTLC
- // should be rejected due to a policy violation.
- if err == nil {
- t.Fatalf("payment should have failed but didn't")
- }
-
- errr := er.Wrapped(err)
- rtErr, ok := errr.(ClearTextError)
- if !ok {
- t.Fatalf("expected a ClearTextError, instead got: %T", err)
- }
-
- switch rtErr.WireMessage().(type) {
- case *lnwire.FailFeeInsufficient:
- default:
- t.Fatalf("incorrect error, expected fee insufficient, "+
- "instead have: %T", err)
- }
-}
-
-// TestLinkForwardFeePolicyMismatch tests that if a node is an intermediate
-// node and receives an HTLC which is _below_ its min HTLC policy, then the
-// HTLC will be rejected.
-func TestLinkForwardMinHTLCPolicyMismatch(t *testing.T) {
- t.Parallel()
-
- channels, cleanUp, _, err := createClusterChannels(
- btcutil.UnitsPerCoin()*5,
- btcutil.UnitsPerCoin()*5)
- if err != nil {
- t.Fatalf("unable to create channel: %v", err)
- }
- defer cleanUp()
-
- n := newThreeHopNetwork(t, channels.aliceToBob, channels.bobToAlice,
- channels.bobToCarol, channels.carolToBob, testStartingHeight)
- if err := n.start(); err != nil {
- t.Fatal(err)
- }
- defer n.stop()
-
- // The current default global min HTLC policy set in the default config
- // for the three-hop-network is 5 SAT. So in order to trigger this
- // failure mode, we'll create an HTLC with 1 satoshi.
- amountNoFee := lnwire.NewMSatFromSatoshis(1)
-
- // With the amount set, we'll generate a route over 2 hops within the
- // network that attempts to pay out our specified amount.
- htlcAmt, htlcExpiry, hops := generateHops(amountNoFee, testStartingHeight,
- n.firstBobChannelLink, n.carolChannelLink)
-
- // Next, we'll make the payment which'll send an HTLC with our
- // specified parameters to the first hop in the route.
- firstHop := n.firstBobChannelLink.ShortChanID()
- _, err = makePayment(
- n.aliceServer, n.bobServer, firstHop, hops, amountNoFee,
- htlcAmt, htlcExpiry,
- ).Wait(30 * time.Second)
-
- // We should get an error, and that error should indicate that the HTLC
- // should be rejected due to a policy violation (below min HTLC).
- if err == nil {
- t.Fatalf("payment should have failed but didn't")
- }
-
- errr := er.Wrapped(err)
- rtErr, ok := errr.(ClearTextError)
- if !ok {
- t.Fatalf("expected a ClearTextError, instead got: %T", err)
- }
-
- switch rtErr.WireMessage().(type) {
- case *lnwire.FailAmountBelowMinimum:
- default:
- t.Fatalf("incorrect error, expected amount below minimum, "+
- "instead have: %v", err)
- }
-}
-
-// TestLinkForwardMaxHTLCPolicyMismatch tests that if a node is an intermediate
-// node and receives an HTLC which is _above_ its max HTLC policy then the
-// HTLC will be rejected.
-func TestLinkForwardMaxHTLCPolicyMismatch(t *testing.T) {
- t.Parallel()
-
- channels, cleanUp, _, err := createClusterChannels(
- btcutil.UnitsPerCoin()*5, btcutil.UnitsPerCoin()*5,
- )
- if err != nil {
- t.Fatalf("unable to create channel: %v", err)
- }
- defer cleanUp()
-
- n := newThreeHopNetwork(
- t, channels.aliceToBob, channels.bobToAlice, channels.bobToCarol,
- channels.carolToBob, testStartingHeight,
- )
- if err := n.start(); err != nil {
- t.Fatal(err)
- }
- defer n.stop()
-
- // In order to trigger this failure mode, we'll update our policy to have
- // a new max HTLC of 10 satoshis.
- maxHtlc := lnwire.NewMSatFromSatoshis(10)
-
- // First we'll generate a route over 2 hops within the network that
- // attempts to pay out an amount greater than the max HTLC we're about to
- // set.
- amountNoFee := maxHtlc + 1
- htlcAmt, htlcExpiry, hops := generateHops(
- amountNoFee, testStartingHeight, n.firstBobChannelLink,
- n.carolChannelLink,
- )
-
- // We'll now update Bob's policy to set the max HTLC we chose earlier.
- n.secondBobChannelLink.cfg.FwrdingPolicy.MaxHTLC = maxHtlc
-
- // Finally, we'll make the payment which'll send an HTLC with our
- // specified parameters.
- firstHop := n.firstBobChannelLink.ShortChanID()
- _, err = makePayment(
- n.aliceServer, n.carolServer, firstHop, hops, amountNoFee,
- htlcAmt, htlcExpiry,
- ).Wait(30 * time.Second)
-
- // We should get an error indicating a temporary channel failure, The
- // failure is temporary because this payment would be allowed if Bob
- // updated his policy to increase the max HTLC.
- if err == nil {
- t.Fatalf("payment should have failed but didn't")
- }
-
- errr := er.Wrapped(err)
- rtErr, ok := errr.(ClearTextError)
- if !ok {
- t.Fatalf("expected a ClearTextError, instead got: %T", err)
- }
-
- switch rtErr.WireMessage().(type) {
- case *lnwire.FailTemporaryChannelFailure:
- default:
- t.Fatalf("incorrect error, expected temporary channel failure, "+
- "instead have: %v", err)
- }
-}
-
-// TestUpdateForwardingPolicy tests that the forwarding policy for a link is
-// able to be updated properly. We'll first create an HTLC that meets the
-// specified policy, assert that it succeeds, update the policy (to invalidate
-// the prior HTLC), and then ensure that the HTLC is rejected.
-func TestUpdateForwardingPolicy(t *testing.T) {
- t.Parallel()
-
- channels, cleanUp, _, err := createClusterChannels(
- btcutil.UnitsPerCoin()*5,
- btcutil.UnitsPerCoin()*5)
- if err != nil {
- t.Fatalf("unable to create channel: %v", err)
- }
- defer cleanUp()
-
- n := newThreeHopNetwork(t, channels.aliceToBob, channels.bobToAlice,
- channels.bobToCarol, channels.carolToBob, testStartingHeight)
- if err := n.start(); err != nil {
- t.Fatal(err)
- }
- defer n.stop()
-
- carolBandwidthBefore := n.carolChannelLink.Bandwidth()
- firstBobBandwidthBefore := n.firstBobChannelLink.Bandwidth()
- secondBobBandwidthBefore := n.secondBobChannelLink.Bandwidth()
- aliceBandwidthBefore := n.aliceChannelLink.Bandwidth()
-
- amountNoFee := lnwire.NewMSatFromSatoshis(10)
- htlcAmt, htlcExpiry, hops := generateHops(amountNoFee,
- testStartingHeight,
- n.firstBobChannelLink, n.carolChannelLink)
-
- // First, send this 10 mSAT payment over the three hops, the payment
- // should succeed, and all balances should be updated accordingly.
- firstHop := n.firstBobChannelLink.ShortChanID()
- payResp, err := makePayment(
- n.aliceServer, n.carolServer, firstHop, hops, amountNoFee,
- htlcAmt, htlcExpiry,
- ).Wait(30 * time.Second)
- if err != nil {
- t.Fatalf("unable to send payment: %v", err)
- }
-
- // Carol's invoice should now be shown as settled as the payment
- // succeeded.
- invoice, err := n.carolServer.registry.LookupInvoice(payResp)
- if err != nil {
- t.Fatalf("unable to get invoice: %v", err)
- }
- if invoice.State != channeldb.ContractSettled {
- t.Fatal("carol invoice haven't been settled")
- }
-
- expectedAliceBandwidth := aliceBandwidthBefore - htlcAmt
- if expectedAliceBandwidth != n.aliceChannelLink.Bandwidth() {
- t.Fatalf("channel bandwidth incorrect: expected %v, got %v",
- expectedAliceBandwidth, n.aliceChannelLink.Bandwidth())
- }
- expectedBobBandwidth1 := firstBobBandwidthBefore + htlcAmt
- if expectedBobBandwidth1 != n.firstBobChannelLink.Bandwidth() {
- t.Fatalf("channel bandwidth incorrect: expected %v, got %v",
- expectedBobBandwidth1, n.firstBobChannelLink.Bandwidth())
- }
- expectedBobBandwidth2 := secondBobBandwidthBefore - amountNoFee
- if expectedBobBandwidth2 != n.secondBobChannelLink.Bandwidth() {
- t.Fatalf("channel bandwidth incorrect: expected %v, got %v",
- expectedBobBandwidth2, n.secondBobChannelLink.Bandwidth())
- }
- expectedCarolBandwidth := carolBandwidthBefore + amountNoFee
- if expectedCarolBandwidth != n.carolChannelLink.Bandwidth() {
- t.Fatalf("channel bandwidth incorrect: expected %v, got %v",
- expectedCarolBandwidth, n.carolChannelLink.Bandwidth())
- }
-
- // Now we'll update Bob's policy to jack up his free rate to an extent
- // that'll cause him to reject the same HTLC that we just sent.
- //
- // TODO(roasbeef): should implement grace period within link policy
- // update logic
- newPolicy := n.globalPolicy
- newPolicy.BaseFee = lnwire.NewMSatFromSatoshis(1000)
- n.secondBobChannelLink.UpdateForwardingPolicy(newPolicy)
-
- // Next, we'll send the payment again, using the exact same per-hop
- // payload for each node. This payment should fail as it won't factor
- // in Bob's new fee policy.
- _, err = makePayment(
- n.aliceServer, n.carolServer, firstHop, hops, amountNoFee,
- htlcAmt, htlcExpiry,
- ).Wait(30 * time.Second)
- if err == nil {
- t.Fatalf("payment should've been rejected")
- }
-
- errr := er.Wrapped(err)
- rtErr, ok := errr.(ClearTextError)
- if !ok {
- t.Fatalf("expected a ClearTextError, instead got (%T): %v", err, err)
- }
-
- switch rtErr.WireMessage().(type) {
- case *lnwire.FailFeeInsufficient:
- default:
- t.Fatalf("expected FailFeeInsufficient instead got: %v", err)
- }
-
- // Reset the policy so we can then test updating the max HTLC policy.
- n.secondBobChannelLink.UpdateForwardingPolicy(n.globalPolicy)
-
- // As a sanity check, ensure the original payment now succeeds again.
- _, err = makePayment(
- n.aliceServer, n.carolServer, firstHop, hops, amountNoFee,
- htlcAmt, htlcExpiry,
- ).Wait(30 * time.Second)
- if err != nil {
- t.Fatalf("unable to send payment: %v", err)
- }
-
- // Now we'll update Bob's policy to lower his max HTLC to an extent
- // that'll cause him to reject the same HTLC that we just sent.
- newPolicy = n.globalPolicy
- newPolicy.MaxHTLC = amountNoFee - 1
- n.secondBobChannelLink.UpdateForwardingPolicy(newPolicy)
-
- // Next, we'll send the payment again, using the exact same per-hop
- // payload for each node. This payment should fail as it won't factor
- // in Bob's new max HTLC policy.
- _, err = makePayment(
- n.aliceServer, n.carolServer, firstHop, hops, amountNoFee,
- htlcAmt, htlcExpiry,
- ).Wait(30 * time.Second)
- if err == nil {
- t.Fatalf("payment should've been rejected")
- }
-
- errr = er.Wrapped(err)
- rtErr, ok = errr.(ClearTextError)
- if !ok {
- t.Fatalf("expected a ClearTextError, instead got (%T): %v",
- err, err)
- }
-
- switch rtErr.WireMessage().(type) {
- case *lnwire.FailTemporaryChannelFailure:
- default:
- t.Fatalf("expected TemporaryChannelFailure, instead got: %v",
- err)
- }
-}
-
-// TestChannelLinkMultiHopInsufficientPayment checks that we receive error if
-// bob<->alice channel has insufficient BTC capacity/bandwidth. In this test we
-// send the payment from Carol to Alice over Bob peer. (Carol -> Bob -> Alice)
-func TestChannelLinkMultiHopInsufficientPayment(t *testing.T) {
- t.Parallel()
-
- channels, cleanUp, _, err := createClusterChannels(
- btcutil.UnitsPerCoin()*3,
- btcutil.UnitsPerCoin()*5)
- if err != nil {
- t.Fatalf("unable to create channel: %v", err)
- }
- defer cleanUp()
-
- n := newThreeHopNetwork(t, channels.aliceToBob, channels.bobToAlice,
- channels.bobToCarol, channels.carolToBob, testStartingHeight)
- if err := n.start(); err != nil {
- t.Fatalf("unable to start three hop network: %v", err)
- }
- defer n.stop()
-
- carolBandwidthBefore := n.carolChannelLink.Bandwidth()
- firstBobBandwidthBefore := n.firstBobChannelLink.Bandwidth()
- secondBobBandwidthBefore := n.secondBobChannelLink.Bandwidth()
- aliceBandwidthBefore := n.aliceChannelLink.Bandwidth()
-
- // We'll attempt to send 4 BTC although the alice-to-bob channel only
- // has 3 BTC total capacity. As a result, this payment should be
- // rejected.
- amount := lnwire.NewMSatFromSatoshis(4 * btcutil.UnitsPerCoin())
- htlcAmt, totalTimelock, hops := generateHops(amount, testStartingHeight,
- n.firstBobChannelLink, n.carolChannelLink)
-
- // Wait for:
- // * HTLC add request to be sent to from Alice to Bob.
- // * Alice<->Bob commitment states to be updated.
- // * Bob trying to add HTLC add request in Bob<->Carol channel.
- // * Cancel HTLC request to be sent back from Bob to Alice.
- // * user notification to be sent.
-
- receiver := n.carolServer
- firstHop := n.firstBobChannelLink.ShortChanID()
- rhash, err := makePayment(
- n.aliceServer, n.carolServer, firstHop, hops, amount, htlcAmt,
- totalTimelock,
- ).Wait(30 * time.Second)
- if err == nil {
- t.Fatal("error haven't been received")
- }
- assertFailureCode(t, err, lnwire.CodeTemporaryChannelFailure)
-
- // Wait for Alice to receive the revocation.
- //
- // TODO(roasbeef): add in ntfn hook for state transition completion
- time.Sleep(100 * time.Millisecond)
-
- // Check that alice invoice wasn't settled and bandwidth of htlc
- // links hasn't been changed.
- invoice, err := receiver.registry.LookupInvoice(rhash)
- if err != nil {
- t.Fatalf("unable to get invoice: %v", err)
- }
- if invoice.State == channeldb.ContractSettled {
- t.Fatal("carol invoice have been settled")
- }
-
- if n.aliceChannelLink.Bandwidth() != aliceBandwidthBefore {
- t.Fatal("the bandwidth of alice channel link which handles " +
- "alice->bob channel should be the same")
- }
-
- if n.firstBobChannelLink.Bandwidth() != firstBobBandwidthBefore {
- t.Fatal("the bandwidth of bob channel link which handles " +
- "alice->bob channel should be the same")
- }
-
- if n.secondBobChannelLink.Bandwidth() != secondBobBandwidthBefore {
- t.Fatal("the bandwidth of bob channel link which handles " +
- "bob->carol channel should be the same")
- }
-
- if n.carolChannelLink.Bandwidth() != carolBandwidthBefore {
- t.Fatal("the bandwidth of carol channel link which handles " +
- "bob->carol channel should be the same")
- }
-}
-
-// TestChannelLinkMultiHopUnknownPaymentHash checks that we receive remote error
-// from Alice if she received not suitable payment hash for htlc.
-func TestChannelLinkMultiHopUnknownPaymentHash(t *testing.T) {
- t.Parallel()
-
- channels, cleanUp, _, err := createClusterChannels(
- btcutil.UnitsPerCoin()*5,
- btcutil.UnitsPerCoin()*5)
- if err != nil {
- t.Fatalf("unable to create channel: %v", err)
- }
- defer cleanUp()
-
- n := newThreeHopNetwork(t, channels.aliceToBob, channels.bobToAlice,
- channels.bobToCarol, channels.carolToBob, testStartingHeight)
- if err := n.start(); err != nil {
- t.Fatalf("unable to start three hop network: %v", err)
- }
- defer n.stop()
-
- carolBandwidthBefore := n.carolChannelLink.Bandwidth()
- firstBobBandwidthBefore := n.firstBobChannelLink.Bandwidth()
- secondBobBandwidthBefore := n.secondBobChannelLink.Bandwidth()
- aliceBandwidthBefore := n.aliceChannelLink.Bandwidth()
-
- amount := lnwire.NewMSatFromSatoshis(btcutil.UnitsPerCoin())
-
- htlcAmt, totalTimelock, hops := generateHops(amount, testStartingHeight,
- n.firstBobChannelLink, n.carolChannelLink)
- blob, err := generateRoute(hops...)
- if err != nil {
- t.Fatal(err)
- }
-
- // Generate payment invoice and htlc, but don't add this invoice to the
- // receiver registry. This should trigger an unknown payment hash
- // failure.
- _, htlc, pid, err := generatePayment(
- amount, htlcAmt, totalTimelock, blob,
- )
- if err != nil {
- t.Fatal(err)
- }
-
- // Send payment and expose err channel.
- err = n.aliceServer.htlcSwitch.SendHTLC(
- n.firstBobChannelLink.ShortChanID(), pid, htlc,
- )
- if err != nil {
- t.Fatalf("unable to get send payment: %v", err)
- }
-
- resultChan, err := n.aliceServer.htlcSwitch.GetPaymentResult(
- pid, htlc.PaymentHash, newMockDeobfuscator(),
- )
- if err != nil {
- t.Fatalf("unable to get payment result: %v", err)
- }
-
- var result *PaymentResult
- var ok bool
- select {
-
- case result, ok = <-resultChan:
- if !ok {
- t.Fatalf("unexpected shutdown")
- }
- case <-time.After(5 * time.Second):
- t.Fatalf("no result arrive")
- }
-
- assertFailureCode(
- t, result.Error, lnwire.CodeIncorrectOrUnknownPaymentDetails,
- )
-
- // Wait for Alice to receive the revocation.
- time.Sleep(100 * time.Millisecond)
-
- if n.aliceChannelLink.Bandwidth() != aliceBandwidthBefore {
- t.Fatal("the bandwidth of alice channel link which handles " +
- "alice->bob channel should be the same")
- }
-
- if n.firstBobChannelLink.Bandwidth() != firstBobBandwidthBefore {
- t.Fatal("the bandwidth of bob channel link which handles " +
- "alice->bob channel should be the same")
- }
-
- if n.secondBobChannelLink.Bandwidth() != secondBobBandwidthBefore {
- t.Fatal("the bandwidth of bob channel link which handles " +
- "bob->carol channel should be the same")
- }
-
- if n.carolChannelLink.Bandwidth() != carolBandwidthBefore {
- t.Fatal("the bandwidth of carol channel link which handles " +
- "bob->carol channel should be the same")
- }
-}
-
-// TestChannelLinkMultiHopUnknownNextHop construct the chain of hops
-// Carol<->Bob<->Alice and checks that we receive remote error from Bob if he
-// has no idea about next hop (hop might goes down and routing info not updated
-// yet).
-func TestChannelLinkMultiHopUnknownNextHop(t *testing.T) {
- t.Parallel()
-
- channels, cleanUp, _, err := createClusterChannels(
- btcutil.UnitsPerCoin()*5,
- btcutil.UnitsPerCoin()*5)
- if err != nil {
- t.Fatalf("unable to create channel: %v", err)
- }
- defer cleanUp()
-
- n := newThreeHopNetwork(t, channels.aliceToBob, channels.bobToAlice,
- channels.bobToCarol, channels.carolToBob, testStartingHeight)
- if err := n.start(); err != nil {
- t.Fatal(err)
- }
- defer n.stop()
-
- carolBandwidthBefore := n.carolChannelLink.Bandwidth()
- firstBobBandwidthBefore := n.firstBobChannelLink.Bandwidth()
- secondBobBandwidthBefore := n.secondBobChannelLink.Bandwidth()
- aliceBandwidthBefore := n.aliceChannelLink.Bandwidth()
-
- amount := lnwire.NewMSatFromSatoshis(btcutil.UnitsPerCoin())
- htlcAmt, totalTimelock, hops := generateHops(amount, testStartingHeight,
- n.firstBobChannelLink, n.carolChannelLink)
-
- // Remove bob's outgoing link with Carol. This will cause him to fail
- // back the payment to Alice since he is unaware of Carol when the
- // payment comes across.
- bobChanID := lnwire.NewChanIDFromOutPoint(
- &channels.bobToCarol.State().FundingOutpoint,
- )
- n.bobServer.htlcSwitch.RemoveLink(bobChanID)
-
- firstHop := n.firstBobChannelLink.ShortChanID()
- receiver := n.carolServer
- rhash, err := makePayment(
- n.aliceServer, receiver, firstHop, hops, amount, htlcAmt,
- totalTimelock).Wait(30 * time.Second)
- if err == nil {
- t.Fatal("error haven't been received")
- }
- errr := er.Wrapped(err)
- rtErr, ok := errr.(ClearTextError)
- if !ok {
- t.Fatalf("expected ClearTextError")
- }
-
- if _, ok = rtErr.WireMessage().(*lnwire.FailUnknownNextPeer); !ok {
- t.Fatalf("wrong error has been received: %T",
- rtErr.WireMessage())
- }
-
- // Wait for Alice to receive the revocation.
- //
- // TODO(roasbeef): add in ntfn hook for state transition completion
- time.Sleep(100 * time.Millisecond)
-
- // Check that alice invoice wasn't settled and bandwidth of htlc
- // links hasn't been changed.
- invoice, err := receiver.registry.LookupInvoice(rhash)
- if err != nil {
- t.Fatalf("unable to get invoice: %v", err)
- }
- if invoice.State == channeldb.ContractSettled {
- t.Fatal("carol invoice have been settled")
- }
-
- if n.aliceChannelLink.Bandwidth() != aliceBandwidthBefore {
- t.Fatal("the bandwidth of alice channel link which handles " +
- "alice->bob channel should be the same")
- }
-
- if n.firstBobChannelLink.Bandwidth() != firstBobBandwidthBefore {
- t.Fatal("the bandwidth of bob channel link which handles " +
- "alice->bob channel should be the same")
- }
-
- if n.secondBobChannelLink.Bandwidth() != secondBobBandwidthBefore {
- t.Fatal("the bandwidth of bob channel link which handles " +
- "bob->carol channel should be the same")
- }
-
- if n.carolChannelLink.Bandwidth() != carolBandwidthBefore {
- t.Fatal("the bandwidth of carol channel link which handles " +
- "bob->carol channel should be the same")
- }
-
- // Load the forwarding packages for Bob's incoming link. The payment
- // should have been rejected by the switch, and the AddRef in this link
- // should be acked by the failed payment.
- bobInFwdPkgs, err := channels.bobToAlice.State().LoadFwdPkgs()
- if err != nil {
- t.Fatalf("unable to load bob's fwd pkgs: %v", err)
- }
-
- // There should be exactly two forward packages, as a full state
- // transition requires two commitment dances.
- if len(bobInFwdPkgs) != 2 {
- t.Fatalf("bob should have exactly 2 fwdpkgs, has %d",
- len(bobInFwdPkgs))
- }
-
- // Only one of the forwarding package should have an Add in it, the
- // other will be empty. Either way, both AckFilters should be fully
- // acked.
- for _, fwdPkg := range bobInFwdPkgs {
- if !fwdPkg.AckFilter.IsFull() {
- t.Fatalf("fwdpkg chanid=%v height=%d AckFilter is not "+
- "fully acked", fwdPkg.Source, fwdPkg.Height)
- }
- }
-}
-
-// TestChannelLinkMultiHopDecodeError checks that we send HTLC cancel if
-// decoding of onion blob failed.
-func TestChannelLinkMultiHopDecodeError(t *testing.T) {
- t.Parallel()
-
- channels, cleanUp, _, err := createClusterChannels(
- btcutil.UnitsPerCoin()*3,
- btcutil.UnitsPerCoin()*5)
- if err != nil {
- t.Fatalf("unable to create channel: %v", err)
- }
- defer cleanUp()
-
- n := newThreeHopNetwork(t, channels.aliceToBob, channels.bobToAlice,
- channels.bobToCarol, channels.carolToBob, testStartingHeight)
- if err := n.start(); err != nil {
- t.Fatalf("unable to start three hop network: %v", err)
- }
- defer n.stop()
-
- // Replace decode function with another which throws an error.
- n.carolChannelLink.cfg.ExtractErrorEncrypter = func(
- *btcec.PublicKey) (hop.ErrorEncrypter, lnwire.FailCode) {
- return nil, lnwire.CodeInvalidOnionVersion
- }
-
- carolBandwidthBefore := n.carolChannelLink.Bandwidth()
- firstBobBandwidthBefore := n.firstBobChannelLink.Bandwidth()
- secondBobBandwidthBefore := n.secondBobChannelLink.Bandwidth()
- aliceBandwidthBefore := n.aliceChannelLink.Bandwidth()
-
- amount := lnwire.NewMSatFromSatoshis(btcutil.UnitsPerCoin())
- htlcAmt, totalTimelock, hops := generateHops(amount, testStartingHeight,
- n.firstBobChannelLink, n.carolChannelLink)
-
- receiver := n.carolServer
- firstHop := n.firstBobChannelLink.ShortChanID()
- rhash, err := makePayment(
- n.aliceServer, n.carolServer, firstHop, hops, amount, htlcAmt,
- totalTimelock,
- ).Wait(30 * time.Second)
- if err == nil {
- t.Fatal("error haven't been received")
- }
-
- errr := er.Wrapped(err)
- rtErr, ok := errr.(ClearTextError)
- if !ok {
- t.Fatalf("expected a ClearTextError, instead got: %T", err)
- }
-
- switch rtErr.WireMessage().(type) {
- case *lnwire.FailInvalidOnionVersion:
- default:
- t.Fatalf("wrong error have been received: %v", err)
- }
-
- // Wait for Bob to receive the revocation.
- time.Sleep(100 * time.Millisecond)
-
- // Check that alice invoice wasn't settled and bandwidth of htlc
- // links hasn't been changed.
- invoice, err := receiver.registry.LookupInvoice(rhash)
- if err != nil {
- t.Fatalf("unable to get invoice: %v", err)
- }
- if invoice.State == channeldb.ContractSettled {
- t.Fatal("carol invoice have been settled")
- }
-
- if n.aliceChannelLink.Bandwidth() != aliceBandwidthBefore {
- t.Fatal("the bandwidth of alice channel link which handles " +
- "alice->bob channel should be the same")
- }
-
- if n.firstBobChannelLink.Bandwidth() != firstBobBandwidthBefore {
- t.Fatal("the bandwidth of bob channel link which handles " +
- "alice->bob channel should be the same")
- }
-
- if n.secondBobChannelLink.Bandwidth() != secondBobBandwidthBefore {
- t.Fatal("the bandwidth of bob channel link which handles " +
- "bob->carol channel should be the same")
- }
-
- if n.carolChannelLink.Bandwidth() != carolBandwidthBefore {
- t.Fatal("the bandwidth of carol channel link which handles " +
- "bob->carol channel should be the same")
- }
-}
-
-// TestChannelLinkExpiryTooSoonExitNode tests that if we send an HTLC to a node
-// with an expiry that is already expired, or too close to the current block
-// height, then it will cancel the HTLC.
-func TestChannelLinkExpiryTooSoonExitNode(t *testing.T) {
- t.Parallel()
-
- // The starting height for this test will be 200. So we'll base all
- // HTLC starting points off of that.
- channels, cleanUp, _, err := createClusterChannels(
- btcutil.UnitsPerCoin()*3,
- btcutil.UnitsPerCoin()*5)
- if err != nil {
- t.Fatalf("unable to create channel: %v", err)
- }
- defer cleanUp()
-
- const startingHeight = 200
- n := newThreeHopNetwork(t, channels.aliceToBob, channels.bobToAlice,
- channels.bobToCarol, channels.carolToBob, startingHeight)
- if err := n.start(); err != nil {
- t.Fatalf("unable to start three hop network: %v", err)
- }
- defer n.stop()
-
- amount := lnwire.NewMSatFromSatoshis(btcutil.UnitsPerCoin())
-
- // We'll craft an HTLC packet, but set the final hop CLTV to 5 blocks
- // after the current true height. This is less than the test invoice
- // cltv delta of 6, so we expect the incoming htlc to be failed by the
- // exit hop.
- htlcAmt, totalTimelock, hops := generateHops(amount,
- startingHeight-1, n.firstBobChannelLink)
-
- // Now we'll send out the payment from Alice to Bob.
- firstHop := n.firstBobChannelLink.ShortChanID()
- _, err = makePayment(
- n.aliceServer, n.bobServer, firstHop, hops, amount, htlcAmt,
- totalTimelock,
- ).Wait(30 * time.Second)
-
- // The payment should've failed as the time lock value was in the
- // _past_.
- if err == nil {
- t.Fatalf("payment should have failed due to a too early " +
- "time lock value")
- }
-
- errr := er.Wrapped(err)
- rtErr, ok := errr.(ClearTextError)
- if !ok {
- t.Fatalf("expected a ClearTextError, instead got: %T %v",
- rtErr, err)
- }
-
- switch rtErr.WireMessage().(type) {
- case *lnwire.FailIncorrectDetails:
- default:
- t.Fatalf("expected incorrect_or_unknown_payment_details, "+
- "instead have: %v", err)
- }
-}
-
-// TestChannelLinkExpiryTooSoonExitNode tests that if we send a multi-hop HTLC,
-// and the time lock is too early for an intermediate node, then they cancel
-// the HTLC back to the sender.
-func TestChannelLinkExpiryTooSoonMidNode(t *testing.T) {
- t.Parallel()
-
- // The starting height for this test will be 200. So we'll base all
- // HTLC starting points off of that.
- channels, cleanUp, _, err := createClusterChannels(
- btcutil.UnitsPerCoin()*3,
- btcutil.UnitsPerCoin()*5)
- if err != nil {
- t.Fatalf("unable to create channel: %v", err)
- }
- defer cleanUp()
-
- const startingHeight = 200
- n := newThreeHopNetwork(t, channels.aliceToBob, channels.bobToAlice,
- channels.bobToCarol, channels.carolToBob, startingHeight)
- if err := n.start(); err != nil {
- t.Fatalf("unable to start three hop network: %v", err)
- }
- defer n.stop()
-
- amount := lnwire.NewMSatFromSatoshis(btcutil.UnitsPerCoin())
-
- // We'll craft an HTLC packet, but set the starting height to 3 blocks
- // before the current true height. This means that the outgoing time
- // lock of the middle hop will be at starting height + 3 blocks (channel
- // policy time lock delta is 6 blocks). There is an expiry grace delta
- // of 3 blocks relative to the current height, meaning that htlc will
- // not be sent out by the middle hop.
- htlcAmt, totalTimelock, hops := generateHops(amount,
- startingHeight-3, n.firstBobChannelLink, n.carolChannelLink)
-
- // Now we'll send out the payment from Alice to Bob.
- firstHop := n.firstBobChannelLink.ShortChanID()
- _, err = makePayment(
- n.aliceServer, n.bobServer, firstHop, hops, amount, htlcAmt,
- totalTimelock,
- ).Wait(30 * time.Second)
-
- // The payment should've failed as the time lock value was in the
- // _past_.
- if err == nil {
- t.Fatalf("payment should have failed due to a too early " +
- "time lock value")
- }
-
- errr := er.Wrapped(err)
- rtErr, ok := errr.(ClearTextError)
- if !ok {
- t.Fatalf("expected a ClearTextError, instead got: %T: %v",
- rtErr, err)
- }
-
- switch rtErr.WireMessage().(type) {
- case *lnwire.FailExpiryTooSoon:
- default:
- t.Fatalf("incorrect error, expected final time lock too "+
- "early, instead have: %v", err)
- }
-}
-
-// TestChannelLinkSingleHopMessageOrdering test checks ordering of message which
-// flying around between Alice and Bob are correct when Bob sends payments to
-// Alice.
-func TestChannelLinkSingleHopMessageOrdering(t *testing.T) {
- t.Parallel()
-
- channels, cleanUp, _, err := createClusterChannels(
- btcutil.UnitsPerCoin()*3,
- btcutil.UnitsPerCoin()*5)
- if err != nil {
- t.Fatalf("unable to create channel: %v", err)
- }
- defer cleanUp()
-
- n := newThreeHopNetwork(t, channels.aliceToBob, channels.bobToAlice,
- channels.bobToCarol, channels.carolToBob, testStartingHeight)
-
- chanID := n.aliceChannelLink.ChanID()
-
- messages := []expectedMessage{
- {"alice", "bob", &lnwire.ChannelReestablish{}, false},
- {"bob", "alice", &lnwire.ChannelReestablish{}, false},
-
- {"alice", "bob", &lnwire.FundingLocked{}, false},
- {"bob", "alice", &lnwire.FundingLocked{}, false},
-
- {"alice", "bob", &lnwire.UpdateAddHTLC{}, false},
- {"alice", "bob", &lnwire.CommitSig{}, false},
- {"bob", "alice", &lnwire.RevokeAndAck{}, false},
- {"bob", "alice", &lnwire.CommitSig{}, false},
- {"alice", "bob", &lnwire.RevokeAndAck{}, false},
-
- {"bob", "alice", &lnwire.UpdateFulfillHTLC{}, false},
- {"bob", "alice", &lnwire.CommitSig{}, false},
- {"alice", "bob", &lnwire.RevokeAndAck{}, false},
- {"alice", "bob", &lnwire.CommitSig{}, false},
- {"bob", "alice", &lnwire.RevokeAndAck{}, false},
- }
-
- debug := false
- if debug {
- // Log message that alice receives.
- n.aliceServer.intersect(createLogFunc("alice",
- n.aliceChannelLink.ChanID()))
-
- // Log message that bob receives.
- n.bobServer.intersect(createLogFunc("bob",
- n.firstBobChannelLink.ChanID()))
- }
-
- // Check that alice receives messages in right order.
- n.aliceServer.intersect(createInterceptorFunc("[alice] <-- [bob]",
- "alice", messages, chanID, false))
-
- // Check that bob receives messages in right order.
- n.bobServer.intersect(createInterceptorFunc("[alice] --> [bob]",
- "bob", messages, chanID, false))
-
- if err := n.start(); err != nil {
- t.Fatalf("unable to start three hop network: %v", err)
- }
- defer n.stop()
-
- amount := lnwire.NewMSatFromSatoshis(btcutil.UnitsPerCoin())
- htlcAmt, totalTimelock, hops := generateHops(amount, testStartingHeight,
- n.firstBobChannelLink)
-
- // Wait for:
- // * HTLC add request to be sent to bob.
- // * alice<->bob commitment state to be updated.
- // * settle request to be sent back from bob to alice.
- // * alice<->bob commitment state to be updated.
- // * user notification to be sent.
- firstHop := n.firstBobChannelLink.ShortChanID()
- _, err = makePayment(
- n.aliceServer, n.bobServer, firstHop, hops, amount, htlcAmt,
- totalTimelock,
- ).Wait(30 * time.Second)
- if err != nil {
- t.Fatalf("unable to make the payment: %v", err)
- }
-}
-
-type mockPeer struct {
- sync.Mutex
- disconnected bool
- sentMsgs chan lnwire.Message
- quit chan struct{}
-}
-
-func (m *mockPeer) QuitSignal() <-chan struct{} {
- return m.quit
-}
-
-var _ lnpeer.Peer = (*mockPeer)(nil)
-
-func (m *mockPeer) SendMessage(sync bool, msgs ...lnwire.Message) er.R {
- if m.disconnected {
- return er.Errorf("disconnected")
- }
-
- select {
- case m.sentMsgs <- msgs[0]:
- case <-m.quit:
- return er.Errorf("mockPeer shutting down")
- }
- return nil
-}
-func (m *mockPeer) SendMessageLazy(sync bool, msgs ...lnwire.Message) er.R {
- return m.SendMessage(sync, msgs...)
-}
-func (m *mockPeer) AddNewChannel(_ *channeldb.OpenChannel,
- _ <-chan struct{}) er.R {
- return nil
-}
-func (m *mockPeer) WipeChannel(*wire.OutPoint) {}
-func (m *mockPeer) PubKey() [33]byte {
- return [33]byte{}
-}
-func (m *mockPeer) IdentityKey() *btcec.PublicKey {
- return nil
-}
-func (m *mockPeer) Address() net.Addr {
- return nil
-}
-func (m *mockPeer) LocalFeatures() *lnwire.FeatureVector {
- return nil
-}
-func (m *mockPeer) RemoteFeatures() *lnwire.FeatureVector {
- return nil
-}
-
-func newSingleLinkTestHarness(chanAmt, chanReserve btcutil.Amount) (
- ChannelLink, *lnwallet.LightningChannel, chan time.Time, func() er.R,
- func(), func() (*lnwallet.LightningChannel, er.R), er.R) {
-
- var chanIDBytes [8]byte
- if _, err := util.ReadFull(rand.Reader, chanIDBytes[:]); err != nil {
- return nil, nil, nil, nil, nil, nil, err
- }
-
- chanID := lnwire.NewShortChanIDFromInt(
- binary.BigEndian.Uint64(chanIDBytes[:]))
-
- aliceLc, bobLc, fCleanUp, err := createTestChannel(
- alicePrivKey, bobPrivKey, chanAmt, chanAmt,
- chanReserve, chanReserve, chanID,
- )
- if err != nil {
- return nil, nil, nil, nil, nil, nil, err
- }
-
- var (
- decoder = newMockIteratorDecoder()
- obfuscator = NewMockObfuscator()
- alicePeer = &mockPeer{
- sentMsgs: make(chan lnwire.Message, 2000),
- quit: make(chan struct{}),
- }
- globalPolicy = ForwardingPolicy{
- MinHTLCOut: lnwire.NewMSatFromSatoshis(5),
- MaxHTLC: lnwire.NewMSatFromSatoshis(chanAmt),
- BaseFee: lnwire.NewMSatFromSatoshis(1),
- TimeLockDelta: 6,
- }
- invoiceRegistry = newMockRegistry(globalPolicy.TimeLockDelta)
- )
-
- pCache := newMockPreimageCache()
-
- aliceDb := aliceLc.channel.State().Db
- aliceSwitch, err := initSwitchWithDB(testStartingHeight, aliceDb)
- if err != nil {
- return nil, nil, nil, nil, nil, nil, err
- }
-
- // Instantiate with a long interval, so that we can precisely control
- // the firing via force feeding.
- bticker := ticker.NewForce(time.Hour)
- aliceCfg := ChannelLinkConfig{
- FwrdingPolicy: globalPolicy,
- Peer: alicePeer,
- Switch: aliceSwitch,
- Circuits: aliceSwitch.CircuitModifier(),
- ForwardPackets: aliceSwitch.ForwardPackets,
- DecodeHopIterators: decoder.DecodeHopIterators,
- ExtractErrorEncrypter: func(*btcec.PublicKey) (
- hop.ErrorEncrypter, lnwire.FailCode) {
- return obfuscator, lnwire.CodeNone
- },
- FetchLastChannelUpdate: mockGetChanUpdateMessage,
- PreimageCache: pCache,
- OnChannelFailure: func(lnwire.ChannelID,
- lnwire.ShortChannelID, LinkFailureError) {
- },
- UpdateContractSignals: func(*contractcourt.ContractSignals) er.R {
- return nil
- },
- Registry: invoiceRegistry,
- ChainEvents: &contractcourt.ChainEventSubscription{},
- BatchTicker: bticker,
- FwdPkgGCTicker: ticker.NewForce(15 * time.Second),
- PendingCommitTicker: ticker.New(time.Minute),
- // Make the BatchSize and Min/MaxFeeUpdateTimeout large enough
- // to not trigger commit updates automatically during tests.
- BatchSize: 10000,
- MinFeeUpdateTimeout: 30 * time.Minute,
- MaxFeeUpdateTimeout: 40 * time.Minute,
- MaxOutgoingCltvExpiry: DefaultMaxOutgoingCltvExpiry,
- MaxFeeAllocation: DefaultMaxLinkFeeAllocation,
- NotifyActiveLink: func(wire.OutPoint) {},
- NotifyActiveChannel: func(wire.OutPoint) {},
- NotifyInactiveChannel: func(wire.OutPoint) {},
- HtlcNotifier: aliceSwitch.cfg.HtlcNotifier,
- }
-
- aliceLink := NewChannelLink(aliceCfg, aliceLc.channel)
- start := func() er.R {
- return aliceSwitch.AddLink(aliceLink)
- }
- go func() {
- for {
- select {
- case <-aliceLink.(*channelLink).htlcUpdates:
- case <-aliceLink.(*channelLink).quit:
- return
- }
- }
- }()
-
- cleanUp := func() {
- close(alicePeer.quit)
- defer fCleanUp()
- }
-
- return aliceLink, bobLc.channel, bticker.Force, start, cleanUp,
- aliceLc.restore, nil
-}
-
-func assertLinkBandwidth(t *testing.T, link ChannelLink,
- expected lnwire.MilliSatoshi) {
-
- currentBandwidth := link.Bandwidth()
- _, _, line, _ := runtime.Caller(1)
- if currentBandwidth != expected {
- t.Fatalf("line %v: alice's link bandwidth is incorrect: "+
- "expected %v, got %v", line, expected, currentBandwidth)
- }
-}
-
-// handleStateUpdate handles the messages sent from the link after
-// the batch ticker has triggered a state update.
-func handleStateUpdate(link *channelLink,
- remoteChannel *lnwallet.LightningChannel) er.R {
- sentMsgs := link.cfg.Peer.(*mockPeer).sentMsgs
- var msg lnwire.Message
- select {
- case msg = <-sentMsgs:
- case <-time.After(60 * time.Second):
- return er.Errorf("did not receive CommitSig from Alice")
- }
-
- // The link should be sending a commit sig at this point.
- commitSig, ok := msg.(*lnwire.CommitSig)
- if !ok {
- return er.Errorf("expected CommitSig, got %T", msg)
- }
-
- // Let the remote channel receive the commit sig, and
- // respond with a revocation + commitsig.
- err := remoteChannel.ReceiveNewCommitment(
- commitSig.CommitSig, commitSig.HtlcSigs)
- if err != nil {
- return err
- }
-
- remoteRev, _, err := remoteChannel.RevokeCurrentCommitment()
- if err != nil {
- return err
- }
- link.HandleChannelUpdate(remoteRev)
-
- remoteSig, remoteHtlcSigs, _, err := remoteChannel.SignNextCommitment()
- if err != nil {
- return err
- }
- commitSig = &lnwire.CommitSig{
- CommitSig: remoteSig,
- HtlcSigs: remoteHtlcSigs,
- }
- link.HandleChannelUpdate(commitSig)
-
- // This should make the link respond with a revocation.
- select {
- case msg = <-sentMsgs:
- case <-time.After(60 * time.Second):
- return er.Errorf("did not receive RevokeAndAck from Alice")
- }
-
- revoke, ok := msg.(*lnwire.RevokeAndAck)
- if !ok {
- return er.Errorf("expected RevokeAndAck got %T", msg)
- }
- _, _, _, _, err = remoteChannel.ReceiveRevocation(revoke)
- if err != nil {
- return er.Errorf("unable to receive "+
- "revocation: %v", err)
- }
-
- return nil
-}
-
-// updateState is used exchange the messages necessary to do a full state
-// transition. If initiateUpdate=true, then this call will make the link
-// trigger an update by sending on the batchTick channel, if not, it will
-// make the remoteChannel initiate the state update.
-func updateState(batchTick chan time.Time, link *channelLink,
- remoteChannel *lnwallet.LightningChannel,
- initiateUpdate bool) er.R {
- sentMsgs := link.cfg.Peer.(*mockPeer).sentMsgs
-
- if initiateUpdate {
- // Trigger update by ticking the batchTicker.
- select {
- case batchTick <- time.Now():
- case <-link.quit:
- return er.Errorf("link shutting down")
- }
- return handleStateUpdate(link, remoteChannel)
- }
-
- // The remote is triggering the state update, emulate this by
- // signing and sending CommitSig to the link.
- remoteSig, remoteHtlcSigs, _, err := remoteChannel.SignNextCommitment()
- if err != nil {
- return err
- }
-
- commitSig := &lnwire.CommitSig{
- CommitSig: remoteSig,
- HtlcSigs: remoteHtlcSigs,
- }
- link.HandleChannelUpdate(commitSig)
-
- // The link should respond with a revocation + commit sig.
- var msg lnwire.Message
- select {
- case msg = <-sentMsgs:
- case <-time.After(60 * time.Second):
- return er.Errorf("did not receive RevokeAndAck from Alice")
- }
-
- revoke, ok := msg.(*lnwire.RevokeAndAck)
- if !ok {
- return er.Errorf("expected RevokeAndAck got %T",
- msg)
- }
- _, _, _, _, err = remoteChannel.ReceiveRevocation(revoke)
- if err != nil {
- return er.Errorf("unable to receive "+
- "revocation: %v", err)
- }
- select {
- case msg = <-sentMsgs:
- case <-time.After(60 * time.Second):
- return er.Errorf("did not receive CommitSig from Alice")
- }
-
- commitSig, ok = msg.(*lnwire.CommitSig)
- if !ok {
- return er.Errorf("expected CommitSig, got %T", msg)
- }
-
- err = remoteChannel.ReceiveNewCommitment(
- commitSig.CommitSig, commitSig.HtlcSigs)
- if err != nil {
- return err
- }
-
- // Lastly, send a revocation back to the link.
- remoteRev, _, err := remoteChannel.RevokeCurrentCommitment()
- if err != nil {
- return err
- }
- link.HandleChannelUpdate(remoteRev)
-
- // Sleep to make sure Alice has handled the remote revocation.
- time.Sleep(500 * time.Millisecond)
-
- return nil
-}
-
-// TestChannelLinkBandwidthConsistency ensures that the reported bandwidth of a
-// given ChannelLink is properly updated in response to downstream messages
-// from the switch, and upstream messages from its channel peer.
-//
-// TODO(roasbeef): add sync hook into packet processing so can eliminate all
-// sleep in this test and the one below
-func TestChannelLinkBandwidthConsistency(t *testing.T) {
- if !build.IsDevBuild() {
- t.Fatalf("htlcswitch tests must be run with '-tags debug")
- }
- t.Parallel()
-
- // TODO(roasbeef): replace manual bit twiddling with concept of
- // resource cost for packets?
- // * or also able to consult link
-
- // We'll start the test by creating a single instance of
- chanAmt := btcutil.UnitsPerCoin() * 5
-
- aliceLink, bobChannel, tmr, start, cleanUp, _, err :=
- newSingleLinkTestHarness(chanAmt, 0)
- if err != nil {
- t.Fatalf("unable to create link: %v", err)
- }
- defer cleanUp()
-
- if err := start(); err != nil {
- t.Fatalf("unable to start test harness: %v", err)
- }
-
- var (
- carolChanID = lnwire.NewShortChanIDFromInt(3)
- mockBlob [lnwire.OnionPacketSize]byte
- coreChan = aliceLink.(*channelLink).channel
- coreLink = aliceLink.(*channelLink)
- defaultCommitFee = coreChan.StateSnapshot().CommitFee
- aliceStartingBandwidth = aliceLink.Bandwidth()
- aliceMsgs = coreLink.cfg.Peer.(*mockPeer).sentMsgs
- )
-
- // We put Alice into hodl.ExitSettle mode, such that she won't settle
- // incoming HTLCs automatically.
- coreLink.cfg.HodlMask = hodl.MaskFromFlags(hodl.ExitSettle)
-
- estimator := chainfee.NewStaticEstimator(6000, 0)
- feePerKw, err := estimator.EstimateFeePerKW(1)
- if err != nil {
- t.Fatalf("unable to query fee estimator: %v", err)
- }
- htlcFee := lnwire.NewMSatFromSatoshis(
- feePerKw.FeeForWeight(input.HTLCWeight),
- )
-
- // The starting bandwidth of the channel should be exactly the amount
- // that we created the channel between her and Bob, minus the
- // commitment fee and fee for adding an additional HTLC.
- expectedBandwidth := lnwire.NewMSatFromSatoshis(
- chanAmt-defaultCommitFee,
- ) - htlcFee
- assertLinkBandwidth(t, aliceLink, expectedBandwidth)
-
- // Next, we'll create an HTLC worth 1 BTC, and send it into the link as
- // a switch initiated payment. The resulting bandwidth should
- // now be decremented to reflect the new HTLC.
- htlcAmt := lnwire.NewMSatFromSatoshis(btcutil.UnitsPerCoin())
- invoice, htlc, _, err := generatePayment(
- htlcAmt, htlcAmt, 5, mockBlob,
- )
- if err != nil {
- t.Fatalf("unable to create payment: %v", err)
- }
- addPkt := htlcPacket{
- htlc: htlc,
- incomingChanID: hop.Source,
- incomingHTLCID: 0,
- obfuscator: NewMockObfuscator(),
- }
-
- circuit := makePaymentCircuit(&htlc.PaymentHash, &addPkt)
- _, err = coreLink.cfg.Switch.commitCircuits(&circuit)
- if err != nil {
- t.Fatalf("unable to commit circuit: %v", err)
- }
-
- addPkt.circuit = &circuit
- if err := aliceLink.HandleSwitchPacket(&addPkt); err != nil {
- t.Fatalf("unable to handle switch packet: %v", err)
- }
- time.Sleep(time.Millisecond * 500)
-
- // The resulting bandwidth should reflect that Alice is paying the
- // htlc amount in addition to the htlc fee.
- assertLinkBandwidth(t, aliceLink, aliceStartingBandwidth-htlcAmt-htlcFee)
-
- // Alice should send the HTLC to Bob.
- var msg lnwire.Message
- select {
- case msg = <-aliceMsgs:
- case <-time.After(15 * time.Second):
- t.Fatalf("did not receive message")
- }
-
- addHtlc, ok := msg.(*lnwire.UpdateAddHTLC)
- if !ok {
- t.Fatalf("expected UpdateAddHTLC, got %T", msg)
- }
-
- bobIndex, err := bobChannel.ReceiveHTLC(addHtlc)
- if err != nil {
- t.Fatalf("bob failed receiving htlc: %v", err)
- }
-
- // Lock in the HTLC.
- if err := updateState(tmr, coreLink, bobChannel, true); err != nil {
- t.Fatalf("unable to update state: %v", err)
- }
- // Locking in the HTLC should not change Alice's bandwidth.
- assertLinkBandwidth(t, aliceLink, aliceStartingBandwidth-htlcAmt-htlcFee)
-
- // If we now send in a valid HTLC settle for the prior HTLC we added,
- // then the bandwidth should remain unchanged as the remote party will
- // gain additional channel balance.
- err = bobChannel.SettleHTLC(*invoice.Terms.PaymentPreimage, bobIndex, nil, nil, nil)
- if err != nil {
- t.Fatalf("unable to settle htlc: %v", err)
- }
- htlcSettle := &lnwire.UpdateFulfillHTLC{
- ID: 0,
- PaymentPreimage: *invoice.Terms.PaymentPreimage,
- }
- aliceLink.HandleChannelUpdate(htlcSettle)
- time.Sleep(time.Millisecond * 500)
-
- // Since the settle is not locked in yet, Alice's bandwidth should still
- // reflect that she has to pay the fee.
- assertLinkBandwidth(t, aliceLink, aliceStartingBandwidth-htlcAmt-htlcFee)
-
- // Lock in the settle.
- if err := updateState(tmr, coreLink, bobChannel, false); err != nil {
- t.Fatalf("unable to update state: %v", err)
- }
-
- // Now that it is settled, Alice should have gotten the htlc fee back.
- assertLinkBandwidth(t, aliceLink, aliceStartingBandwidth-htlcAmt)
-
- // Next, we'll add another HTLC initiated by the switch (of the same
- // amount as the prior one).
- _, htlc, _, err = generatePayment(htlcAmt, htlcAmt, 5, mockBlob)
- if err != nil {
- t.Fatalf("unable to create payment: %v", err)
- }
- addPkt = htlcPacket{
- htlc: htlc,
- incomingChanID: hop.Source,
- incomingHTLCID: 1,
- obfuscator: NewMockObfuscator(),
- }
-
- circuit = makePaymentCircuit(&htlc.PaymentHash, &addPkt)
- _, err = coreLink.cfg.Switch.commitCircuits(&circuit)
- if err != nil {
- t.Fatalf("unable to commit circuit: %v", err)
- }
-
- addPkt.circuit = &circuit
- if err := aliceLink.HandleSwitchPacket(&addPkt); err != nil {
- t.Fatalf("unable to handle switch packet: %v", err)
- }
- time.Sleep(time.Millisecond * 500)
-
- // Again, Alice's bandwidth decreases by htlcAmt+htlcFee.
- assertLinkBandwidth(t, aliceLink, aliceStartingBandwidth-2*htlcAmt-htlcFee)
-
- // Alice will send the HTLC to Bob.
- select {
- case msg = <-aliceMsgs:
- case <-time.After(15 * time.Second):
- t.Fatalf("did not receive message")
- }
-
- addHtlc, ok = msg.(*lnwire.UpdateAddHTLC)
- if !ok {
- t.Fatalf("expected UpdateAddHTLC, got %T", msg)
- }
-
- bobIndex, err = bobChannel.ReceiveHTLC(addHtlc)
- if err != nil {
- t.Fatalf("bob failed receiving htlc: %v", err)
- }
-
- // Lock in the HTLC, which should not affect the bandwidth.
- if err := updateState(tmr, coreLink, bobChannel, true); err != nil {
- t.Fatalf("unable to update state: %v", err)
- }
-
- assertLinkBandwidth(t, aliceLink, aliceStartingBandwidth-htlcAmt*2-htlcFee)
-
- // With that processed, we'll now generate an HTLC fail (sent by the
- // remote peer) to cancel the HTLC we just added. This should return us
- // back to the bandwidth of the link right before the HTLC was sent.
- err = bobChannel.FailHTLC(bobIndex, []byte("nop"), nil, nil, nil)
- if err != nil {
- t.Fatalf("unable to fail htlc: %v", err)
- }
- failMsg := &lnwire.UpdateFailHTLC{
- ID: 1,
- Reason: lnwire.OpaqueReason([]byte("nop")),
- }
-
- aliceLink.HandleChannelUpdate(failMsg)
- time.Sleep(time.Millisecond * 500)
-
- // Before the Fail gets locked in, the bandwidth should remain unchanged.
- assertLinkBandwidth(t, aliceLink, aliceStartingBandwidth-htlcAmt*2-htlcFee)
-
- // Lock in the Fail.
- if err := updateState(tmr, coreLink, bobChannel, false); err != nil {
- t.Fatalf("unable to update state: %v", err)
- }
-
- // Now the bandwidth should reflect the failed HTLC.
- assertLinkBandwidth(t, aliceLink, aliceStartingBandwidth-htlcAmt)
-
- // Moving along, we'll now receive a new HTLC from the remote peer,
- // with an ID of 0 as this is their first HTLC. The bandwidth should
- // remain unchanged (but Alice will need to pay the fee for the extra
- // HTLC).
- htlcAmt, totalTimelock, hops := generateHops(htlcAmt, testStartingHeight,
- coreLink)
- blob, err := generateRoute(hops...)
- if err != nil {
- t.Fatalf("unable to gen route: %v", err)
- }
- invoice, htlc, _, err = generatePayment(
- htlcAmt, htlcAmt, totalTimelock, blob,
- )
- if err != nil {
- t.Fatalf("unable to create payment: %v", err)
- }
-
- // We must add the invoice to the registry, such that Alice expects
- // this payment.
- err = coreLink.cfg.Registry.(*mockInvoiceRegistry).AddInvoice(
- *invoice, htlc.PaymentHash,
- )
- if err != nil {
- t.Fatalf("unable to add invoice to registry: %v", err)
- }
-
- htlc.ID = 0
- _, err = bobChannel.AddHTLC(htlc, nil)
- if err != nil {
- t.Fatalf("unable to add htlc: %v", err)
- }
- aliceLink.HandleChannelUpdate(htlc)
-
- // Alice's balance remains unchanged until this HTLC is locked in.
- assertLinkBandwidth(t, aliceLink, aliceStartingBandwidth-htlcAmt)
-
- // Lock in the HTLC.
- if err := updateState(tmr, coreLink, bobChannel, false); err != nil {
- t.Fatalf("unable to update state: %v", err)
- }
-
- // Since Bob is adding this HTLC, Alice only needs to pay the fee.
- assertLinkBandwidth(t, aliceLink, aliceStartingBandwidth-htlcAmt-htlcFee)
- time.Sleep(time.Millisecond * 500)
-
- addPkt = htlcPacket{
- htlc: htlc,
- incomingChanID: aliceLink.ShortChanID(),
- incomingHTLCID: 0,
- obfuscator: NewMockObfuscator(),
- }
-
- circuit = makePaymentCircuit(&htlc.PaymentHash, &addPkt)
- _, err = coreLink.cfg.Switch.commitCircuits(&circuit)
- if err != nil {
- t.Fatalf("unable to commit circuit: %v", err)
- }
-
- addPkt.outgoingChanID = carolChanID
- addPkt.outgoingHTLCID = 0
-
- err = coreLink.cfg.Switch.openCircuits(addPkt.keystone())
- if err != nil {
- t.Fatalf("unable to set keystone: %v", err)
- }
-
- // Next, we'll settle the HTLC with our knowledge of the pre-image that
- // we eventually learn (simulating a multi-hop payment). The bandwidth
- // of the channel should now be re-balanced to the starting point.
- settlePkt := htlcPacket{
- incomingChanID: aliceLink.ShortChanID(),
- incomingHTLCID: 0,
- circuit: &circuit,
- outgoingChanID: addPkt.outgoingChanID,
- outgoingHTLCID: addPkt.outgoingHTLCID,
- htlc: &lnwire.UpdateFulfillHTLC{
- ID: 0,
- PaymentPreimage: *invoice.Terms.PaymentPreimage,
- },
- obfuscator: NewMockObfuscator(),
- }
-
- if err := aliceLink.HandleSwitchPacket(&settlePkt); err != nil {
- t.Fatalf("unable to handle switch packet: %v", err)
- }
- time.Sleep(time.Millisecond * 500)
-
- // Settling this HTLC gives Alice all her original bandwidth back.
- assertLinkBandwidth(t, aliceLink, aliceStartingBandwidth)
-
- select {
- case msg = <-aliceMsgs:
- case <-time.After(15 * time.Second):
- t.Fatalf("did not receive message")
- }
-
- settleMsg, ok := msg.(*lnwire.UpdateFulfillHTLC)
- if !ok {
- t.Fatalf("expected UpdateFulfillHTLC, got %T", msg)
- }
- err = bobChannel.ReceiveHTLCSettle(settleMsg.PaymentPreimage, settleMsg.ID)
- if err != nil {
- t.Fatalf("failed receiving fail htlc: %v", err)
- }
-
- // After failing an HTLC, the link will automatically trigger
- // a state update.
- if err := handleStateUpdate(coreLink, bobChannel); err != nil {
- t.Fatalf("unable to update state: %v", err)
- }
-
- // Finally, we'll test the scenario of failing an HTLC received by the
- // remote node. This should result in no perceived bandwidth changes.
- htlcAmt, totalTimelock, hops = generateHops(htlcAmt, testStartingHeight,
- coreLink)
- blob, err = generateRoute(hops...)
- if err != nil {
- t.Fatalf("unable to gen route: %v", err)
- }
- invoice, htlc, _, err = generatePayment(
- htlcAmt, htlcAmt, totalTimelock, blob,
- )
- if err != nil {
- t.Fatalf("unable to create payment: %v", err)
- }
- err = coreLink.cfg.Registry.(*mockInvoiceRegistry).AddInvoice(
- *invoice, htlc.PaymentHash,
- )
- if err != nil {
- t.Fatalf("unable to add invoice to registry: %v", err)
- }
-
- // Since we are not using the link to handle HTLC IDs for the
- // remote channel, we must set this manually. This is the second
- // HTLC we add, hence it should have an ID of 1 (Alice's channel
- // link will set this automatically for her side).
- htlc.ID = 1
- _, err = bobChannel.AddHTLC(htlc, nil)
- if err != nil {
- t.Fatalf("unable to add htlc: %v", err)
- }
- aliceLink.HandleChannelUpdate(htlc)
- time.Sleep(time.Millisecond * 500)
-
- // No changes before the HTLC is locked in.
- assertLinkBandwidth(t, aliceLink, aliceStartingBandwidth)
- if err := updateState(tmr, coreLink, bobChannel, false); err != nil {
- t.Fatalf("unable to update state: %v", err)
- }
-
- // After lock-in, Alice will have to pay the htlc fee.
- assertLinkBandwidth(t, aliceLink, aliceStartingBandwidth-htlcFee)
-
- addPkt = htlcPacket{
- htlc: htlc,
- incomingChanID: aliceLink.ShortChanID(),
- incomingHTLCID: 1,
- obfuscator: NewMockObfuscator(),
- }
-
- circuit = makePaymentCircuit(&htlc.PaymentHash, &addPkt)
- _, err = coreLink.cfg.Switch.commitCircuits(&circuit)
- if err != nil {
- t.Fatalf("unable to commit circuit: %v", err)
- }
-
- addPkt.outgoingChanID = carolChanID
- addPkt.outgoingHTLCID = 1
-
- err = coreLink.cfg.Switch.openCircuits(addPkt.keystone())
- if err != nil {
- t.Fatalf("unable to set keystone: %v", err)
- }
-
- failPkt := htlcPacket{
- incomingChanID: aliceLink.ShortChanID(),
- incomingHTLCID: 1,
- circuit: &circuit,
- outgoingChanID: addPkt.outgoingChanID,
- outgoingHTLCID: addPkt.outgoingHTLCID,
- htlc: &lnwire.UpdateFailHTLC{
- ID: 1,
- },
- obfuscator: NewMockObfuscator(),
- }
-
- if err := aliceLink.HandleSwitchPacket(&failPkt); err != nil {
- t.Fatalf("unable to handle switch packet: %v", err)
- }
- time.Sleep(time.Millisecond * 500)
-
- // Alice should get all her bandwidth back.
- assertLinkBandwidth(t, aliceLink, aliceStartingBandwidth)
-
- // Message should be sent to Bob.
- select {
- case msg = <-aliceMsgs:
- case <-time.After(15 * time.Second):
- t.Fatalf("did not receive message")
- }
- failMsg, ok = msg.(*lnwire.UpdateFailHTLC)
- if !ok {
- t.Fatalf("expected UpdateFailHTLC, got %T", msg)
- }
- err = bobChannel.ReceiveFailHTLC(failMsg.ID, []byte("fail"))
- if err != nil {
- t.Fatalf("failed receiving fail htlc: %v", err)
- }
-
- // After failing an HTLC, the link will automatically trigger
- // a state update.
- if err := handleStateUpdate(coreLink, bobChannel); err != nil {
- t.Fatalf("unable to update state: %v", err)
- }
- assertLinkBandwidth(t, aliceLink, aliceStartingBandwidth)
-}
-
-// genAddsAndCircuits creates `numHtlcs` sequential ADD packets and there
-// corresponding circuits. The provided `htlc` is used in all test packets.
-func genAddsAndCircuits(numHtlcs int, htlc *lnwire.UpdateAddHTLC) (
- []*htlcPacket, []*PaymentCircuit) {
-
- addPkts := make([]*htlcPacket, 0, numHtlcs)
- circuits := make([]*PaymentCircuit, 0, numHtlcs)
- for i := 0; i < numHtlcs; i++ {
- addPkt := htlcPacket{
- htlc: htlc,
- incomingChanID: hop.Source,
- incomingHTLCID: uint64(i),
- obfuscator: NewMockObfuscator(),
- }
-
- circuit := makePaymentCircuit(&htlc.PaymentHash, &addPkt)
- addPkt.circuit = &circuit
-
- addPkts = append(addPkts, &addPkt)
- circuits = append(circuits, &circuit)
- }
-
- return addPkts, circuits
-}
-
-// TestChannelLinkTrimCircuitsPending checks that the switch and link properly
-// trim circuits if there are open circuits corresponding to ADDs on a pending
-// commmitment transaction.
-func TestChannelLinkTrimCircuitsPending(t *testing.T) {
- t.Parallel()
-
- chanAmt := btcutil.UnitsPerCoin() * 5
- const (
- numHtlcs = 4
- halfHtlcs = numHtlcs / 2
- )
-
- // We'll start by creating a new link with our chanAmt (5 BTC). We will
- // only be testing Alice's behavior, so the reference to Bob's channel
- // state is unnecessary.
- aliceLink, _, batchTicker, start, cleanUp, restore, err :=
- newSingleLinkTestHarness(chanAmt, 0)
- if err != nil {
- t.Fatalf("unable to create link: %v", err)
- }
- defer cleanUp()
-
- if err := start(); err != nil {
- t.Fatalf("unable to start test harness: %v", err)
- }
-
- alice := newPersistentLinkHarness(
- t, aliceLink, batchTicker, restore,
- )
-
- // Compute the static fees that will be used to determine the
- // correctness of Alice's bandwidth when forwarding HTLCs.
- estimator := chainfee.NewStaticEstimator(6000, 0)
- feePerKw, err := estimator.EstimateFeePerKW(1)
- if err != nil {
- t.Fatalf("unable to query fee estimator: %v", err)
- }
-
- defaultCommitFee := alice.channel.StateSnapshot().CommitFee
- htlcFee := lnwire.NewMSatFromSatoshis(
- feePerKw.FeeForWeight(input.HTLCWeight),
- )
-
- // The starting bandwidth of the channel should be exactly the amount
- // that we created the channel between her and Bob, minus the commitment
- // fee and fee of adding an HTLC.
- expectedBandwidth := lnwire.NewMSatFromSatoshis(
- chanAmt-defaultCommitFee,
- ) - htlcFee
- assertLinkBandwidth(t, alice.link, expectedBandwidth)
-
- // Capture Alice's starting bandwidth to perform later, relative
- // bandwidth assertions.
- aliceStartingBandwidth := alice.link.Bandwidth()
-
- // Next, we'll create an HTLC worth 1 BTC that will be used as a dummy
- // message for the test.
- var mockBlob [lnwire.OnionPacketSize]byte
- htlcAmt := lnwire.NewMSatFromSatoshis(btcutil.UnitsPerCoin())
- _, htlc, _, err := generatePayment(htlcAmt, htlcAmt, 5, mockBlob)
- if err != nil {
- t.Fatalf("unable to create payment: %v", err)
- }
-
- // Create `numHtlc` htlcPackets and payment circuits that will be used
- // to drive the test. All of the packets will use the same dummy HTLC.
- addPkts, circuits := genAddsAndCircuits(numHtlcs, htlc)
-
- // To begin the test, start by committing the circuits belong to our
- // first two HTLCs.
- fwdActions := alice.commitCircuits(circuits[:halfHtlcs])
-
- // Both of these circuits should have successfully added, as this is the
- // first attempt to send them.
- if len(fwdActions.Adds) != halfHtlcs {
- t.Fatalf("expected %d circuits to be added", halfHtlcs)
- }
- alice.assertNumPendingNumOpenCircuits(2, 0)
-
- // Since both were committed successfully, we will now deliver them to
- // Alice's link.
- for _, addPkt := range addPkts[:halfHtlcs] {
- if err := alice.link.HandleSwitchPacket(addPkt); err != nil {
- t.Fatalf("unable to handle switch packet: %v", err)
- }
- }
-
- // Wait until Alice's link has sent both HTLCs via the peer.
- alice.checkSent(addPkts[:halfHtlcs])
-
- // The resulting bandwidth should reflect that Alice is paying both
- // htlc amounts, in addition to both htlc fees.
- assertLinkBandwidth(t, alice.link,
- aliceStartingBandwidth-halfHtlcs*(htlcAmt+htlcFee),
- )
-
- // Now, initiate a state transition by Alice so that the pending HTLCs
- // are locked in. This will *not* involve any participation by Bob,
- // which ensures the commitment will remain in a pending state.
- alice.trySignNextCommitment()
- alice.assertNumPendingNumOpenCircuits(2, 2)
-
- // Restart Alice's link, which simulates a disconnection with the remote
- // peer.
- cleanUp = alice.restart(false)
- defer cleanUp()
-
- alice.assertNumPendingNumOpenCircuits(2, 2)
-
- // Make a second attempt to commit the first two circuits. This can
- // happen if the incoming link flaps, but also allows us to verify that
- // the circuits were trimmed properly.
- fwdActions = alice.commitCircuits(circuits[:halfHtlcs])
-
- // Since Alice has a pending commitment with the first two HTLCs, the
- // restart should not have trimmed them from the circuit map.
- // Therefore, we expect both of these circuits to be dropped by the
- // switch, as keystones should still be set.
- if len(fwdActions.Drops) != halfHtlcs {
- t.Fatalf("expected %d packets to be dropped", halfHtlcs)
- }
-
- // The resulting bandwidth should remain unchanged from before,
- // reflecting that Alice is paying both htlc amounts, in addition to
- // both htlc fees.
- assertLinkBandwidth(t, alice.link,
- aliceStartingBandwidth-halfHtlcs*(htlcAmt+htlcFee),
- )
-
- // Now, restart Alice's link *and* the entire switch. This will ensure
- // that entire circuit map is reloaded from disk, and we can now test
- // against the behavioral differences of committing circuits that
- // conflict with duplicate circuits after a restart.
- cleanUp = alice.restart(true)
- defer cleanUp()
-
- alice.assertNumPendingNumOpenCircuits(2, 2)
-
- // Alice should not send out any messages. Even though Alice has a
- // pending commitment transaction, channel reestablishment is not
- // enabled in this test.
- select {
- case <-alice.msgs:
- t.Fatalf("message should not have been sent by Alice")
- case <-time.After(time.Second):
- }
-
- // We will now try to commit the circuits for all of our HTLCs. The
- // first two are already on the pending commitment transaction, the
- // latter two are new HTLCs.
- fwdActions = alice.commitCircuits(circuits)
-
- // The first two circuits should have been dropped, as they are still on
- // the pending commitment transaction, and the restart should not have
- // trimmed the circuits for these valid HTLCs.
- if len(fwdActions.Drops) != halfHtlcs {
- t.Fatalf("expected %d packets to be dropped", halfHtlcs)
- }
- // The latter two circuits are unknown the circuit map, and should
- // report being added.
- if len(fwdActions.Adds) != halfHtlcs {
- t.Fatalf("expected %d packets to be added", halfHtlcs)
- }
-
- // Deliver the latter two HTLCs to Alice's links so that they can be
- // processed and added to the in-memory commitment state.
- for _, addPkt := range addPkts[halfHtlcs:] {
- if err := alice.link.HandleSwitchPacket(addPkt); err != nil {
- t.Fatalf("unable to handle switch packet: %v", err)
- }
- }
-
- // Wait for Alice to send the two latter HTLCs via the peer.
- alice.checkSent(addPkts[halfHtlcs:])
-
- // With two HTLCs on the pending commit, and two added to the in-memory
- // commitment state, the resulting bandwidth should reflect that Alice
- // is paying the all htlc amounts in addition to all htlc fees.
- assertLinkBandwidth(t, alice.link,
- aliceStartingBandwidth-numHtlcs*(htlcAmt+htlcFee),
- )
-
- // We will try to initiate a state transition for Alice, which will
- // ensure the circuits for the two in-memory HTLCs are opened. However,
- // since we have a pending commitment, these HTLCs will not actually be
- // included in a commitment.
- alice.trySignNextCommitment()
- alice.assertNumPendingNumOpenCircuits(4, 4)
-
- // Restart Alice's link to simulate a disconnect. Since the switch
- // remains up throughout, the two latter HTLCs will remain in the link's
- // mailbox, and will reprocessed upon being reattached to the link.
- cleanUp = alice.restart(false)
- defer cleanUp()
-
- alice.assertNumPendingNumOpenCircuits(4, 2)
-
- // Again, try to recommit all of our circuits.
- fwdActions = alice.commitCircuits(circuits)
-
- // It is expected that all of these will get dropped by the switch.
- // The first two circuits are still open as a result of being on the
- // commitment transaction. The latter two should have had their open
- // circuits trimmed, *but* since the HTLCs are still in Alice's mailbox,
- // the switch knows not to fail them as a result of the latter two
- // circuits never having been loaded from disk.
- if len(fwdActions.Drops) != numHtlcs {
- t.Fatalf("expected %d packets to be dropped", numHtlcs)
- }
-
- // Wait for the latter two htlcs to be pulled from the mailbox, added to
- // the in-memory channel state, and sent out via the peer.
- alice.checkSent(addPkts[halfHtlcs:])
-
- // This should result in reconstructing the same bandwidth as our last
- // assertion. There are two HTLCs on the pending commit, and two added
- // to the in-memory commitment state, the resulting bandwidth should
- // reflect that Alice is paying the all htlc amounts in addition to all
- // htlc fees.
- assertLinkBandwidth(t, alice.link,
- aliceStartingBandwidth-numHtlcs*(htlcAmt+htlcFee),
- )
-
- // Again, we will try to initiate a state transition for Alice, which
- // will ensure the circuits for the two in-memory HTLCs are opened.
- // As before, these HTLCs will not actually be included in a commitment
- // since we have a pending commitment.
- alice.trySignNextCommitment()
- alice.assertNumPendingNumOpenCircuits(4, 4)
-
- // As a final persistence check, we will restart the link and switch,
- // wiping the latter two HTLCs from memory, and forcing their circuits
- // to be reloaded from disk.
- cleanUp = alice.restart(true)
- defer cleanUp()
-
- alice.assertNumPendingNumOpenCircuits(4, 2)
-
- // Alice's mailbox will be empty after the restart, and no channel
- // reestablishment is configured, so no messages will be sent upon
- // restart.
- select {
- case <-alice.msgs:
- t.Fatalf("message should not have been sent by Alice")
- case <-time.After(time.Second):
- }
-
- // Finally, make one last attempt to commit all circuits.
- fwdActions = alice.commitCircuits(circuits)
-
- // The first two HTLCs should still be dropped by the htlcswitch. Their
- // existence on the pending commitment transaction should prevent their
- // open circuits from being trimmed.
- if len(fwdActions.Drops) != halfHtlcs {
- t.Fatalf("expected %d packets to be dropped", halfHtlcs)
- }
- // The latter two HTLCs should now be failed by the switch. These will
- // have been trimmed by the link or switch restarting, and since the
- // HTLCs are known to be lost from memory (since their circuits were
- // loaded from disk), it is safe fail them back as they won't ever be
- // delivered to the outgoing link.
- if len(fwdActions.Fails) != halfHtlcs {
- t.Fatalf("expected %d packets to be dropped", halfHtlcs)
- }
-
- // Since the latter two HTLCs have been completely dropped from memory,
- // only the first two HTLCs we added should still be reflected in the
- // channel bandwidth.
- assertLinkBandwidth(t, alice.link,
- aliceStartingBandwidth-halfHtlcs*(htlcAmt+htlcFee),
- )
-}
-
-// TestChannelLinkTrimCircuitsNoCommit checks that the switch and link properly trim
-// circuits if the ADDs corresponding to open circuits are never committed.
-func TestChannelLinkTrimCircuitsNoCommit(t *testing.T) {
- if !build.IsDevBuild() {
- t.Fatalf("htlcswitch tests must be run with '-tags debug")
- }
-
- t.Parallel()
-
- chanAmt := btcutil.UnitsPerCoin() * 5
- const (
- numHtlcs = 4
- halfHtlcs = numHtlcs / 2
- )
-
- // We'll start by creating a new link with our chanAmt (5 BTC). We will
- // only be testing Alice's behavior, so the reference to Bob's channel
- // state is unnecessary.
- aliceLink, _, batchTicker, start, cleanUp, restore, err :=
- newSingleLinkTestHarness(chanAmt, 0)
- if err != nil {
- t.Fatalf("unable to create link: %v", err)
- }
- defer cleanUp()
-
- if err := start(); err != nil {
- t.Fatalf("unable to start test harness: %v", err)
- }
-
- alice := newPersistentLinkHarness(
- t, aliceLink, batchTicker, restore,
- )
-
- // We'll put Alice into hodl.Commit mode, such that the circuits for any
- // outgoing ADDs are opened, but the changes are not committed in the
- // channel state.
- alice.coreLink.cfg.HodlMask = hodl.Commit.Mask()
-
- // Compute the static fees that will be used to determine the
- // correctness of Alice's bandwidth when forwarding HTLCs.
- estimator := chainfee.NewStaticEstimator(6000, 0)
- feePerKw, err := estimator.EstimateFeePerKW(1)
- if err != nil {
- t.Fatalf("unable to query fee estimator: %v", err)
- }
-
- defaultCommitFee := alice.channel.StateSnapshot().CommitFee
- htlcFee := lnwire.NewMSatFromSatoshis(
- feePerKw.FeeForWeight(input.HTLCWeight),
- )
-
- // The starting bandwidth of the channel should be exactly the amount
- // that we created the channel between her and Bob, minus the commitment
- // fee and fee for adding an additional HTLC.
- expectedBandwidth := lnwire.NewMSatFromSatoshis(
- chanAmt-defaultCommitFee,
- ) - htlcFee
- assertLinkBandwidth(t, alice.link, expectedBandwidth)
-
- // Capture Alice's starting bandwidth to perform later, relative
- // bandwidth assertions.
- aliceStartingBandwidth := alice.link.Bandwidth()
-
- // Next, we'll create an HTLC worth 1 BTC that will be used as a dummy
- // message for the test.
- var mockBlob [lnwire.OnionPacketSize]byte
- htlcAmt := lnwire.NewMSatFromSatoshis(btcutil.UnitsPerCoin())
- _, htlc, _, err := generatePayment(htlcAmt, htlcAmt, 5, mockBlob)
- if err != nil {
- t.Fatalf("unable to create payment: %v", err)
- }
-
- // Create `numHtlc` htlcPackets and payment circuits that will be used
- // to drive the test. All of the packets will use the same dummy HTLC.
- addPkts, circuits := genAddsAndCircuits(numHtlcs, htlc)
-
- // To begin the test, start by committing the circuits belong to our
- // first two HTLCs.
- fwdActions := alice.commitCircuits(circuits[:halfHtlcs])
-
- // Both of these circuits should have successfully added, as this is the
- // first attempt to send them.
- if len(fwdActions.Adds) != halfHtlcs {
- t.Fatalf("expected %d circuits to be added", halfHtlcs)
- }
-
- // Since both were committed successfully, we will now deliver them to
- // Alice's link.
- for _, addPkt := range addPkts[:halfHtlcs] {
- if err := alice.link.HandleSwitchPacket(addPkt); err != nil {
- t.Fatalf("unable to handle switch packet: %v", err)
- }
- }
-
- // Wait until Alice's link has sent both HTLCs via the peer.
- alice.checkSent(addPkts[:halfHtlcs])
-
- // The resulting bandwidth should reflect that Alice is paying both
- // htlc amounts, in addition to both htlc fees.
- assertLinkBandwidth(t, alice.link,
- aliceStartingBandwidth-halfHtlcs*(htlcAmt+htlcFee),
- )
-
- alice.assertNumPendingNumOpenCircuits(2, 0)
-
- // Now, init a state transition by Alice to try and commit the HTLCs.
- // Since she is in hodl.Commit mode, this will fail, but the circuits
- // will be opened persistently.
- alice.trySignNextCommitment()
-
- alice.assertNumPendingNumOpenCircuits(2, 2)
-
- // Restart Alice's link, which simulates a disconnection with the remote
- // peer. Alice's link and switch should trim the circuits that were
- // opened but not committed.
- cleanUp = alice.restart(false, hodl.Commit)
- defer cleanUp()
-
- alice.assertNumPendingNumOpenCircuits(2, 0)
-
- // The first two HTLCs should have been reset in Alice's mailbox since
- // the switch was not shutdown. Knowing this the switch should drop the
- // two circuits, even if the circuits were trimmed.
- fwdActions = alice.commitCircuits(circuits[:halfHtlcs])
- if len(fwdActions.Drops) != halfHtlcs {
- t.Fatalf("expected %d packets to be dropped since "+
- "the switch has not been restarted", halfHtlcs)
- }
-
- // Wait for alice to process the first two HTLCs resend them via the
- // peer.
- alice.checkSent(addPkts[:halfHtlcs])
-
- // The resulting bandwidth should reflect that Alice is paying both htlc
- // amounts, in addition to both htlc fees.
- assertLinkBandwidth(t, alice.link,
- aliceStartingBandwidth-halfHtlcs*(htlcAmt+htlcFee),
- )
-
- // Again, initiate another state transition by Alice to try and commit
- // the HTLCs. Since she is in hodl.Commit mode, this will fail, but the
- // circuits will be opened persistently.
- alice.trySignNextCommitment()
- alice.assertNumPendingNumOpenCircuits(2, 2)
-
- // Now, we we will do a full restart of the link and switch, configuring
- // Alice again in hodl.Commit mode. Since none of the HTLCs were
- // actually committed, the previously opened circuits should be trimmed
- // by both the link and switch.
- cleanUp = alice.restart(true, hodl.Commit)
- defer cleanUp()
-
- alice.assertNumPendingNumOpenCircuits(2, 0)
-
- // Attempt another commit of our first two circuits. Both should fail,
- // as the opened circuits should have been trimmed, and circuit map
- // recognizes that these HTLCs were lost during the restart.
- fwdActions = alice.commitCircuits(circuits[:halfHtlcs])
- if len(fwdActions.Fails) != halfHtlcs {
- t.Fatalf("expected %d packets to be failed", halfHtlcs)
- }
-
- // Bob should not receive any HTLCs from Alice, since Alice's mailbox is
- // empty and there is no pending commitment.
- select {
- case <-alice.msgs:
- t.Fatalf("received unexpected message from Alice")
- case <-time.After(time.Second):
- }
-
- // Alice's bandwidth should have reverted back to her starting value.
- assertLinkBandwidth(t, alice.link, aliceStartingBandwidth)
-
- // Now, try to commit the last two payment circuits, which are unused
- // thus far. These should succeed without hesitation.
- fwdActions = alice.commitCircuits(circuits[halfHtlcs:])
- if len(fwdActions.Adds) != halfHtlcs {
- t.Fatalf("expected %d packets to be added", halfHtlcs)
- }
-
- // Deliver the last two HTLCs to the link via Alice's mailbox.
- for _, addPkt := range addPkts[halfHtlcs:] {
- if err := alice.link.HandleSwitchPacket(addPkt); err != nil {
- t.Fatalf("unable to handle switch packet: %v", err)
- }
- }
-
- // Verify that Alice processed and sent out the ADD packets via the
- // peer.
- alice.checkSent(addPkts[halfHtlcs:])
-
- // The resulting bandwidth should reflect that Alice is paying both htlc
- // amounts, in addition to both htlc fees.
- assertLinkBandwidth(t, alice.link,
- aliceStartingBandwidth-halfHtlcs*(htlcAmt+htlcFee),
- )
-
- // Now, initiate a state transition for Alice. Since we are hodl.Commit
- // mode, this will only open the circuits that were added to the
- // in-memory channel state.
- alice.trySignNextCommitment()
- alice.assertNumPendingNumOpenCircuits(4, 2)
-
- // Restart Alice's link, and place her back in hodl.Commit mode. On
- // restart, all previously opened circuits should be trimmed by both the
- // link and the switch.
- cleanUp = alice.restart(false, hodl.Commit)
- defer cleanUp()
-
- alice.assertNumPendingNumOpenCircuits(4, 0)
-
- // Now, try to commit all of known circuits.
- fwdActions = alice.commitCircuits(circuits)
-
- // The first two HTLCs will fail to commit for the same reason as
- // before, the circuits have been trimmed.
- if len(fwdActions.Fails) != halfHtlcs {
- t.Fatalf("expected %d packet to be failed", halfHtlcs)
- }
-
- // The last two HTLCs will be dropped, as thought the circuits are
- // trimmed, the switch is aware that the HTLCs are still in Alice's
- // mailbox.
- if len(fwdActions.Drops) != halfHtlcs {
- t.Fatalf("expected %d packet to be dropped", halfHtlcs)
- }
-
- // Wait until Alice reprocesses the last two HTLCs and sends them via
- // the peer.
- alice.checkSent(addPkts[halfHtlcs:])
-
- // Her bandwidth should now reflect having sent only those two HTLCs.
- assertLinkBandwidth(t, alice.link,
- aliceStartingBandwidth-halfHtlcs*(htlcAmt+htlcFee),
- )
-
- // Now, initiate a state transition for Alice. Since we are hodl.Commit
- // mode, this will only open the circuits that were added to the
- // in-memory channel state.
- alice.trySignNextCommitment()
- alice.assertNumPendingNumOpenCircuits(4, 2)
-
- // Finally, do one last restart of both the link and switch. This will
- // flush the HTLCs from the mailbox. The circuits should now be trimmed
- // for all of the HTLCs.
- cleanUp = alice.restart(true, hodl.Commit)
- defer cleanUp()
-
- alice.assertNumPendingNumOpenCircuits(4, 0)
-
- // Bob should not receive any HTLCs from Alice, as none of the HTLCs are
- // in Alice's mailbox, and channel reestablishment is disabled.
- select {
- case <-alice.msgs:
- t.Fatalf("received unexpected message from Alice")
- case <-time.After(time.Second):
- }
-
- // Attempt to commit the last two circuits, both should now fail since
- // though they were opened before shutting down, the circuits have been
- // properly trimmed.
- fwdActions = alice.commitCircuits(circuits[halfHtlcs:])
- if len(fwdActions.Fails) != halfHtlcs {
- t.Fatalf("expected %d packet to be failed", halfHtlcs)
- }
-
- // Alice balance should not have changed since the start.
- assertLinkBandwidth(t, alice.link, aliceStartingBandwidth)
-}
-
-// TestChannelLinkTrimCircuitsRemoteCommit checks that the switch and link
-// don't trim circuits if the ADD is locked in on the remote commitment but
-// not on our local commitment.
-func TestChannelLinkTrimCircuitsRemoteCommit(t *testing.T) {
- t.Parallel()
-
- chanAmt := btcutil.UnitsPerCoin() * 5
- const (
- numHtlcs = 2
- )
-
- // We'll start by creating a new link with our chanAmt (5 BTC).
- aliceLink, bobChan, batchTicker, start, cleanUp, restore, err :=
- newSingleLinkTestHarness(chanAmt, 0)
- if err != nil {
- t.Fatalf("unable to create link: %v", err)
- }
-
- if err := start(); err != nil {
- t.Fatalf("unable to start test harness: %v", err)
- }
- defer cleanUp()
-
- alice := newPersistentLinkHarness(
- t, aliceLink, batchTicker, restore,
- )
-
- // Compute the static fees that will be used to determine the
- // correctness of Alice's bandwidth when forwarding HTLCs.
- estimator := chainfee.NewStaticEstimator(6000, 0)
- feePerKw, err := estimator.EstimateFeePerKW(1)
- if err != nil {
- t.Fatalf("unable to query fee estimator: %v", err)
- }
-
- defaultCommitFee := alice.channel.StateSnapshot().CommitFee
- htlcFee := lnwire.NewMSatFromSatoshis(
- feePerKw.FeeForWeight(input.HTLCWeight),
- )
-
- // The starting bandwidth of the channel should be exactly the amount
- // that we created the channel between her and Bob, minus the commitment
- // fee and fee of adding an HTLC.
- expectedBandwidth := lnwire.NewMSatFromSatoshis(
- chanAmt-defaultCommitFee,
- ) - htlcFee
- assertLinkBandwidth(t, alice.link, expectedBandwidth)
-
- // Capture Alice's starting bandwidth to perform later, relative
- // bandwidth assertions.
- aliceStartingBandwidth := alice.link.Bandwidth()
-
- // Next, we'll create an HTLC worth 1 BTC that will be used as a dummy
- // message for the test.
- var mockBlob [lnwire.OnionPacketSize]byte
- htlcAmt := lnwire.NewMSatFromSatoshis(btcutil.UnitsPerCoin())
- _, htlc, _, err := generatePayment(htlcAmt, htlcAmt, 5, mockBlob)
- if err != nil {
- t.Fatalf("unable to create payment: %v", err)
- }
-
- // Create `numHtlc` htlcPackets and payment circuits that will be used
- // to drive the test. All of the packets will use the same dummy HTLC.
- addPkts, circuits := genAddsAndCircuits(numHtlcs, htlc)
-
- // To begin the test, start by committing the circuits for our first two
- // HTLCs.
- fwdActions := alice.commitCircuits(circuits)
-
- // Both of these circuits should have successfully added, as this is the
- // first attempt to send them.
- if len(fwdActions.Adds) != numHtlcs {
- t.Fatalf("expected %d circuits to be added", numHtlcs)
- }
- alice.assertNumPendingNumOpenCircuits(2, 0)
-
- // Since both were committed successfully, we will now deliver them to
- // Alice's link.
- for _, addPkt := range addPkts {
- if err := alice.link.HandleSwitchPacket(addPkt); err != nil {
- t.Fatalf("unable to handle switch packet: %v", err)
- }
- }
-
- // Wait until Alice's link has sent both HTLCs via the peer.
- alice.checkSent(addPkts)
-
- // Pass both of the htlcs to Bob.
- for i, addPkt := range addPkts {
- pkt, ok := addPkt.htlc.(*lnwire.UpdateAddHTLC)
- if !ok {
- t.Fatalf("unable to add packet")
- }
-
- pkt.ID = uint64(i)
-
- _, err := bobChan.ReceiveHTLC(pkt)
- if err != nil {
- t.Fatalf("unable to receive htlc: %v", err)
- }
- }
-
- // The resulting bandwidth should reflect that Alice is paying both
- // htlc amounts, in addition to both htlc fees.
- assertLinkBandwidth(t, alice.link,
- aliceStartingBandwidth-numHtlcs*(htlcAmt+htlcFee),
- )
-
- // Now, initiate a state transition by Alice so that the pending HTLCs
- // are locked in.
- alice.trySignNextCommitment()
- alice.assertNumPendingNumOpenCircuits(2, 2)
-
- select {
- case aliceMsg := <-alice.msgs:
- // Pass the commitment signature to Bob.
- sig, ok := aliceMsg.(*lnwire.CommitSig)
- if !ok {
- t.Fatalf("alice did not send commitment signature")
- }
-
- err := bobChan.ReceiveNewCommitment(sig.CommitSig, sig.HtlcSigs)
- if err != nil {
- t.Fatalf("unable to receive new commitment: %v", err)
- }
- case <-time.After(time.Second):
- }
-
- // Next, revoke Bob's current commitment and send it to Alice so that we
- // can test that Alice's circuits aren't trimmed.
- rev, _, err := bobChan.RevokeCurrentCommitment()
- if err != nil {
- t.Fatalf("unable to revoke current commitment: %v", err)
- }
-
- _, _, _, _, err = alice.channel.ReceiveRevocation(rev)
- if err != nil {
- t.Fatalf("unable to receive revocation: %v", err)
- }
-
- // Restart Alice's link, which simulates a disconnection with the remote
- // peer.
- cleanUp = alice.restart(false)
- defer cleanUp()
-
- alice.assertNumPendingNumOpenCircuits(2, 2)
-
- // Restart the link + switch and check that the number of open circuits
- // doesn't change.
- cleanUp = alice.restart(true)
- defer cleanUp()
-
- alice.assertNumPendingNumOpenCircuits(2, 2)
-}
-
-// TestChannelLinkBandwidthChanReserve checks that the bandwidth available
-// on the channel link reflects the channel reserve that must be kept
-// at all times.
-func TestChannelLinkBandwidthChanReserve(t *testing.T) {
- t.Parallel()
-
- // First start a link that has a balance greater than it's
- // channel reserve.
- chanAmt := btcutil.UnitsPerCoin() * 5
- chanReserve := btcutil.UnitsPerCoin() * 1
- aliceLink, bobChannel, batchTimer, start, cleanUp, _, err :=
- newSingleLinkTestHarness(chanAmt, chanReserve)
- if err != nil {
- t.Fatalf("unable to create link: %v", err)
- }
- defer cleanUp()
-
- if err := start(); err != nil {
- t.Fatalf("unable to start test harness: %v", err)
- }
-
- var (
- mockBlob [lnwire.OnionPacketSize]byte
- coreLink = aliceLink.(*channelLink)
- coreChan = coreLink.channel
- defaultCommitFee = coreChan.StateSnapshot().CommitFee
- aliceStartingBandwidth = aliceLink.Bandwidth()
- aliceMsgs = coreLink.cfg.Peer.(*mockPeer).sentMsgs
- )
-
- estimator := chainfee.NewStaticEstimator(6000, 0)
- feePerKw, err := estimator.EstimateFeePerKW(1)
- if err != nil {
- t.Fatalf("unable to query fee estimator: %v", err)
- }
- htlcFee := lnwire.NewMSatFromSatoshis(
- feePerKw.FeeForWeight(input.HTLCWeight),
- )
-
- // The starting bandwidth of the channel should be exactly the amount
- // that we created the channel between her and Bob, minus the channel
- // reserve, commitment fee and fee for adding an additional HTLC.
- expectedBandwidth := lnwire.NewMSatFromSatoshis(
- chanAmt-defaultCommitFee-chanReserve) - htlcFee
- assertLinkBandwidth(t, aliceLink, expectedBandwidth)
-
- // Next, we'll create an HTLC worth 3 BTC, and send it into the link as
- // a switch initiated payment. The resulting bandwidth should
- // now be decremented to reflect the new HTLC.
- htlcAmt := lnwire.NewMSatFromSatoshis(3 * btcutil.UnitsPerCoin())
- invoice, htlc, _, err := generatePayment(htlcAmt, htlcAmt, 5, mockBlob)
- if err != nil {
- t.Fatalf("unable to create payment: %v", err)
- }
-
- addPkt := &htlcPacket{
- htlc: htlc,
- obfuscator: NewMockObfuscator(),
- }
- circuit := makePaymentCircuit(&htlc.PaymentHash, addPkt)
- _, err = coreLink.cfg.Switch.commitCircuits(&circuit)
- if err != nil {
- t.Fatalf("unable to commit circuit: %v", err)
- }
-
- aliceLink.HandleSwitchPacket(addPkt)
- time.Sleep(time.Millisecond * 100)
- assertLinkBandwidth(t, aliceLink, aliceStartingBandwidth-htlcAmt-htlcFee)
-
- // Alice should send the HTLC to Bob.
- var msg lnwire.Message
- select {
- case msg = <-aliceMsgs:
- case <-time.After(15 * time.Second):
- t.Fatalf("did not receive message")
- }
-
- addHtlc, ok := msg.(*lnwire.UpdateAddHTLC)
- if !ok {
- t.Fatalf("expected UpdateAddHTLC, got %T", msg)
- }
-
- bobIndex, err := bobChannel.ReceiveHTLC(addHtlc)
- if err != nil {
- t.Fatalf("bob failed receiving htlc: %v", err)
- }
-
- // Lock in the HTLC.
- if err := updateState(batchTimer, coreLink, bobChannel, true); err != nil {
- t.Fatalf("unable to update state: %v", err)
- }
-
- assertLinkBandwidth(t, aliceLink, aliceStartingBandwidth-htlcAmt-htlcFee)
-
- // If we now send in a valid HTLC settle for the prior HTLC we added,
- // then the bandwidth should remain unchanged as the remote party will
- // gain additional channel balance.
- err = bobChannel.SettleHTLC(*invoice.Terms.PaymentPreimage, bobIndex, nil, nil, nil)
- if err != nil {
- t.Fatalf("unable to settle htlc: %v", err)
- }
- htlcSettle := &lnwire.UpdateFulfillHTLC{
- ID: bobIndex,
- PaymentPreimage: *invoice.Terms.PaymentPreimage,
- }
- aliceLink.HandleChannelUpdate(htlcSettle)
- time.Sleep(time.Millisecond * 500)
-
- // Since the settle is not locked in yet, Alice's bandwidth should still
- // reflect that she has to pay the fee.
- assertLinkBandwidth(t, aliceLink, aliceStartingBandwidth-htlcAmt-htlcFee)
-
- // Lock in the settle.
- if err := updateState(batchTimer, coreLink, bobChannel, false); err != nil {
- t.Fatalf("unable to update state: %v", err)
- }
-
- time.Sleep(time.Millisecond * 100)
- assertLinkBandwidth(t, aliceLink, aliceStartingBandwidth-htlcAmt)
-
- // Now we create a channel that has a channel reserve that is
- // greater than it's balance. In these case only payments can
- // be received on this channel, not sent. The available bandwidth
- // should therefore be 0.
- bobChanAmt := btcutil.UnitsPerCoin() * 1
- bobChanReserve := btcutil.Amount(btcutil.UnitsPerCoinF() * 1.5)
- bobLink, _, _, start, bobCleanUp, _, err :=
- newSingleLinkTestHarness(bobChanAmt, bobChanReserve)
- if err != nil {
- t.Fatalf("unable to create link: %v", err)
- }
- defer bobCleanUp()
-
- if err := start(); err != nil {
- t.Fatalf("unable to start test harness: %v", err)
- }
-
- // Make sure bandwidth is reported as 0.
- assertLinkBandwidth(t, bobLink, 0)
-}
-
-// TestChannelRetransmission tests the ability of the channel links to
-// synchronize theirs states after abrupt disconnect.
-func TestChannelRetransmission(t *testing.T) {
- t.Parallel()
-
- retransmissionTests := []struct {
- name string
- messages []expectedMessage
- }{
- {
- // Tests the ability of the channel links states to be
- // synchronized after remote node haven't receive
- // revoke and ack message.
- name: "intercept last alice revoke_and_ack",
- messages: []expectedMessage{
- // First initialization of the channel.
- {"alice", "bob", &lnwire.ChannelReestablish{}, false},
- {"bob", "alice", &lnwire.ChannelReestablish{}, false},
-
- {"alice", "bob", &lnwire.FundingLocked{}, false},
- {"bob", "alice", &lnwire.FundingLocked{}, false},
-
- // Send payment from Alice to Bob and intercept
- // the last revocation message, in this case
- // Bob should not proceed the payment farther.
- {"alice", "bob", &lnwire.UpdateAddHTLC{}, false},
- {"alice", "bob", &lnwire.CommitSig{}, false},
- {"bob", "alice", &lnwire.RevokeAndAck{}, false},
- {"bob", "alice", &lnwire.CommitSig{}, false},
- {"alice", "bob", &lnwire.RevokeAndAck{}, true},
-
- // Reestablish messages exchange on nodes restart.
- {"alice", "bob", &lnwire.ChannelReestablish{}, false},
- {"bob", "alice", &lnwire.ChannelReestablish{}, false},
-
- // Alice should resend the revoke_and_ack
- // message to Bob because Bob claimed it in the
- // re-establish message.
- {"alice", "bob", &lnwire.RevokeAndAck{}, false},
-
- // Proceed the payment farther by sending the
- // fulfilment message and trigger the state
- // update.
- {"bob", "alice", &lnwire.UpdateFulfillHTLC{}, false},
- {"bob", "alice", &lnwire.CommitSig{}, false},
- {"alice", "bob", &lnwire.RevokeAndAck{}, false},
- {"alice", "bob", &lnwire.CommitSig{}, false},
- {"bob", "alice", &lnwire.RevokeAndAck{}, false},
- },
- },
- {
- // Tests the ability of the channel links states to be
- // synchronized after remote node haven't receive
- // revoke and ack message.
- name: "intercept bob revoke_and_ack commit_sig messages",
- messages: []expectedMessage{
- {"alice", "bob", &lnwire.ChannelReestablish{}, false},
- {"bob", "alice", &lnwire.ChannelReestablish{}, false},
-
- {"alice", "bob", &lnwire.FundingLocked{}, false},
- {"bob", "alice", &lnwire.FundingLocked{}, false},
-
- // Send payment from Alice to Bob and intercept
- // the last revocation message, in this case
- // Bob should not proceed the payment farther.
- {"alice", "bob", &lnwire.UpdateAddHTLC{}, false},
- {"alice", "bob", &lnwire.CommitSig{}, false},
-
- // Intercept bob commit sig and revoke and ack
- // messages.
- {"bob", "alice", &lnwire.RevokeAndAck{}, true},
- {"bob", "alice", &lnwire.CommitSig{}, true},
-
- // Reestablish messages exchange on nodes restart.
- {"alice", "bob", &lnwire.ChannelReestablish{}, false},
- {"bob", "alice", &lnwire.ChannelReestablish{}, false},
-
- // Bob should resend previously intercepted messages.
- {"bob", "alice", &lnwire.RevokeAndAck{}, false},
- {"bob", "alice", &lnwire.CommitSig{}, false},
-
- // Proceed the payment farther by sending the
- // fulfilment message and trigger the state
- // update.
- {"alice", "bob", &lnwire.RevokeAndAck{}, false},
- {"bob", "alice", &lnwire.UpdateFulfillHTLC{}, false},
- {"bob", "alice", &lnwire.CommitSig{}, false},
- {"alice", "bob", &lnwire.RevokeAndAck{}, false},
- {"alice", "bob", &lnwire.CommitSig{}, false},
- {"bob", "alice", &lnwire.RevokeAndAck{}, false},
- },
- },
- {
- // Tests the ability of the channel links states to be
- // synchronized after remote node haven't receive
- // update and commit sig messages.
- name: "intercept update add htlc and commit sig messages",
- messages: []expectedMessage{
- {"alice", "bob", &lnwire.ChannelReestablish{}, false},
- {"bob", "alice", &lnwire.ChannelReestablish{}, false},
-
- {"alice", "bob", &lnwire.FundingLocked{}, false},
- {"bob", "alice", &lnwire.FundingLocked{}, false},
-
- // Attempt make a payment from Alice to Bob,
- // which is intercepted, emulating the Bob
- // server abrupt stop.
- {"alice", "bob", &lnwire.UpdateAddHTLC{}, true},
- {"alice", "bob", &lnwire.CommitSig{}, true},
-
- // Restart of the nodes, and after that nodes
- // should exchange the reestablish messages.
- {"alice", "bob", &lnwire.ChannelReestablish{}, false},
- {"bob", "alice", &lnwire.ChannelReestablish{}, false},
-
- {"alice", "bob", &lnwire.FundingLocked{}, false},
- {"bob", "alice", &lnwire.FundingLocked{}, false},
-
- // After Bob has notified Alice that he didn't
- // receive updates Alice should re-send them.
- {"alice", "bob", &lnwire.UpdateAddHTLC{}, false},
- {"alice", "bob", &lnwire.CommitSig{}, false},
-
- {"bob", "alice", &lnwire.RevokeAndAck{}, false},
- {"bob", "alice", &lnwire.CommitSig{}, false},
- {"alice", "bob", &lnwire.RevokeAndAck{}, false},
-
- {"bob", "alice", &lnwire.UpdateFulfillHTLC{}, false},
- {"bob", "alice", &lnwire.CommitSig{}, false},
- {"alice", "bob", &lnwire.RevokeAndAck{}, false},
- {"alice", "bob", &lnwire.CommitSig{}, false},
- {"bob", "alice", &lnwire.RevokeAndAck{}, false},
- },
- },
- }
- paymentWithRestart := func(t *testing.T, messages []expectedMessage) {
- channels, cleanUp, restoreChannelsFromDb, err := createClusterChannels(
- btcutil.UnitsPerCoin()*5,
- btcutil.UnitsPerCoin()*5)
- if err != nil {
- t.Fatalf("unable to create channel: %v", err)
- }
- defer cleanUp()
-
- chanID := lnwire.NewChanIDFromOutPoint(channels.aliceToBob.ChannelPoint())
- serverErr := make(chan error, 4)
-
- aliceInterceptor := createInterceptorFunc("[alice] <-- [bob]",
- "alice", messages, chanID, false)
- bobInterceptor := createInterceptorFunc("[alice] --> [bob]",
- "bob", messages, chanID, false)
-
- ct := newConcurrentTester(t)
-
- // Add interceptor to check the order of Bob and Alice
- // messages.
- n := newThreeHopNetwork(ct,
- channels.aliceToBob, channels.bobToAlice,
- channels.bobToCarol, channels.carolToBob,
- testStartingHeight,
- )
- n.aliceServer.intersect(aliceInterceptor)
- n.bobServer.intersect(bobInterceptor)
- if err := n.start(); err != nil {
- ct.Fatalf("unable to start three hop network: %v", err)
- }
- defer n.stop()
-
- bobBandwidthBefore := n.firstBobChannelLink.Bandwidth()
- aliceBandwidthBefore := n.aliceChannelLink.Bandwidth()
-
- amount := lnwire.NewMSatFromSatoshis(btcutil.UnitsPerCoin())
- htlcAmt, totalTimelock, hops := generateHops(amount, testStartingHeight,
- n.firstBobChannelLink)
-
- // Send payment which should fail because we intercept the
- // update and commit messages.
- //
- // TODO(roasbeef); increase timeout?
- receiver := n.bobServer
- firstHop := n.firstBobChannelLink.ShortChanID()
- rhash, err := makePayment(
- n.aliceServer, receiver, firstHop, hops, amount,
- htlcAmt, totalTimelock,
- ).Wait(time.Second * 5)
- if err == nil {
- ct.Fatalf("payment shouldn't haven been finished")
- }
-
- // Stop network cluster and create new one, with the old
- // channels states. Also do the *hack* - save the payment
- // receiver to pass it in new channel link, otherwise payment
- // will be failed because of the unknown payment hash. Hack
- // will be removed with sphinx payment.
- bobRegistry := n.bobServer.registry
- n.stop()
-
- channels, err = restoreChannelsFromDb()
- if err != nil {
- ct.Fatalf("unable to restore channels from database: %v", err)
- }
-
- n = newThreeHopNetwork(ct, channels.aliceToBob, channels.bobToAlice,
- channels.bobToCarol, channels.carolToBob, testStartingHeight)
- n.firstBobChannelLink.cfg.Registry = bobRegistry
- n.aliceServer.intersect(aliceInterceptor)
- n.bobServer.intersect(bobInterceptor)
-
- if err := n.start(); err != nil {
- ct.Fatalf("unable to start three hop network: %v", err)
- }
- defer n.stop()
-
- // Wait for reestablishment to be proceeded and invoice to be settled.
- // TODO(andrew.shvv) Will be removed if we move the notification center
- // to the channel link itself.
-
- var invoice channeldb.Invoice
- for i := 0; i < 20; i++ {
- select {
- case <-time.After(time.Millisecond * 200):
- case serverErr := <-serverErr:
- ct.Fatalf("server error: %v", serverErr)
- }
-
- // Check that alice invoice wasn't settled and
- // bandwidth of htlc links hasn't been changed.
- invoice, err = receiver.registry.LookupInvoice(rhash)
- if err != nil {
- err = er.Errorf("unable to get invoice: %v", err)
- continue
- }
- if invoice.State != channeldb.ContractSettled {
- err = er.Errorf("alice invoice haven't been settled")
- continue
- }
-
- aliceExpectedBandwidth := aliceBandwidthBefore - htlcAmt
- if aliceExpectedBandwidth != n.aliceChannelLink.Bandwidth() {
- err = er.Errorf("expected alice to have %v, instead has %v",
- aliceExpectedBandwidth, n.aliceChannelLink.Bandwidth())
- continue
- }
-
- bobExpectedBandwidth := bobBandwidthBefore + htlcAmt
- if bobExpectedBandwidth != n.firstBobChannelLink.Bandwidth() {
- err = er.Errorf("expected bob to have %v, instead has %v",
- bobExpectedBandwidth, n.firstBobChannelLink.Bandwidth())
- continue
- }
-
- break
- }
-
- if err != nil {
- ct.Fatal(err)
- }
- }
-
- for _, test := range retransmissionTests {
- passed := t.Run(test.name, func(t *testing.T) {
- paymentWithRestart(t, test.messages)
- })
-
- if !passed {
- break
- }
- }
-
-}
-
-// TestShouldAdjustCommitFee tests the shouldAdjustCommitFee pivot function to
-// ensure that ie behaves properly. We should only update the fee if it
-// deviates from our current fee by more 10% or more.
-func TestShouldAdjustCommitFee(t *testing.T) {
- tests := []struct {
- netFee chainfee.SatPerKWeight
- chanFee chainfee.SatPerKWeight
- shouldAdjust bool
- }{
-
- // The network fee is 3x lower than the current commitment
- // transaction. As a result, we should adjust our fee to match
- // it.
- {
- netFee: 100,
- chanFee: 3000,
- shouldAdjust: true,
- },
-
- // The network fee is lower than the current commitment fee,
- // but only slightly so, so we won't update the commitment fee.
- {
- netFee: 2999,
- chanFee: 3000,
- shouldAdjust: false,
- },
-
- // The network fee is lower than the commitment fee, but only
- // right before it crosses our current threshold.
- {
- netFee: 1000,
- chanFee: 1099,
- shouldAdjust: false,
- },
-
- // The network fee is lower than the commitment fee, and within
- // our range of adjustment, so we should adjust.
- {
- netFee: 1000,
- chanFee: 1100,
- shouldAdjust: true,
- },
-
- // The network fee is 2x higher than our commitment fee, so we
- // should adjust upwards.
- {
- netFee: 2000,
- chanFee: 1000,
- shouldAdjust: true,
- },
-
- // The network fee is higher than our commitment fee, but only
- // slightly so, so we won't update.
- {
- netFee: 1001,
- chanFee: 1000,
- shouldAdjust: false,
- },
-
- // The network fee is higher than our commitment fee, but
- // hasn't yet crossed our activation threshold.
- {
- netFee: 1100,
- chanFee: 1099,
- shouldAdjust: false,
- },
-
- // The network fee is higher than our commitment fee, and
- // within our activation threshold, so we should update our
- // fee.
- {
- netFee: 1100,
- chanFee: 1000,
- shouldAdjust: true,
- },
-
- // Our fees match exactly, so we shouldn't update it at all.
- {
- netFee: 1000,
- chanFee: 1000,
- shouldAdjust: false,
- },
- }
-
- for i, test := range tests {
- adjustedFee := shouldAdjustCommitFee(
- test.netFee, test.chanFee,
- )
-
- if adjustedFee && !test.shouldAdjust {
- t.Fatalf("test #%v failed: net_fee=%v, "+
- "chan_fee=%v, adjust_expect=%v, adjust_returned=%v",
- i, test.netFee, test.chanFee, test.shouldAdjust,
- adjustedFee)
- }
- }
-}
-
-// TestChannelLinkShutdownDuringForward asserts that a link can be fully
-// stopped when it is trying to send synchronously through the switch. The
-// specific case this can occur is when a link forwards incoming Adds. We test
-// this by forcing the switch into a state where it will not accept new packets,
-// and then killing the link, which can only succeed if forwarding can be
-// canceled by a call to Stop.
-func TestChannelLinkShutdownDuringForward(t *testing.T) {
- t.Parallel()
-
- // First, we'll create our traditional three hop network. We're
- // interested in testing the ability to stop the link when it is
- // synchronously forwarding to the switch, which happens when an
- // incoming link forwards Adds. Thus, the test will be performed
- // against Bob's first link.
- channels, cleanUp, _, err := createClusterChannels(
- btcutil.UnitsPerCoin()*3,
- btcutil.UnitsPerCoin()*5)
- if err != nil {
- t.Fatalf("unable to create channel: %v", err)
- }
- defer cleanUp()
-
- n := newThreeHopNetwork(t, channels.aliceToBob, channels.bobToAlice,
- channels.bobToCarol, channels.carolToBob, testStartingHeight)
-
- if err := n.start(); err != nil {
- t.Fatal(err)
- }
- defer n.stop()
- defer n.feeEstimator.Stop()
-
- // Define a helper method that strobes the switch's log ticker, and
- // unblocks after nothing has been pulled for two seconds.
- waitForBobsSwitchToBlock := func() {
- bobSwitch := n.firstBobChannelLink.cfg.Switch
- ticker := bobSwitch.cfg.LogEventTicker.(*ticker.Force)
- timeout := time.After(15 * time.Second)
- for {
- time.Sleep(50 * time.Millisecond)
- select {
- case ticker.Force <- time.Now():
-
- case <-time.After(2 * time.Second):
- return
-
- case <-timeout:
- t.Fatalf("switch did not block")
- }
- }
- }
-
- // Define a helper method that strobes the link's batch ticker, and
- // unblocks after nothing has been pulled for two seconds.
- waitForBobsIncomingLinkToBlock := func() {
- ticker := n.firstBobChannelLink.cfg.BatchTicker.(*ticker.Force)
- timeout := time.After(15 * time.Second)
- for {
- time.Sleep(50 * time.Millisecond)
- select {
- case ticker.Force <- time.Now():
-
- case <-time.After(2 * time.Second):
- // We'll give a little extra time here, to
- // ensure that the packet is being pressed
- // against the htlcPlex.
- time.Sleep(50 * time.Millisecond)
- return
-
- case <-timeout:
- t.Fatalf("link did not block")
- }
- }
- }
-
- // To test that the cancellation is happening properly, we will set the
- // switch's htlcPlex to nil, so that calls to routeAsync block, and can
- // only exit if the link (or switch) is exiting. We will only be testing
- // the link here.
- //
- // In order to avoid data races, we need to ensure the switch isn't
- // selecting on that channel in the meantime. We'll prevent this by
- // first acquiring the index mutex and forcing a log event so that the
- // htlcForwarder is blocked inside the logTicker case, which also needs
- // the indexMtx.
- n.firstBobChannelLink.cfg.Switch.indexMtx.Lock()
-
- // Strobe the log ticker, and wait for switch to stop accepting any more
- // log ticks.
- waitForBobsSwitchToBlock()
-
- // While the htlcForwarder is blocked, swap out the htlcPlex with a nil
- // channel, and unlock the indexMtx to allow return to the
- // htlcForwarder's main select. After this, any attempt to forward
- // through the switch will block.
- n.firstBobChannelLink.cfg.Switch.htlcPlex = nil
- n.firstBobChannelLink.cfg.Switch.indexMtx.Unlock()
-
- // Now, make a payment from Alice to Carol, which should cause Bob's
- // incoming link to block when it tries to submit the packet to the nil
- // htlcPlex.
- amount := lnwire.NewMSatFromSatoshis(btcutil.UnitsPerCoin())
- htlcAmt, totalTimelock, hops := generateHops(
- amount, testStartingHeight,
- n.firstBobChannelLink, n.carolChannelLink,
- )
-
- firstHop := n.firstBobChannelLink.ShortChanID()
- makePayment(
- n.aliceServer, n.carolServer, firstHop, hops, amount, htlcAmt,
- totalTimelock,
- )
-
- // Strobe the batch ticker of Bob's incoming link, waiting for it to
- // become fully blocked.
- waitForBobsIncomingLinkToBlock()
-
- // Finally, stop the link to test that it can exit while synchronously
- // forwarding Adds to the switch.
- done := make(chan struct{})
- go func() {
- n.firstBobChannelLink.Stop()
- close(done)
- }()
-
- select {
- case <-time.After(3 * time.Second):
- t.Fatalf("unable to shutdown link while fwding incoming Adds")
- case <-done:
- }
-}
-
-// TestChannelLinkUpdateCommitFee tests that when a new block comes in, the
-// channel link properly checks to see if it should update the commitment fee.
-func TestChannelLinkUpdateCommitFee(t *testing.T) {
- t.Parallel()
-
- // First, we'll create our traditional three hop network. We'll only be
- // interacting with and asserting the state of two of the end points
- // for this test.
- aliceInitialBalance := btcutil.UnitsPerCoin() * 3
- channels, cleanUp, _, err := createClusterChannels(
- aliceInitialBalance, btcutil.UnitsPerCoin()*5,
- )
- if err != nil {
- t.Fatalf("unable to create channel: %v", err)
- }
- defer cleanUp()
-
- n := newThreeHopNetwork(t, channels.aliceToBob, channels.bobToAlice,
- channels.bobToCarol, channels.carolToBob, testStartingHeight)
-
- // First, we'll set up some message interceptors to ensure that the
- // proper messages are sent when updating fees.
- chanID := n.aliceChannelLink.ChanID()
- messages := []expectedMessage{
- {"alice", "bob", &lnwire.ChannelReestablish{}, false},
- {"bob", "alice", &lnwire.ChannelReestablish{}, false},
-
- {"alice", "bob", &lnwire.FundingLocked{}, false},
- {"bob", "alice", &lnwire.FundingLocked{}, false},
-
- // First fee update.
- {"alice", "bob", &lnwire.UpdateFee{}, false},
- {"alice", "bob", &lnwire.CommitSig{}, false},
- {"bob", "alice", &lnwire.RevokeAndAck{}, false},
- {"bob", "alice", &lnwire.CommitSig{}, false},
- {"alice", "bob", &lnwire.RevokeAndAck{}, false},
-
- // Second fee update.
- {"alice", "bob", &lnwire.UpdateFee{}, false},
- {"alice", "bob", &lnwire.CommitSig{}, false},
- {"bob", "alice", &lnwire.RevokeAndAck{}, false},
- {"bob", "alice", &lnwire.CommitSig{}, false},
- {"alice", "bob", &lnwire.RevokeAndAck{}, false},
- }
- n.aliceServer.intersect(createInterceptorFunc("[alice] <-- [bob]",
- "alice", messages, chanID, false))
- n.bobServer.intersect(createInterceptorFunc("[alice] --> [bob]",
- "bob", messages, chanID, false))
-
- if err := n.start(); err != nil {
- t.Fatal(err)
- }
- defer n.stop()
- defer n.feeEstimator.Stop()
-
- startingFeeRate := channels.aliceToBob.CommitFeeRate()
-
- // triggerFeeUpdate is a helper closure to determine whether a fee
- // update was triggered and completed properly.
- triggerFeeUpdate := func(feeEstimate, newFeeRate chainfee.SatPerKWeight,
- shouldUpdate bool) {
-
- t.Helper()
-
- // Record the fee rates before the links process the fee update
- // to test the case where a fee update isn't triggered.
- aliceBefore := channels.aliceToBob.CommitFeeRate()
- bobBefore := channels.bobToAlice.CommitFeeRate()
-
- // For the sake of this test, we'll reset the timer so that
- // Alice's link queries for a new network fee.
- n.aliceChannelLink.updateFeeTimer.Reset(time.Millisecond)
-
- // Next, we'll send the first fee rate response to Alice.
- select {
- case n.feeEstimator.byteFeeIn <- feeEstimate:
- case <-time.After(time.Second * 5):
- t.Fatalf("alice didn't query for the new network fee")
- }
-
- // Give the links some time to process the fee update.
- time.Sleep(time.Second)
-
- // Record the fee rates after the links have processed the fee
- // update and ensure they are correct based on whether a fee
- // update should have been triggered.
- aliceAfter := channels.aliceToBob.CommitFeeRate()
- bobAfter := channels.bobToAlice.CommitFeeRate()
-
- switch {
- case shouldUpdate && aliceAfter != newFeeRate:
- t.Fatalf("alice's fee rate didn't change: expected %v, "+
- "got %v", newFeeRate, aliceAfter)
-
- case shouldUpdate && bobAfter != newFeeRate:
- t.Fatalf("bob's fee rate didn't change: expected %v, "+
- "got %v", newFeeRate, bobAfter)
-
- case !shouldUpdate && aliceAfter != aliceBefore:
- t.Fatalf("alice's fee rate shouldn't have changed: "+
- "expected %v, got %v", aliceAfter, aliceAfter)
-
- case !shouldUpdate && bobAfter != bobBefore:
- t.Fatalf("bob's fee rate shouldn't have changed: "+
- "expected %v, got %v", bobBefore, bobAfter)
- }
- }
-
- // Triggering the link to update the fee of the channel with the same
- // fee rate should not send a fee update.
- triggerFeeUpdate(startingFeeRate, startingFeeRate, false)
-
- // Triggering the link to update the fee of the channel with a much
- // larger fee rate _should_ send a fee update.
- newFeeRate := startingFeeRate * 3
- triggerFeeUpdate(newFeeRate, newFeeRate, true)
-
- // Triggering the link to update the fee of the channel with a fee rate
- // that exceeds its maximum fee allocation should result in a fee rate
- // corresponding to the maximum fee allocation.
- const maxFeeRate chainfee.SatPerKWeight = 207182320
- triggerFeeUpdate(maxFeeRate+1, maxFeeRate, true)
-}
-
-// TestChannelLinkAcceptDuplicatePayment tests that if a link receives an
-// incoming HTLC for a payment we have already settled, then it accepts the
-// HTLC. We do this to simplify the processing of settles after restarts or
-// failures, reducing ambiguity when a batch is only partially processed.
-func TestChannelLinkAcceptDuplicatePayment(t *testing.T) {
- t.Parallel()
-
- // First, we'll create our traditional three hop network. We'll only be
- // interacting with and asserting the state of two of the end points
- // for this test.
- channels, cleanUp, _, err := createClusterChannels(
- btcutil.UnitsPerCoin()*3,
- btcutil.UnitsPerCoin()*5)
- if err != nil {
- t.Fatalf("unable to create channel: %v", err)
- }
- defer cleanUp()
-
- n := newThreeHopNetwork(t, channels.aliceToBob, channels.bobToAlice,
- channels.bobToCarol, channels.carolToBob, testStartingHeight)
- if err := n.start(); err != nil {
- t.Fatalf("unable to start three hop network: %v", err)
- }
- defer n.stop()
-
- amount := lnwire.NewMSatFromSatoshis(btcutil.UnitsPerCoin())
-
- // We'll start off by making a payment from Alice to Carol. We'll
- // manually generate this request so we can control all the parameters.
- htlcAmt, totalTimelock, hops := generateHops(amount, testStartingHeight,
- n.firstBobChannelLink, n.carolChannelLink)
- blob, err := generateRoute(hops...)
- if err != nil {
- t.Fatal(err)
- }
- invoice, htlc, pid, err := generatePayment(
- amount, htlcAmt, totalTimelock, blob,
- )
- if err != nil {
- t.Fatal(err)
- }
-
- err = n.carolServer.registry.AddInvoice(*invoice, htlc.PaymentHash)
- if err != nil {
- t.Fatalf("unable to add invoice in carol registry: %v", err)
- }
-
- // With the invoice now added to Carol's registry, we'll send the
- // payment.
- err = n.aliceServer.htlcSwitch.SendHTLC(
- n.firstBobChannelLink.ShortChanID(), pid, htlc,
- )
- if err != nil {
- t.Fatalf("unable to send payment to carol: %v", err)
- }
-
- resultChan, err := n.aliceServer.htlcSwitch.GetPaymentResult(
- pid, htlc.PaymentHash, newMockDeobfuscator(),
- )
- if err != nil {
- t.Fatalf("unable to get payment result: %v", err)
- }
-
- // Now, if we attempt to send the payment *again* it should be rejected
- // as it's a duplicate request.
- err = n.aliceServer.htlcSwitch.SendHTLC(
- n.firstBobChannelLink.ShortChanID(), pid, htlc,
- )
- if !ErrDuplicateAdd.Is(err) {
- t.Fatalf("ErrDuplicateAdd should have been "+
- "received got: %v", err)
- }
-
- select {
- case result, ok := <-resultChan:
- if !ok {
- t.Fatalf("unexpected shutdown")
- }
-
- if result.Error != nil {
- t.Fatalf("payment failed: %v", result.Error)
- }
- case <-time.After(5 * time.Second):
- t.Fatalf("payment result did not arrive")
- }
-}
-
-// TestChannelLinkAcceptOverpay tests that if we create an invoice for sender,
-// and the sender sends *more* than specified in the invoice, then we'll still
-// accept it and settle as normal.
-func TestChannelLinkAcceptOverpay(t *testing.T) {
- t.Parallel()
-
- // First, we'll create our traditional three hop network. We'll only be
- // interacting with and asserting the state of two of the end points
- // for this test.
- channels, cleanUp, _, err := createClusterChannels(
- btcutil.UnitsPerCoin()*3,
- btcutil.UnitsPerCoin()*5)
- if err != nil {
- t.Fatalf("unable to create channel: %v", err)
- }
- defer cleanUp()
-
- n := newThreeHopNetwork(t, channels.aliceToBob, channels.bobToAlice,
- channels.bobToCarol, channels.carolToBob, testStartingHeight)
- if err := n.start(); err != nil {
- t.Fatalf("unable to start three hop network: %v", err)
- }
- defer n.stop()
-
- carolBandwidthBefore := n.carolChannelLink.Bandwidth()
- firstBobBandwidthBefore := n.firstBobChannelLink.Bandwidth()
- secondBobBandwidthBefore := n.secondBobChannelLink.Bandwidth()
- aliceBandwidthBefore := n.aliceChannelLink.Bandwidth()
-
- // We'll request a route to send 10k satoshis via Alice -> Bob ->
- // Carol.
- amount := lnwire.NewMSatFromSatoshis(btcutil.UnitsPerCoin())
- htlcAmt, totalTimelock, hops := generateHops(
- amount, testStartingHeight,
- n.firstBobChannelLink, n.carolChannelLink,
- )
-
- // When we actually go to send the payment, we'll actually create an
- // invoice at Carol for only half of this amount.
- receiver := n.carolServer
- firstHop := n.firstBobChannelLink.ShortChanID()
- rhash, err := makePayment(
- n.aliceServer, n.carolServer, firstHop, hops, amount/2, htlcAmt,
- totalTimelock,
- ).Wait(30 * time.Second)
- if err != nil {
- t.Fatalf("unable to send payment: %v", err)
- }
-
- // Wait for Alice and Bob's second link to receive the revocation.
- time.Sleep(2 * time.Second)
-
- // Even though we sent 2x what was asked for, Carol should still have
- // accepted the payment and marked it as settled.
- invoice, err := receiver.registry.LookupInvoice(rhash)
- if err != nil {
- t.Fatalf("unable to get invoice: %v", err)
- }
- if invoice.State != channeldb.ContractSettled {
- t.Fatal("carol invoice haven't been settled")
- }
-
- expectedAliceBandwidth := aliceBandwidthBefore - htlcAmt
- if expectedAliceBandwidth != n.aliceChannelLink.Bandwidth() {
- t.Fatalf("channel bandwidth incorrect: expected %v, got %v",
- expectedAliceBandwidth, n.aliceChannelLink.Bandwidth())
- }
-
- expectedBobBandwidth1 := firstBobBandwidthBefore + htlcAmt
- if expectedBobBandwidth1 != n.firstBobChannelLink.Bandwidth() {
- t.Fatalf("channel bandwidth incorrect: expected %v, got %v",
- expectedBobBandwidth1, n.firstBobChannelLink.Bandwidth())
- }
-
- expectedBobBandwidth2 := secondBobBandwidthBefore - amount
- if expectedBobBandwidth2 != n.secondBobChannelLink.Bandwidth() {
- t.Fatalf("channel bandwidth incorrect: expected %v, got %v",
- expectedBobBandwidth2, n.secondBobChannelLink.Bandwidth())
- }
-
- expectedCarolBandwidth := carolBandwidthBefore + amount
- if expectedCarolBandwidth != n.carolChannelLink.Bandwidth() {
- t.Fatalf("channel bandwidth incorrect: expected %v, got %v",
- expectedCarolBandwidth, n.carolChannelLink.Bandwidth())
- }
-
- // Finally, we'll ensure that the amount we paid is properly reflected
- // in the stored invoice.
- if invoice.AmtPaid != amount {
- t.Fatalf("expected amt paid to be %v, is instead %v", amount,
- invoice.AmtPaid)
- }
-}
-
-// persistentLinkHarness is used to control the lifecylce of a link and the
-// switch that operates it. It supports the ability to restart either the link
-// or both the link and the switch.
-type persistentLinkHarness struct {
- t *testing.T
-
- link ChannelLink
- coreLink *channelLink
- channel *lnwallet.LightningChannel
-
- batchTicker chan time.Time
- msgs chan lnwire.Message
-
- restoreChan func() (*lnwallet.LightningChannel, er.R)
-}
-
-// newPersistentLinkHarness initializes a new persistentLinkHarness and derives
-// the supporting references from the active link.
-func newPersistentLinkHarness(t *testing.T, link ChannelLink,
- batchTicker chan time.Time,
- restore func() (*lnwallet.LightningChannel,
- er.R)) *persistentLinkHarness {
-
- coreLink := link.(*channelLink)
-
- return &persistentLinkHarness{
- t: t,
- link: link,
- coreLink: coreLink,
- channel: coreLink.channel,
- batchTicker: batchTicker,
- msgs: coreLink.cfg.Peer.(*mockPeer).sentMsgs,
- restoreChan: restore,
- }
-}
-
-// restart facilitates a shutdown and restart of the link maintained by the
-// harness. The primary purpose of this method is to ensure the consistency of
-// the supporting references is maintained across restarts.
-//
-// If `restartSwitch` is set, the entire switch will also be restarted,
-// and will be reinitialized with the contents of the channeldb backing Alice's
-// channel.
-//
-// Any number of hodl flags can be passed as additional arguments to this
-// method. If none are provided, the mask will be extracted as hodl.MaskNone.
-func (h *persistentLinkHarness) restart(restartSwitch bool,
- hodlFlags ...hodl.Flag) func() {
-
- // First, remove the link from the switch.
- h.coreLink.cfg.Switch.RemoveLink(h.link.ChanID())
-
- if restartSwitch {
- // If a switch restart is requested, we will stop it. It will be
- // reinstantiated in restartLink.
- h.coreLink.cfg.Switch.Stop()
- }
-
- // Since our in-memory state may have diverged from our persistent
- // state, we will restore the persisted state to ensure we always start
- // the link in a consistent state.
- var err er.R
- h.channel, err = h.restoreChan()
- if err != nil {
- h.t.Fatalf("unable to restore channels: %v", err)
- }
-
- // Now, restart the link using the channel state. This will take care of
- // adding the link to an existing switch, or creating a new one using
- // the database owned by the link.
- var cleanUp func()
- h.link, h.batchTicker, cleanUp, err = h.restartLink(
- h.channel, restartSwitch, hodlFlags,
- )
- if err != nil {
- h.t.Fatalf("unable to restart alicelink: %v", err)
- }
-
- // Repopulate the remaining fields in the harness.
- h.coreLink = h.link.(*channelLink)
- h.msgs = h.coreLink.cfg.Peer.(*mockPeer).sentMsgs
-
- return cleanUp
-}
-
-// checkSent reads the links message stream and verify that the messages are
-// dequeued in the same order as provided by `pkts`.
-func (h *persistentLinkHarness) checkSent(pkts []*htlcPacket) {
- for _, pkt := range pkts {
- var msg lnwire.Message
- select {
- case msg = <-h.msgs:
- case <-time.After(15 * time.Second):
- h.t.Fatalf("did not receive message")
- }
-
- if !reflect.DeepEqual(msg, pkt.htlc) {
- h.t.Fatalf("unexpected packet, want %v, got %v",
- pkt.htlc, msg)
- }
- }
-}
-
-// commitCircuits accepts a list of circuits and tries to commit them to the
-// switch's circuit map. The forwarding actions are returned if there was no
-// failure.
-func (h *persistentLinkHarness) commitCircuits(circuits []*PaymentCircuit) *CircuitFwdActions {
- fwdActions, err := h.coreLink.cfg.Switch.commitCircuits(circuits...)
- if err != nil {
- h.t.Fatalf("unable to commit circuit: %v", err)
- }
-
- return fwdActions
-}
-
-func (h *persistentLinkHarness) assertNumPendingNumOpenCircuits(
- wantPending, wantOpen int) {
-
- _, _, line, _ := runtime.Caller(1)
-
- numPending := h.coreLink.cfg.Switch.circuits.NumPending()
- if numPending != wantPending {
- h.t.Fatalf("line: %d: wrong number of pending circuits: "+
- "want %d, got %d", line, wantPending, numPending)
- }
- numOpen := h.coreLink.cfg.Switch.circuits.NumOpen()
- if numOpen != wantOpen {
- h.t.Fatalf("line: %d: wrong number of open circuits: "+
- "want %d, got %d", line, wantOpen, numOpen)
- }
-}
-
-// trySignNextCommitment signals the batch ticker so that the link will try to
-// update its commitment transaction.
-func (h *persistentLinkHarness) trySignNextCommitment() {
- select {
- case h.batchTicker <- time.Now():
- // Give the link enough time to process the request.
- time.Sleep(time.Millisecond * 500)
-
- case <-time.After(15 * time.Second):
- h.t.Fatalf("did not initiate state transition")
- }
-}
-
-// restartLink creates a new channel link from the given channel state, and adds
-// to an htlcswitch. If none is provided by the caller, a new one will be
-// created using Alice's database.
-func (h *persistentLinkHarness) restartLink(
- aliceChannel *lnwallet.LightningChannel, restartSwitch bool,
- hodlFlags []hodl.Flag) (
- ChannelLink, chan time.Time, func(), er.R) {
-
- var (
- decoder = newMockIteratorDecoder()
- obfuscator = NewMockObfuscator()
- alicePeer = &mockPeer{
- sentMsgs: make(chan lnwire.Message, 2000),
- quit: make(chan struct{}),
- }
-
- globalPolicy = ForwardingPolicy{
- MinHTLCOut: lnwire.NewMSatFromSatoshis(5),
- BaseFee: lnwire.NewMSatFromSatoshis(1),
- TimeLockDelta: 6,
- }
-
- pCache = newMockPreimageCache()
- )
-
- aliceDb := aliceChannel.State().Db
- aliceSwitch := h.coreLink.cfg.Switch
- if restartSwitch {
- var err er.R
- aliceSwitch, err = initSwitchWithDB(testStartingHeight, aliceDb)
- if err != nil {
- return nil, nil, nil, err
- }
- }
-
- // Instantiate with a long interval, so that we can precisely control
- // the firing via force feeding.
- bticker := ticker.NewForce(time.Hour)
- aliceCfg := ChannelLinkConfig{
- FwrdingPolicy: globalPolicy,
- Peer: alicePeer,
- Switch: aliceSwitch,
- Circuits: aliceSwitch.CircuitModifier(),
- ForwardPackets: aliceSwitch.ForwardPackets,
- DecodeHopIterators: decoder.DecodeHopIterators,
- ExtractErrorEncrypter: func(*btcec.PublicKey) (
- hop.ErrorEncrypter, lnwire.FailCode) {
- return obfuscator, lnwire.CodeNone
- },
- FetchLastChannelUpdate: mockGetChanUpdateMessage,
- PreimageCache: pCache,
- OnChannelFailure: func(lnwire.ChannelID,
- lnwire.ShortChannelID, LinkFailureError) {
- },
- UpdateContractSignals: func(*contractcourt.ContractSignals) er.R {
- return nil
- },
- Registry: h.coreLink.cfg.Registry,
- ChainEvents: &contractcourt.ChainEventSubscription{},
- BatchTicker: bticker,
- FwdPkgGCTicker: ticker.New(5 * time.Second),
- PendingCommitTicker: ticker.New(time.Minute),
- // Make the BatchSize and Min/MaxFeeUpdateTimeout large enough
- // to not trigger commit updates automatically during tests.
- BatchSize: 10000,
- MinFeeUpdateTimeout: 30 * time.Minute,
- MaxFeeUpdateTimeout: 40 * time.Minute,
- // Set any hodl flags requested for the new link.
- HodlMask: hodl.MaskFromFlags(hodlFlags...),
- MaxOutgoingCltvExpiry: DefaultMaxOutgoingCltvExpiry,
- MaxFeeAllocation: DefaultMaxLinkFeeAllocation,
- NotifyActiveLink: func(wire.OutPoint) {},
- NotifyActiveChannel: func(wire.OutPoint) {},
- NotifyInactiveChannel: func(wire.OutPoint) {},
- HtlcNotifier: aliceSwitch.cfg.HtlcNotifier,
- }
-
- aliceLink := NewChannelLink(aliceCfg, aliceChannel)
- if err := aliceSwitch.AddLink(aliceLink); err != nil {
- return nil, nil, nil, err
- }
- go func() {
- for {
- select {
- case <-aliceLink.(*channelLink).htlcUpdates:
- case <-aliceLink.(*channelLink).quit:
- return
- }
- }
- }()
-
- cleanUp := func() {
- close(alicePeer.quit)
- defer aliceLink.Stop()
- }
-
- return aliceLink, bticker.Force, cleanUp, nil
-}
-
-// gnerateHtlc generates a simple payment from Bob to Alice.
-func generateHtlc(t *testing.T, coreLink *channelLink,
- id uint64) *lnwire.UpdateAddHTLC {
-
- t.Helper()
-
- htlc, invoice := generateHtlcAndInvoice(t, id)
-
- // We must add the invoice to the registry, such that Alice
- // expects this payment.
- err := coreLink.cfg.Registry.(*mockInvoiceRegistry).AddInvoice(
- *invoice, htlc.PaymentHash,
- )
- if err != nil {
- t.Fatalf("unable to add invoice to registry: %v", err)
- }
-
- return htlc
-}
-
-// generateHtlcAndInvoice generates an invoice and a single hop htlc to send to
-// the receiver.
-func generateHtlcAndInvoice(t *testing.T,
- id uint64) (*lnwire.UpdateAddHTLC, *channeldb.Invoice) {
-
- t.Helper()
-
- htlcAmt := lnwire.NewMSatFromSatoshis(10000)
- htlcExpiry := testStartingHeight + testInvoiceCltvExpiry
- hops := []*hop.Payload{
- hop.NewLegacyPayload(&sphinx.HopData{
- Realm: [1]byte{}, // hop.BitcoinNetwork
- NextAddress: [8]byte{}, // hop.Exit,
- ForwardAmount: uint64(htlcAmt),
- OutgoingCltv: uint32(htlcExpiry),
- }),
- }
- blob, err := generateRoute(hops...)
- if err != nil {
- t.Fatalf("unable to generate route: %v", err)
- }
-
- invoice, htlc, _, err := generatePayment(
- htlcAmt, htlcAmt, uint32(htlcExpiry), blob,
- )
- if err != nil {
- t.Fatalf("unable to create payment: %v", err)
- }
-
- htlc.ID = id
-
- return htlc, invoice
-}
-
-// TestChannelLinkNoMoreUpdates tests that we won't send a new commitment
-// when there are no new updates to sign.
-func TestChannelLinkNoMoreUpdates(t *testing.T) {
- t.Parallel()
-
- chanAmt := btcutil.UnitsPerCoin() * 5
- chanReserve := btcutil.UnitsPerCoin() * 1
- aliceLink, bobChannel, _, start, cleanUp, _, err :=
- newSingleLinkTestHarness(chanAmt, chanReserve)
- if err != nil {
- t.Fatalf("unable to create link: %v", err)
- }
- defer cleanUp()
-
- if err := start(); err != nil {
- t.Fatalf("unable to start test harness: %v", err)
- }
-
- var (
- coreLink = aliceLink.(*channelLink)
- aliceMsgs = coreLink.cfg.Peer.(*mockPeer).sentMsgs
- )
-
- // Add two HTLCs to Alice's registry, that Bob can pay.
- htlc1 := generateHtlc(t, coreLink, 0)
- htlc2 := generateHtlc(t, coreLink, 1)
-
- ctx := linkTestContext{
- t: t,
- aliceLink: aliceLink,
- aliceMsgs: aliceMsgs,
- bobChannel: bobChannel,
- }
-
- // We now play out the following scanario:
- //
- // (1) Alice receives htlc1 from Bob.
- // (2) Bob sends signature covering htlc1.
- // (3) Alice receives htlc2 from Bob.
- // (4) Since Bob has sent a new commitment signature, Alice should
- // first respond with a revocation.
- // (5) Alice should also send a commitment signature for the new state,
- // covering htlc1.
- // (6) Bob sends a new commitment signature, covering htlc2 that he sent
- // earlier. This signature should cover hltc1 + htlc2.
- // (7) Alice should revoke the old commitment. This ACKs htlc2.
- // (8) Bob can now revoke his old commitment in response to the
- // signature Alice sent covering htlc1.
- // (9) htlc1 is now locked in on Bob's commitment, and we expect Alice
- // to settle it.
- // (10) Alice should send a signature covering this settle to Bob. Only
- // htlc2 should now be covered by this signature.
- // (11) Bob can revoke his last state, which will also ACK the settle
- // of htlc1.
- // (12) Bob sends a new commitment signature. This signature should
- // cover htlc2.
- // (13) Alice will send a settle for htlc2.
- // (14) Alice will also send a signature covering the settle.
- // (15) Alice should send a revocation in response to the signature Bob
- // sent earlier.
- // (16) Bob will revoke his commitment in response to the commitment
- // Alice sent.
- // (17) Send a signature for the empty state. No HTLCs are left.
- // (18) Alice will revoke her previous state.
- // Alice Bob
- // | |
- // | ... |
- // | | <--- idle (no htlc on either side)
- // | |
- ctx.sendHtlcBobToAlice(htlc1) // |<----- add-1 ------| (1)
- ctx.sendCommitSigBobToAlice(1) // |<------ sig -------| (2)
- ctx.sendHtlcBobToAlice(htlc2) // |<----- add-2 ------| (3)
- ctx.receiveRevAndAckAliceToBob() // |------- rev ------>| (4) <--- Alice acks add-1
- ctx.receiveCommitSigAliceToBob(1) // |------- sig ------>| (5) <--- Alice signs add-1
- ctx.sendCommitSigBobToAlice(2) // |<------ sig -------| (6)
- ctx.receiveRevAndAckAliceToBob() // |------- rev ------>| (7) <--- Alice acks add-2
- ctx.sendRevAndAckBobToAlice() // |<------ rev -------| (8)
- ctx.receiveSettleAliceToBob() // |------ ful-1 ----->| (9)
- ctx.receiveCommitSigAliceToBob(1) // |------- sig ------>| (10) <--- Alice signs add-1 + add-2 + ful-1 = add-2
- ctx.sendRevAndAckBobToAlice() // |<------ rev -------| (11)
- ctx.sendCommitSigBobToAlice(1) // |<------ sig -------| (12)
- ctx.receiveSettleAliceToBob() // |------ ful-2 ----->| (13)
- ctx.receiveCommitSigAliceToBob(0) // |------- sig ------>| (14) <--- Alice signs add-2 + ful-2 = no htlcs
- ctx.receiveRevAndAckAliceToBob() // |------- rev ------>| (15)
- ctx.sendRevAndAckBobToAlice() // |<------ rev -------| (16) <--- Bob acks that there are no more htlcs
- ctx.sendCommitSigBobToAlice(0) // |<------ sig -------| (17)
- ctx.receiveRevAndAckAliceToBob() // |------- rev ------>| (18) <--- Alice acks that there are no htlcs on Alice's side
-
- // No there are no more changes to ACK or sign, make sure Alice doesn't
- // attempt to send any more messages.
- var msg lnwire.Message
- select {
- case msg = <-aliceMsgs:
- t.Fatalf("did not expect message %T", msg)
- case <-time.After(100 * time.Millisecond):
- }
-}
-
-// checkHasPreimages inspects Alice's preimage cache, and asserts whether the
-// preimages for the provided HTLCs are known and unknown, and that all of them
-// match the expected status of expOk.
-func checkHasPreimages(t *testing.T, coreLink *channelLink,
- htlcs []*lnwire.UpdateAddHTLC, expOk bool) {
-
- t.Helper()
-
- err := wait.NoError(func() er.R {
- for i := range htlcs {
- _, ok := coreLink.cfg.PreimageCache.LookupPreimage(
- htlcs[i].PaymentHash,
- )
- if ok == expOk {
- continue
- }
-
- return er.Errorf("expected to find witness: %v, "+
- "got %v for hash=%x", expOk, ok,
- htlcs[i].PaymentHash)
- }
-
- return nil
- }, 5*time.Second)
- if err != nil {
- t.Fatalf("unable to find preimages: %v", err)
- }
-}
-
-// TestChannelLinkWaitForRevocation tests that we will keep accepting updates
-// to our commitment transaction, even when we are waiting for a revocation
-// from the remote node.
-func TestChannelLinkWaitForRevocation(t *testing.T) {
- t.Parallel()
-
- chanAmt := btcutil.UnitsPerCoin() * 5
- chanReserve := btcutil.UnitsPerCoin() * 1
- aliceLink, bobChannel, _, start, cleanUp, _, err :=
- newSingleLinkTestHarness(chanAmt, chanReserve)
- if err != nil {
- t.Fatalf("unable to create link: %v", err)
- }
- defer cleanUp()
-
- if err := start(); err != nil {
- t.Fatalf("unable to start test harness: %v", err)
- }
-
- var (
- coreLink = aliceLink.(*channelLink)
- aliceMsgs = coreLink.cfg.Peer.(*mockPeer).sentMsgs
- )
-
- // We will send 10 HTLCs in total, from Bob to Alice.
- numHtlcs := 10
- var htlcs []*lnwire.UpdateAddHTLC
- for i := 0; i < numHtlcs; i++ {
- htlc := generateHtlc(t, coreLink, uint64(i))
- htlcs = append(htlcs, htlc)
- }
-
- ctx := linkTestContext{
- t: t,
- aliceLink: aliceLink,
- aliceMsgs: aliceMsgs,
- bobChannel: bobChannel,
- }
-
- assertNoMsgFromAlice := func() {
- select {
- case <-aliceMsgs:
- t.Fatalf("did not expect message from Alice")
- case <-time.After(50 * time.Millisecond):
- }
- }
-
- // We play out the following scenario:
- //
- // (1) Add the first HTLC.
- // (2) Bob sends signature covering the htlc.
- // (3) Since Bob has sent a new commitment signature, Alice should first
- // respond with a revocation. This revocation will ACK the first htlc.
- // (4) Alice should also send a commitment signature for the new state,
- // locking in the HTLC on Bob's commitment. Note that we don't
- // immediately let Bob respond with a revocation in this case.
- // (5.i) Now we send the rest of the HTLCs from Bob to Alice.
- // (6.i) Bob sends a new commitment signature, covering all HTLCs up
- // to this point.
- // (7.i) Alice should respond to Bob's state updates with revocations,
- // but cannot send any new signatures for Bob's state because her
- // revocation window is exhausted.
- // (8) Now let Bob finally send his revocation.
- // (9) We expect Alice to settle her first HTLC, since it was already
- // locked in.
- // (10) Now Alice should send a signature covering this settle + lock
- // in the rest of the HTLCs on Bob's commitment.
- // (11) Bob receives the new signature for his commitment, and can
- // revoke his old state, ACKing the settle.
- // (12.i) Now Alice can settle all the HTLCs, since they are locked in
- // on both parties' commitments.
- // (13) Bob can send a signature covering the first settle Alice sent.
- // Bob's signature should cover all the remaining HTLCs as well, since
- // he hasn't ACKed the last settles yet. Alice receives the signature
- // from Bob. Alice's commitment now has the first HTLC settled, and all
- // the other HTLCs locked in.
- // (14) Alice will send a signature for all the settles she just sent.
- // (15) Bob can revoke his previous state, in response to Alice's
- // signature.
- // (16) In response to the signature Bob sent, Alice can
- // revoke her previous state.
- // (17) Bob still hasn't sent a commitment covering all settles, so do
- // that now. Since Bob ACKed all settles, no HTLCs should be left on
- // the commitment.
- // (18) Alice will revoke her previous state.
- // Alice Bob
- // | |
- // | ... |
- // | | <--- idle (no htlc on either side)
- // | |
- ctx.sendHtlcBobToAlice(htlcs[0]) // |<----- add-1 ------| (1)
- ctx.sendCommitSigBobToAlice(1) // |<------ sig -------| (2)
- ctx.receiveRevAndAckAliceToBob() // |------- rev ------>| (3) <--- Alice acks add-1
- ctx.receiveCommitSigAliceToBob(1) // |------- sig ------>| (4) <--- Alice signs add-1
- for i := 1; i < numHtlcs; i++ { // | |
- ctx.sendHtlcBobToAlice(htlcs[i]) // |<----- add-i ------| (5.i)
- ctx.sendCommitSigBobToAlice(i + 1) // |<------ sig -------| (6.i)
- ctx.receiveRevAndAckAliceToBob() // |------- rev ------>| (7.i) <--- Alice acks add-i
- assertNoMsgFromAlice() // | |
- // | | Alice should not send a sig for
- // | | Bob's last state, since she is
- // | | still waiting for a revocation
- // | | for the previous one.
- } // | |
- ctx.sendRevAndAckBobToAlice() // |<------ rev -------| (8) Finally let Bob send rev
- ctx.receiveSettleAliceToBob() // |------ ful-1 ----->| (9)
- ctx.receiveCommitSigAliceToBob(numHtlcs - 1) // |------- sig ------>| (10) <--- Alice signs add-i
- ctx.sendRevAndAckBobToAlice() // |<------ rev -------| (11)
- for i := 1; i < numHtlcs; i++ { // | |
- ctx.receiveSettleAliceToBob() // |------ ful-1 ----->| (12.i)
- } // | |
- ctx.sendCommitSigBobToAlice(numHtlcs - 1) // |<------ sig -------| (13)
- ctx.receiveCommitSigAliceToBob(0) // |------- sig ------>| (14)
- ctx.sendRevAndAckBobToAlice() // |<------ rev -------| (15)
- ctx.receiveRevAndAckAliceToBob() // |------- rev ------>| (16)
- ctx.sendCommitSigBobToAlice(0) // |<------ sig -------| (17)
- ctx.receiveRevAndAckAliceToBob() // |------- rev ------>| (18)
-
- // Both side's state is now updated, no more messages should be sent.
- assertNoMsgFromAlice()
-}
-
-// TestChannelLinkNoEmptySig asserts that no empty commit sig message is sent
-// when the commitment txes are out of sync.
-func TestChannelLinkNoEmptySig(t *testing.T) {
- t.Parallel()
-
- chanAmt := btcutil.UnitsPerCoin() * 5
- chanReserve := btcutil.UnitsPerCoin() * 1
- aliceLink, bobChannel, batchTicker, start, cleanUp, _, err :=
- newSingleLinkTestHarness(chanAmt, chanReserve)
- if err != nil {
- t.Fatalf("unable to create link: %v", err)
- }
- defer cleanUp()
-
- if err := start(); err != nil {
- t.Fatalf("unable to start test harness: %v", err)
- }
- defer aliceLink.Stop()
-
- var (
- coreLink = aliceLink.(*channelLink)
- aliceMsgs = coreLink.cfg.Peer.(*mockPeer).sentMsgs
- )
-
- ctx := linkTestContext{
- t: t,
- aliceLink: aliceLink,
- aliceMsgs: aliceMsgs,
- bobChannel: bobChannel,
- }
-
- // Send htlc 1 from Alice to Bob.
- htlc1, _ := generateHtlcAndInvoice(t, 0)
- ctx.sendHtlcAliceToBob(0, htlc1)
- ctx.receiveHtlcAliceToBob()
-
- // Tick the batch ticker to trigger a commitsig from Alice->Bob.
- select {
- case batchTicker <- time.Now():
- case <-time.After(5 * time.Second):
- t.Fatalf("could not force commit sig")
- }
-
- // Receive a CommitSig from Alice covering the Add from above.
- ctx.receiveCommitSigAliceToBob(1)
-
- // Bob revokes previous commitment tx.
- ctx.sendRevAndAckBobToAlice()
-
- // Alice sends htlc 2 to Bob.
- htlc2, _ := generateHtlcAndInvoice(t, 0)
- ctx.sendHtlcAliceToBob(1, htlc2)
- ctx.receiveHtlcAliceToBob()
-
- // Tick the batch ticker to trigger a commitsig from Alice->Bob.
- select {
- case batchTicker <- time.Now():
- case <-time.After(5 * time.Second):
- t.Fatalf("could not force commit sig")
- }
-
- // Get the commit sig from Alice, but don't send it to Bob yet.
- commitSigAlice := ctx.receiveCommitSigAlice(2)
-
- // Bob adds htlc 1 to its remote commit tx.
- ctx.sendCommitSigBobToAlice(1)
-
- // Now send Bob the signature from Alice covering both htlcs.
- err = bobChannel.ReceiveNewCommitment(
- commitSigAlice.CommitSig, commitSigAlice.HtlcSigs,
- )
- if err != nil {
- t.Fatalf("bob failed receiving commitment: %v", err)
- }
-
- // Both Alice and Bob revoke their previous commitment txes.
- ctx.receiveRevAndAckAliceToBob()
- ctx.sendRevAndAckBobToAlice()
-
- // The commit txes are not in sync, but it is Bob's turn to send a new
- // signature. We don't expect Alice to send out any message. This check
- // allows some time for the log commit ticker to trigger for Alice.
- ctx.assertNoMsgFromAlice(time.Second)
-}
-
-// TestChannelLinkBatchPreimageWrite asserts that a link will batch preimage
-// writes when just as it receives a CommitSig to lock in any Settles, and also
-// if the link is aware of any uncommitted preimages if the link is stopped,
-// i.e. due to a disconnection or shutdown.
-func TestChannelLinkBatchPreimageWrite(t *testing.T) {
- t.Parallel()
-
- tests := []struct {
- name string
- disconnect bool
- }{
- {
- name: "flush on commit sig",
- disconnect: false,
- },
- {
- name: "flush on disconnect",
- disconnect: true,
- },
- }
-
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- testChannelLinkBatchPreimageWrite(t, test.disconnect)
- })
- }
-}
-
-func testChannelLinkBatchPreimageWrite(t *testing.T, disconnect bool) {
- chanAmt := btcutil.UnitsPerCoin() * 5
- chanReserve := btcutil.UnitsPerCoin() * 1
- aliceLink, bobChannel, batchTicker, startUp, cleanUp, _, err :=
- newSingleLinkTestHarness(chanAmt, chanReserve)
- if err != nil {
- t.Fatalf("unable to create link: %v", err)
- }
- defer cleanUp()
-
- if err := startUp(); err != nil {
- t.Fatalf("unable to start test harness: %v", err)
- }
-
- var (
- coreLink = aliceLink.(*channelLink)
- aliceMsgs = coreLink.cfg.Peer.(*mockPeer).sentMsgs
- )
-
- // We will send 10 HTLCs in total, from Bob to Alice.
- numHtlcs := 10
- var htlcs []*lnwire.UpdateAddHTLC
- var invoices []*channeldb.Invoice
- for i := 0; i < numHtlcs; i++ {
- htlc, invoice := generateHtlcAndInvoice(t, uint64(i))
- htlcs = append(htlcs, htlc)
- invoices = append(invoices, invoice)
- }
-
- ctx := linkTestContext{
- t: t,
- aliceLink: aliceLink,
- aliceMsgs: aliceMsgs,
- bobChannel: bobChannel,
- }
-
- // First, send a batch of Adds from Alice to Bob.
- for i, htlc := range htlcs {
- ctx.sendHtlcAliceToBob(i, htlc)
- ctx.receiveHtlcAliceToBob()
- }
-
- // Assert that no preimages exist for these htlcs in Alice's cache.
- checkHasPreimages(t, coreLink, htlcs, false)
-
- // Force alice's link to sign a commitment covering the htlcs sent thus
- // far.
- select {
- case batchTicker <- time.Now():
- case <-time.After(15 * time.Second):
- t.Fatalf("could not force commit sig")
- }
-
- // Do a commitment dance to lock in the Adds, we expect numHtlcs htlcs
- // to be on each party's commitment transactions.
- ctx.receiveCommitSigAliceToBob(numHtlcs)
- ctx.sendRevAndAckBobToAlice()
- ctx.sendCommitSigBobToAlice(numHtlcs)
- ctx.receiveRevAndAckAliceToBob()
-
- // Check again that no preimages exist for these htlcs in Alice's cache.
- checkHasPreimages(t, coreLink, htlcs, false)
-
- // Now, have Bob settle the HTLCs back to Alice using the preimages in
- // the invoice corresponding to each of the HTLCs.
- for i, invoice := range invoices {
- ctx.sendSettleBobToAlice(
- uint64(i),
- *invoice.Terms.PaymentPreimage,
- )
- }
-
- // Assert that Alice has not yet written the preimages, even though she
- // has received them in the UpdateFulfillHTLC messages.
- checkHasPreimages(t, coreLink, htlcs, false)
-
- // If this is the disconnect run, we will having Bob send Alice his
- // CommitSig, and simply stop Alice's link. As she exits, we should
- // detect that she has uncommitted preimages and write them to disk.
- if disconnect {
- aliceLink.Stop()
- checkHasPreimages(t, coreLink, htlcs, true)
- return
- }
-
- // Otherwise, we are testing that Alice commits the preimages after
- // receiving a CommitSig from Bob. Bob's commitment should now have 0
- // HTLCs.
- ctx.sendCommitSigBobToAlice(0)
-
- // Since Alice will process the CommitSig asynchronously, we wait until
- // she replies with her RevokeAndAck to ensure the tests reliably
- // inspect her cache after advancing her state.
- select {
-
- // Received Alice's RevokeAndAck, assert that she has written all of the
- // uncommitted preimages learned in this commitment.
- case <-aliceMsgs:
- checkHasPreimages(t, coreLink, htlcs, true)
-
- // Alice didn't send her RevokeAndAck, something is wrong.
- case <-time.After(15 * time.Second):
- t.Fatalf("alice did not send her revocation")
- }
-}
-
-// TestChannelLinkCleanupSpuriousResponses tests that we properly cleanup
-// references in the event that internal retransmission continues as a result of
-// not properly cleaning up Add/SettleFailRefs.
-func TestChannelLinkCleanupSpuriousResponses(t *testing.T) {
- t.Parallel()
-
- chanAmt := btcutil.UnitsPerCoin() * 5
- chanReserve := btcutil.UnitsPerCoin() * 1
- aliceLink, bobChannel, _, start, cleanUp, _, err :=
- newSingleLinkTestHarness(chanAmt, chanReserve)
- if err != nil {
- t.Fatalf("unable to create link: %v", err)
- }
- defer cleanUp()
-
- if err := start(); err != nil {
- t.Fatalf("unable to start test harness: %v", err)
- }
-
- var (
- coreLink = aliceLink.(*channelLink)
- aliceMsgs = coreLink.cfg.Peer.(*mockPeer).sentMsgs
- )
-
- // Settle Alice in hodl ExitSettle mode so that she won't respond
- // immediately to the htlc's meant for her. This allows us to control
- // the responses she gives back to Bob.
- coreLink.cfg.HodlMask = hodl.ExitSettle.Mask()
-
- // Add two HTLCs to Alice's registry, that Bob can pay.
- htlc1 := generateHtlc(t, coreLink, 0)
- htlc2 := generateHtlc(t, coreLink, 1)
-
- ctx := linkTestContext{
- t: t,
- aliceLink: aliceLink,
- aliceMsgs: aliceMsgs,
- bobChannel: bobChannel,
- }
-
- // We start with he following scenario: Bob sends Alice two HTLCs, and a
- // commitment dance ensures, leaving two HTLCs that Alice can respond
- // to. Since Alice is in ExitSettle mode, we will then take over and
- // provide targeted fail messages to test the link's ability to cleanup
- // spurious responses.
- //
- // Bob Alice
- // |------ add-1 ----->|
- // |------ add-2 ----->|
- // |------ sig ----->| commits add-1 + add-2
- // |<----- rev ------|
- // |<----- sig ------| commits add-1 + add-2
- // |------ rev ----->|
- ctx.sendHtlcBobToAlice(htlc1)
- ctx.sendHtlcBobToAlice(htlc2)
- ctx.sendCommitSigBobToAlice(2)
- ctx.receiveRevAndAckAliceToBob()
- ctx.receiveCommitSigAliceToBob(2)
- ctx.sendRevAndAckBobToAlice()
-
- // Give Alice to time to process the revocation.
- time.Sleep(time.Second)
-
- aliceFwdPkgs, err := coreLink.channel.LoadFwdPkgs()
- if err != nil {
- t.Fatalf("unable to load alice's fwdpkgs: %v", err)
- }
-
- // Alice should have exactly one forwarding package.
- if len(aliceFwdPkgs) != 1 {
- t.Fatalf("alice should have 1 fwd pkgs, has %d instead",
- len(aliceFwdPkgs))
- }
-
- // We'll stash the height of these AddRefs, so that we can reconstruct
- // the proper references later.
- addHeight := aliceFwdPkgs[0].Height
-
- // The first fwdpkg should have exactly 2 entries, one for each Add that
- // was added during the last dance.
- if aliceFwdPkgs[0].AckFilter.Count() != 2 {
- t.Fatalf("alice fwdpkg should have 2 Adds, has %d instead",
- aliceFwdPkgs[0].AckFilter.Count())
- }
-
- // Both of the entries in the FwdFilter should be unacked.
- for i := 0; i < 2; i++ {
- if aliceFwdPkgs[0].AckFilter.Contains(uint16(i)) {
- t.Fatalf("alice fwdpkg index %d should not "+
- "have ack", i)
- }
- }
-
- // Now, construct a Fail packet for Bob settling the first HTLC. This
- // packet will NOT include a sourceRef, meaning the AddRef on disk will
- // not be acked after committing this response.
- fail0 := &htlcPacket{
- incomingChanID: bobChannel.ShortChanID(),
- incomingHTLCID: 0,
- obfuscator: NewMockObfuscator(),
- htlc: &lnwire.UpdateFailHTLC{},
- }
- aliceLink.HandleSwitchPacket(fail0)
-
- // Bob Alice
- // |<----- fal-1 ------|
- // |<----- sig ------| commits fal-1
- ctx.receiveFailAliceToBob()
- ctx.receiveCommitSigAliceToBob(1)
-
- aliceFwdPkgs, err = coreLink.channel.LoadFwdPkgs()
- if err != nil {
- t.Fatalf("unable to load alice's fwdpkgs: %v", err)
- }
-
- // Alice should still only have one fwdpkg, as she hasn't yet received
- // another revocation from Bob.
- if len(aliceFwdPkgs) != 1 {
- t.Fatalf("alice should have 1 fwd pkgs, has %d instead",
- len(aliceFwdPkgs))
- }
-
- // Assert the fwdpkg still has 2 entries for the original Adds.
- if aliceFwdPkgs[0].AckFilter.Count() != 2 {
- t.Fatalf("alice fwdpkg should have 2 Adds, has %d instead",
- aliceFwdPkgs[0].AckFilter.Count())
- }
-
- // Since the fail packet was missing the AddRef, the forward filter for
- // either HTLC should not have been modified.
- for i := 0; i < 2; i++ {
- if aliceFwdPkgs[0].AckFilter.Contains(uint16(i)) {
- t.Fatalf("alice fwdpkg index %d should not "+
- "have ack", i)
- }
- }
-
- // Complete the rest of the commitment dance, now that the forwarding
- // packages have been verified.
- //
- // Bob Alice
- // |------ rev ----->|
- // |------ sig ----->|
- // |<----- rev ------|
- ctx.sendRevAndAckBobToAlice()
- ctx.sendCommitSigBobToAlice(1)
- ctx.receiveRevAndAckAliceToBob()
-
- // Next, we'll construct a fail packet for add-2 (index 1), which we'll
- // send to Bob and lock in. Since the AddRef is set on this instance, we
- // should see the second HTLCs AddRef update the forward filter for the
- // first fwd pkg.
- fail1 := &htlcPacket{
- sourceRef: &channeldb.AddRef{
- Height: addHeight,
- Index: 1,
- },
- incomingChanID: bobChannel.ShortChanID(),
- incomingHTLCID: 1,
- obfuscator: NewMockObfuscator(),
- htlc: &lnwire.UpdateFailHTLC{},
- }
- aliceLink.HandleSwitchPacket(fail1)
-
- // Bob Alice
- // |<----- fal-1 ------|
- // |<----- sig ------| commits fal-1
- ctx.receiveFailAliceToBob()
- ctx.receiveCommitSigAliceToBob(0)
-
- aliceFwdPkgs, err = coreLink.channel.LoadFwdPkgs()
- if err != nil {
- t.Fatalf("unable to load alice's fwdpkgs: %v", err)
- }
-
- // Now that another commitment dance has completed, Alice should have 2
- // forwarding packages.
- if len(aliceFwdPkgs) != 2 {
- t.Fatalf("alice should have 2 fwd pkgs, has %d instead",
- len(aliceFwdPkgs))
- }
-
- // The most recent package should have no new HTLCs, so it should be
- // empty.
- if aliceFwdPkgs[1].AckFilter.Count() != 0 {
- t.Fatalf("alice fwdpkg height=%d should have 0 Adds, "+
- "has %d instead", aliceFwdPkgs[1].Height,
- aliceFwdPkgs[1].AckFilter.Count())
- }
-
- // The index for the first AddRef should still be unacked, as the
- // sourceRef was missing on the htlcPacket.
- if aliceFwdPkgs[0].AckFilter.Contains(0) {
- t.Fatalf("alice fwdpkg height=%d index=0 should not "+
- "have an ack", aliceFwdPkgs[0].Height)
- }
-
- // The index for the second AddRef should now be acked, as it was
- // properly constructed and committed in Alice's last commit sig.
- if !aliceFwdPkgs[0].AckFilter.Contains(1) {
- t.Fatalf("alice fwdpkg height=%d index=1 should have "+
- "an ack", aliceFwdPkgs[0].Height)
- }
-
- // Complete the rest of the commitment dance.
- //
- // Bob Alice
- // |------ rev ----->|
- // |------ sig ----->|
- // |<----- rev ------|
- ctx.sendRevAndAckBobToAlice()
- ctx.sendCommitSigBobToAlice(0)
- ctx.receiveRevAndAckAliceToBob()
-
- // We'll do a quick sanity check, and blindly send the same fail packet
- // for the first HTLC. Since this HTLC index has already been settled,
- // this should trigger an attempt to cleanup the spurious response.
- // However, we expect it to result in a NOP since it is still missing
- // its sourceRef.
- aliceLink.HandleSwitchPacket(fail0)
-
- // Allow the link enough time to process and reject the duplicate
- // packet, we'll also check that this doesn't trigger Alice to send the
- // fail to Bob.
- select {
- case <-aliceMsgs:
- t.Fatalf("message sent for duplicate fail")
- case <-time.After(time.Second):
- }
-
- aliceFwdPkgs, err = coreLink.channel.LoadFwdPkgs()
- if err != nil {
- t.Fatalf("unable to load alice's fwdpkgs: %v", err)
- }
-
- // Alice should now have 3 forwarding packages, and the latest should be
- // empty.
- if len(aliceFwdPkgs) != 3 {
- t.Fatalf("alice should have 3 fwd pkgs, has %d instead",
- len(aliceFwdPkgs))
- }
- if aliceFwdPkgs[2].AckFilter.Count() != 0 {
- t.Fatalf("alice fwdpkg height=%d should have 0 Adds, "+
- "has %d instead", aliceFwdPkgs[2].Height,
- aliceFwdPkgs[2].AckFilter.Count())
- }
-
- // The state of the forwarding packages should be unmodified from the
- // prior assertion, since the duplicate Fail for index 0 should have
- // been ignored.
- if aliceFwdPkgs[0].AckFilter.Contains(0) {
- t.Fatalf("alice fwdpkg height=%d index=0 should not "+
- "have an ack", aliceFwdPkgs[0].Height)
- }
- if !aliceFwdPkgs[0].AckFilter.Contains(1) {
- t.Fatalf("alice fwdpkg height=%d index=1 should have "+
- "an ack", aliceFwdPkgs[0].Height)
- }
-
- // Finally, construct a new Fail packet for the first HTLC, this time
- // with the sourceRef properly constructed. When the link handles this
- // duplicate, it should clean up the remaining AddRef state maintained
- // in Alice's link, but it should not result in anything being sent to
- // Bob.
- fail0 = &htlcPacket{
- sourceRef: &channeldb.AddRef{
- Height: addHeight,
- Index: 0,
- },
- incomingChanID: bobChannel.ShortChanID(),
- incomingHTLCID: 0,
- obfuscator: NewMockObfuscator(),
- htlc: &lnwire.UpdateFailHTLC{},
- }
- aliceLink.HandleSwitchPacket(fail0)
-
- // Allow the link enough time to process and reject the duplicate
- // packet, we'll also check that this doesn't trigger Alice to send the
- // fail to Bob.
- select {
- case <-aliceMsgs:
- t.Fatalf("message sent for duplicate fail")
- case <-time.After(time.Second):
- }
-
- aliceFwdPkgs, err = coreLink.channel.LoadFwdPkgs()
- if err != nil {
- t.Fatalf("unable to load alice's fwdpkgs: %v", err)
- }
-
- // Since no state transitions have been performed for the duplicate
- // packets, Alice should still have the same 3 forwarding packages.
- if len(aliceFwdPkgs) != 3 {
- t.Fatalf("alice should have 3 fwd pkgs, has %d instead",
- len(aliceFwdPkgs))
- }
-
- // Assert that all indices in our original forwarded have now been acked
- // as a result of our spurious cleanup logic.
- for i := 0; i < 2; i++ {
- if !aliceFwdPkgs[0].AckFilter.Contains(uint16(i)) {
- t.Fatalf("alice fwdpkg height=%d index=%d "+
- "should have ack", aliceFwdPkgs[0].Height, i)
- }
- }
-}
-
-type mockPackager struct {
- failLoadFwdPkgs bool
-}
-
-func (*mockPackager) AddFwdPkg(tx kvdb.RwTx, fwdPkg *channeldb.FwdPkg) er.R {
- return nil
-}
-
-func (*mockPackager) SetFwdFilter(tx kvdb.RwTx, height uint64,
- fwdFilter *channeldb.PkgFilter) er.R {
- return nil
-}
-
-func (*mockPackager) AckAddHtlcs(tx kvdb.RwTx,
- addRefs ...channeldb.AddRef) er.R {
- return nil
-}
-
-func (m *mockPackager) LoadFwdPkgs(tx kvdb.RTx) ([]*channeldb.FwdPkg, er.R) {
- if m.failLoadFwdPkgs {
- return nil, er.Errorf("failing LoadFwdPkgs")
- }
- return nil, nil
-}
-
-func (*mockPackager) RemovePkg(tx kvdb.RwTx, height uint64) er.R {
- return nil
-}
-
-func (*mockPackager) AckSettleFails(tx kvdb.RwTx,
- settleFailRefs ...channeldb.SettleFailRef) er.R {
- return nil
-}
-
-// TestChannelLinkFail tests that we will fail the channel, and force close the
-// channel in certain situations.
-func TestChannelLinkFail(t *testing.T) {
- t.Parallel()
-
- testCases := []struct {
- // options is used to set up mocks and configure the link
- // before it is started.
- options func(*channelLink)
-
- // link test is used to execute the given test on the channel
- // link after it is started.
- linkTest func(*testing.T, *channelLink, *lnwallet.LightningChannel)
-
- // shouldForceClose indicates whether we expect the link to
- // force close the channel in response to the actions performed
- // during the linkTest.
- shouldForceClose bool
-
- // permanentFailure indicates whether we expect the link to
- // consider the failure permanent in response to the actions
- // performed during the linkTest.
- permanentFailure bool
- }{
- {
- // Test that we don't force close if syncing states
- // fails at startup.
- func(c *channelLink) {
- c.cfg.SyncStates = true
-
- // Make the syncChanStateCall fail by making
- // the SendMessage call fail.
- c.cfg.Peer.(*mockPeer).disconnected = true
- },
- func(t *testing.T, c *channelLink, _ *lnwallet.LightningChannel) {
- // Should fail at startup.
- },
- false,
- false,
- },
- {
- // Test that we don't force closes the channel if
- // resolving forward packages fails at startup.
- func(c *channelLink) {
- // We make the call to resolveFwdPkgs fail by
- // making the underlying forwarder fail.
- pkg := &mockPackager{
- failLoadFwdPkgs: true,
- }
- c.channel.State().Packager = pkg
- },
- func(t *testing.T, c *channelLink, _ *lnwallet.LightningChannel) {
- // Should fail at startup.
- },
- false,
- false,
- },
- {
- // Test that we force close the channel if we receive
- // an invalid Settle message.
- func(c *channelLink) {
- },
- func(t *testing.T, c *channelLink, _ *lnwallet.LightningChannel) {
- // Recevive an htlc settle for an htlc that was
- // never added.
- htlcSettle := &lnwire.UpdateFulfillHTLC{
- ID: 0,
- PaymentPreimage: [32]byte{},
- }
- c.HandleChannelUpdate(htlcSettle)
- },
- true,
- false,
- },
- {
- // Test that we force close the channel if we receive
- // an invalid CommitSig, not containing enough HTLC
- // sigs.
- func(c *channelLink) {
- },
- func(t *testing.T, c *channelLink, remoteChannel *lnwallet.LightningChannel) {
-
- // Generate an HTLC and send to the link.
- htlc1 := generateHtlc(t, c, 0)
- ctx := linkTestContext{
- t: t,
- aliceLink: c,
- bobChannel: remoteChannel,
- }
- ctx.sendHtlcBobToAlice(htlc1)
-
- // Sign a commitment that will include
- // signature for the HTLC just sent.
- sig, htlcSigs, _, err :=
- remoteChannel.SignNextCommitment()
- if err != nil {
- t.Fatalf("error signing commitment: %v",
- err)
- }
-
- // Remove the HTLC sig, such that the commit
- // sig will be invalid.
- commitSig := &lnwire.CommitSig{
- CommitSig: sig,
- HtlcSigs: htlcSigs[1:],
- }
-
- c.HandleChannelUpdate(commitSig)
- },
- true,
- false,
- },
- {
- // Test that we force close the channel if we receive
- // an invalid CommitSig, where the sig itself is
- // corrupted.
- func(c *channelLink) {
- },
- func(t *testing.T, c *channelLink, remoteChannel *lnwallet.LightningChannel) {
-
- // Generate an HTLC and send to the link.
- htlc1 := generateHtlc(t, c, 0)
- ctx := linkTestContext{
- t: t,
- aliceLink: c,
- bobChannel: remoteChannel,
- }
-
- ctx.sendHtlcBobToAlice(htlc1)
-
- // Sign a commitment that will include
- // signature for the HTLC just sent.
- sig, htlcSigs, _, err :=
- remoteChannel.SignNextCommitment()
- if err != nil {
- t.Fatalf("error signing commitment: %v",
- err)
- }
-
- // Flip a bit on the signature, rendering it
- // invalid.
- sig[19] ^= 1
- commitSig := &lnwire.CommitSig{
- CommitSig: sig,
- HtlcSigs: htlcSigs,
- }
-
- c.HandleChannelUpdate(commitSig)
- },
- true,
- false,
- },
- {
- // Test that we consider the failure permanent if we
- // receive a link error from the remote.
- func(c *channelLink) {
- },
- func(t *testing.T, c *channelLink, remoteChannel *lnwallet.LightningChannel) {
- err := &lnwire.Error{}
- c.HandleChannelUpdate(err)
- },
- false,
- // TODO(halseth) For compatibility with CL we currently
- // don't treat Errors as permanent errors.
- false,
- },
- }
-
- chanAmt := btcutil.UnitsPerCoin() * 5
-
- // Execute each test case.
- for i, test := range testCases {
- link, remoteChannel, _, start, cleanUp, _, err :=
- newSingleLinkTestHarness(chanAmt, 0)
- if err != nil {
- t.Fatalf("unable to create link: %v", err)
- }
-
- coreLink := link.(*channelLink)
-
- // Set up a channel used to check whether the link error
- // force closed the channel.
- linkErrors := make(chan LinkFailureError, 1)
- coreLink.cfg.OnChannelFailure = func(_ lnwire.ChannelID,
- _ lnwire.ShortChannelID, linkErr LinkFailureError) {
- linkErrors <- linkErr
- }
-
- // Set up the link before starting it.
- test.options(coreLink)
- if err := start(); err != nil {
- t.Fatalf("unable to start test harness: %v", err)
- }
-
- // Execute the test case.
- test.linkTest(t, coreLink, remoteChannel)
-
- // Currently we expect all test cases to lead to link error.
- var linkErr LinkFailureError
- select {
- case linkErr = <-linkErrors:
- case <-time.After(10 * time.Second):
- t.Fatalf("%d) Alice did not fail"+
- "channel", i)
- }
-
- // If we expect the link to force close the channel in this
- // case, check that it happens. If not, make sure it does not
- // happen.
- if test.shouldForceClose != linkErr.ForceClose {
- t.Fatalf("%d) Expected Alice to force close(%v), "+
- "instead got(%v)", i, test.shouldForceClose,
- linkErr.ForceClose)
- }
-
- if test.permanentFailure != linkErr.PermanentFailure {
- t.Fatalf("%d) Expected Alice set permanent failure(%v), "+
- "instead got(%v)", i, test.permanentFailure,
- linkErr.PermanentFailure)
- }
-
- // Clean up before starting next test case.
- cleanUp()
- }
-}
-
-// TestExpectedFee tests calculation of ExpectedFee returns expected fee, given
-// a baseFee, a feeRate, and an htlc amount.
-func TestExpectedFee(t *testing.T) {
- testCases := []struct {
- baseFee lnwire.MilliSatoshi
- feeRate lnwire.MilliSatoshi
- htlcAmt lnwire.MilliSatoshi
- expected lnwire.MilliSatoshi
- }{
- {
- lnwire.MilliSatoshi(0),
- lnwire.MilliSatoshi(0),
- lnwire.MilliSatoshi(0),
- lnwire.MilliSatoshi(0),
- },
- {
- lnwire.MilliSatoshi(0),
- lnwire.MilliSatoshi(1),
- lnwire.MilliSatoshi(999999),
- lnwire.MilliSatoshi(0),
- },
- {
- lnwire.MilliSatoshi(0),
- lnwire.MilliSatoshi(1),
- lnwire.MilliSatoshi(1000000),
- lnwire.MilliSatoshi(1),
- },
- {
- lnwire.MilliSatoshi(0),
- lnwire.MilliSatoshi(1),
- lnwire.MilliSatoshi(1000001),
- lnwire.MilliSatoshi(1),
- },
- {
- lnwire.MilliSatoshi(1),
- lnwire.MilliSatoshi(1),
- lnwire.MilliSatoshi(1000000),
- lnwire.MilliSatoshi(2),
- },
- }
-
- for _, test := range testCases {
- f := ForwardingPolicy{
- BaseFee: test.baseFee,
- FeeRate: test.feeRate,
- }
- fee := ExpectedFee(f, test.htlcAmt)
- if fee != test.expected {
- t.Errorf("expected fee to be (%v), instead got (%v)", test.expected,
- fee)
- }
- }
-}
-
-// TestForwardingAsymmetricTimeLockPolicies tests that each link is able to
-// properly handle forwarding HTLCs when their outgoing channels have
-// asymmetric policies w.r.t what they require for time locks.
-func TestForwardingAsymmetricTimeLockPolicies(t *testing.T) {
- t.Parallel()
-
- // First, we'll create our traditional three hop network. Bob
- // interacting with and asserting the state of two of the end points
- // for this test.
- channels, cleanUp, _, err := createClusterChannels(
- btcutil.UnitsPerCoin()*3,
- btcutil.UnitsPerCoin()*5,
- )
- if err != nil {
- t.Fatalf("unable to create channel: %v", err)
- }
- defer cleanUp()
-
- n := newThreeHopNetwork(
- t, channels.aliceToBob, channels.bobToAlice, channels.bobToCarol,
- channels.carolToBob, testStartingHeight,
- )
- if err := n.start(); err != nil {
- t.Fatalf("unable to start three hop network: %v", err)
- }
- defer n.stop()
-
- // Now that each of the links are up, we'll modify the link from Alice
- // -> Bob to have a greater time lock delta than that of the link of
- // Bob -> Carol.
- newPolicy := n.firstBobChannelLink.cfg.FwrdingPolicy
- newPolicy.TimeLockDelta = 7
- n.firstBobChannelLink.UpdateForwardingPolicy(newPolicy)
-
- // Now that the Alice -> Bob link has been updated, we'll craft and
- // send a payment from Alice -> Carol. This should succeed as normal,
- // even though Bob has asymmetric time lock policies.
- amount := lnwire.NewMSatFromSatoshis(btcutil.UnitsPerCoin())
- htlcAmt, totalTimelock, hops := generateHops(
- amount, testStartingHeight, n.firstBobChannelLink,
- n.carolChannelLink,
- )
-
- firstHop := n.firstBobChannelLink.ShortChanID()
- _, err = makePayment(
- n.aliceServer, n.carolServer, firstHop, hops, amount, htlcAmt,
- totalTimelock,
- ).Wait(30 * time.Second)
- if err != nil {
- t.Fatalf("unable to send payment: %v", err)
- }
-}
-
-// TestCheckHtlcForward tests that a link is properly enforcing the HTLC
-// forwarding policy.
-func TestCheckHtlcForward(t *testing.T) {
-
- fetchLastChannelUpdate := func(lnwire.ShortChannelID) (
- *lnwire.ChannelUpdate, er.R) {
-
- return &lnwire.ChannelUpdate{}, nil
- }
-
- testChannel, _, fCleanUp, err := createTestChannel(
- alicePrivKey, bobPrivKey, 100000, 100000,
- 1000, 1000, lnwire.ShortChannelID{},
- )
- if err != nil {
- t.Fatal(err)
- }
- defer fCleanUp()
-
- link := channelLink{
- cfg: ChannelLinkConfig{
- FwrdingPolicy: ForwardingPolicy{
- TimeLockDelta: 20,
- MinHTLCOut: 500,
- MaxHTLC: 1000,
- BaseFee: 10,
- },
- FetchLastChannelUpdate: fetchLastChannelUpdate,
- MaxOutgoingCltvExpiry: DefaultMaxOutgoingCltvExpiry,
- HtlcNotifier: &mockHTLCNotifier{},
- },
- channel: testChannel.channel,
- }
-
- var hash [32]byte
-
- t.Run("satisfied", func(t *testing.T) {
- result := link.CheckHtlcForward(hash, 1500, 1000,
- 200, 150, 0)
- if result != nil {
- t.Fatalf("expected policy to be satisfied")
- }
- })
-
- t.Run("below minhtlc", func(t *testing.T) {
- result := link.CheckHtlcForward(hash, 100, 50,
- 200, 150, 0)
- if _, ok := result.WireMessage().(*lnwire.FailAmountBelowMinimum); !ok {
- t.Fatalf("expected FailAmountBelowMinimum failure code")
- }
- })
-
- t.Run("above maxhtlc", func(t *testing.T) {
- result := link.CheckHtlcForward(hash, 1500, 1200,
- 200, 150, 0)
- if _, ok := result.WireMessage().(*lnwire.FailTemporaryChannelFailure); !ok {
- t.Fatalf("expected FailTemporaryChannelFailure failure code")
- }
- })
-
- t.Run("insufficient fee", func(t *testing.T) {
- result := link.CheckHtlcForward(hash, 1005, 1000,
- 200, 150, 0)
- if _, ok := result.WireMessage().(*lnwire.FailFeeInsufficient); !ok {
- t.Fatalf("expected FailFeeInsufficient failure code")
- }
- })
-
- t.Run("expiry too soon", func(t *testing.T) {
- result := link.CheckHtlcForward(hash, 1500, 1000,
- 200, 150, 190)
- if _, ok := result.WireMessage().(*lnwire.FailExpiryTooSoon); !ok {
- t.Fatalf("expected FailExpiryTooSoon failure code")
- }
- })
-
- t.Run("incorrect cltv expiry", func(t *testing.T) {
- result := link.CheckHtlcForward(hash, 1500, 1000,
- 200, 190, 0)
- if _, ok := result.WireMessage().(*lnwire.FailIncorrectCltvExpiry); !ok {
- t.Fatalf("expected FailIncorrectCltvExpiry failure code")
- }
-
- })
-
- t.Run("cltv expiry too far in the future", func(t *testing.T) {
- // Check that expiry isn't too far in the future.
- result := link.CheckHtlcForward(hash, 1500, 1000,
- 10200, 10100, 0)
- if _, ok := result.WireMessage().(*lnwire.FailExpiryTooFar); !ok {
- t.Fatalf("expected FailExpiryTooFar failure code")
- }
- })
-}
-
-// TestChannelLinkCanceledInvoice in this test checks the interaction
-// between Alice and Bob for a canceled invoice.
-func TestChannelLinkCanceledInvoice(t *testing.T) {
- t.Parallel()
-
- // Setup a alice-bob network.
- alice, bob, cleanUp, err := createTwoClusterChannels(
- btcutil.UnitsPerCoin()*3,
- btcutil.UnitsPerCoin()*5)
- if err != nil {
- t.Fatalf("unable to create channel: %v", err)
- }
- defer cleanUp()
-
- n := newTwoHopNetwork(t, alice.channel, bob.channel, testStartingHeight)
- if err := n.start(); err != nil {
- t.Fatal(err)
- }
- defer n.stop()
-
- // Prepare an alice -> bob payment.
- amount := lnwire.NewMSatFromSatoshis(btcutil.UnitsPerCoin())
- htlcAmt, totalTimelock, hops := generateHops(amount, testStartingHeight,
- n.bobChannelLink)
-
- firstHop := n.bobChannelLink.ShortChanID()
-
- invoice, payFunc, err := preparePayment(
- n.aliceServer, n.bobServer, firstHop, hops, amount, htlcAmt,
- totalTimelock,
- )
- if err != nil {
- t.Fatalf("unable to prepare the payment: %v", err)
- }
-
- // Cancel the invoice at bob's end.
- hash := invoice.Terms.PaymentPreimage.Hash()
- err = n.bobServer.registry.CancelInvoice(hash)
- if err != nil {
- t.Fatal(err)
- }
-
- // Have Alice fire the payment.
- err = waitForPayFuncResult(payFunc, 30*time.Second)
-
- // Because the invoice is canceled, we expect an unknown payment hash
- // result.
- errr := er.Wrapped(err)
- rtErr, ok := errr.(ClearTextError)
- if !ok {
- t.Fatalf("expected ClearTextError, but got %v", err)
- }
- _, ok = rtErr.WireMessage().(*lnwire.FailIncorrectDetails)
- if !ok {
- t.Fatalf("expected unknown payment hash, but got %v", err)
- }
-}
-
-type hodlInvoiceTestCtx struct {
- n *twoHopNetwork
- startBandwidthAlice lnwire.MilliSatoshi
- startBandwidthBob lnwire.MilliSatoshi
- hash lntypes.Hash
- preimage lntypes.Preimage
- amount lnwire.MilliSatoshi
- errChan chan er.R
-
- restoreBob func() (*lnwallet.LightningChannel, er.R)
-
- cleanUp func()
-}
-
-func newHodlInvoiceTestCtx(t *testing.T) (*hodlInvoiceTestCtx, er.R) {
- // Setup a alice-bob network.
- alice, bob, cleanUp, err := createTwoClusterChannels(
- btcutil.UnitsPerCoin()*3,
- btcutil.UnitsPerCoin()*5,
- )
- if err != nil {
- t.Fatalf("unable to create channel: %v", err)
- }
-
- n := newTwoHopNetwork(t, alice.channel, bob.channel, testStartingHeight)
- if err := n.start(); err != nil {
- t.Fatal(err)
- }
-
- aliceBandwidthBefore := n.aliceChannelLink.Bandwidth()
- bobBandwidthBefore := n.bobChannelLink.Bandwidth()
-
- debug := false
- if debug {
- // Log message that alice receives.
- n.aliceServer.intersect(
- createLogFunc("alice", n.aliceChannelLink.ChanID()),
- )
-
- // Log message that bob receives.
- n.bobServer.intersect(
- createLogFunc("bob", n.bobChannelLink.ChanID()),
- )
- }
-
- amount := lnwire.NewMSatFromSatoshis(btcutil.UnitsPerCoin())
- htlcAmt, totalTimelock, hops := generateHops(
- amount, testStartingHeight, n.bobChannelLink,
- )
-
- // Generate hold invoice preimage.
- r, err := generateRandomBytes(sha256.Size)
- if err != nil {
- t.Fatal(err)
- }
- preimage, err := lntypes.MakePreimage(r)
- if err != nil {
- t.Fatal(err)
- }
- hash := preimage.Hash()
-
- // Have alice pay the hodl invoice, wait for bob's commitment state to
- // be updated and the invoice state to be updated.
- receiver := n.bobServer
- receiver.registry.settleChan = make(chan lntypes.Hash)
- firstHop := n.bobChannelLink.ShortChanID()
- errChan := n.makeHoldPayment(
- n.aliceServer, receiver, firstHop, hops, amount, htlcAmt,
- totalTimelock, preimage,
- )
-
- select {
- case err := <-errChan:
- t.Fatalf("no payment result expected: %v", err)
- case <-time.After(5 * time.Second):
- t.Fatal("timeout")
- case h := <-receiver.registry.settleChan:
- if hash != h {
- t.Fatal("unexpect invoice settled")
- }
- }
-
- return &hodlInvoiceTestCtx{
- n: n,
- startBandwidthAlice: aliceBandwidthBefore,
- startBandwidthBob: bobBandwidthBefore,
- preimage: preimage,
- hash: hash,
- amount: amount,
- errChan: errChan,
- restoreBob: bob.restore,
-
- cleanUp: func() {
- cleanUp()
- n.stop()
- },
- }, nil
-}
-
-// TestChannelLinkHoldInvoiceSettle asserts that a hodl invoice can be settled.
-func TestChannelLinkHoldInvoiceSettle(t *testing.T) {
- t.Parallel()
-
- defer timeout(t)()
-
- ctx, err := newHodlInvoiceTestCtx(t)
- if err != nil {
- t.Fatal(err)
- }
- defer ctx.cleanUp()
-
- err = ctx.n.bobServer.registry.SettleHodlInvoice(ctx.preimage)
- if err != nil {
- t.Fatal(err)
- }
-
- // Wait for payment to succeed.
- err = <-ctx.errChan
- if err != nil {
- t.Fatal(err)
- }
-
- // Wait for Alice to receive the revocation. This is needed
- // because the settles are pipelined to the switch and otherwise
- // the bandwidth won't be updated by the time Alice receives a
- // response here.
- time.Sleep(2 * time.Second)
-
- if ctx.startBandwidthAlice-ctx.amount !=
- ctx.n.aliceChannelLink.Bandwidth() {
-
- t.Fatal("alice bandwidth should have decrease on payment " +
- "amount")
- }
-
- if ctx.startBandwidthBob+ctx.amount !=
- ctx.n.bobChannelLink.Bandwidth() {
-
- t.Fatalf("bob bandwidth isn't match: expected %v, got %v",
- ctx.startBandwidthBob+ctx.amount,
- ctx.n.bobChannelLink.Bandwidth())
- }
-}
-
-// TestChannelLinkHoldInvoiceSettle asserts that a hodl invoice can be canceled.
-func TestChannelLinkHoldInvoiceCancel(t *testing.T) {
- t.Parallel()
-
- defer timeout(t)()
-
- ctx, err := newHodlInvoiceTestCtx(t)
- if err != nil {
- t.Fatal(err)
- }
- defer ctx.cleanUp()
-
- err = ctx.n.bobServer.registry.CancelInvoice(ctx.hash)
- if err != nil {
- t.Fatal(err)
- }
-
- // Wait for payment to succeed.
- err = <-ctx.errChan
- assertFailureCode(t, err, lnwire.CodeIncorrectOrUnknownPaymentDetails)
-}
-
-// TestChannelLinkHoldInvoiceRestart asserts hodl htlcs are held after blocks
-// are mined and the link is restarted. The initial expiry checks should not
-// apply to hodl htlcs after restart.
-func TestChannelLinkHoldInvoiceRestart(t *testing.T) {
- t.Parallel()
-
- defer timeout(t)()
-
- chanAmt := btcutil.UnitsPerCoin() * 5
-
- // We'll start by creating a new link with our chanAmt (5 BTC). We will
- // only be testing Alice's behavior, so the reference to Bob's channel
- // state is unnecessary.
- aliceLink, bobChannel, _, start, cleanUp, restore, err :=
- newSingleLinkTestHarness(chanAmt, 0)
- if err != nil {
- t.Fatalf("unable to create link: %v", err)
- }
- defer cleanUp()
-
- alice := newPersistentLinkHarness(
- t, aliceLink, nil, restore,
- )
-
- if err := start(); err != nil {
- t.Fatalf("unable to start test harness: %v", err)
- }
-
- var (
- coreLink = alice.coreLink
- registry = coreLink.cfg.Registry.(*mockInvoiceRegistry)
- )
-
- registry.settleChan = make(chan lntypes.Hash)
-
- htlc, invoice := generateHtlcAndInvoice(t, 0)
-
- // Convert into a hodl invoice and save the preimage for later.
- preimage := invoice.Terms.PaymentPreimage
- invoice.Terms.PaymentPreimage = nil
- invoice.HodlInvoice = true
-
- // We must add the invoice to the registry, such that Alice
- // expects this payment.
- err = registry.AddInvoice(
- *invoice, htlc.PaymentHash,
- )
- if err != nil {
- t.Fatalf("unable to add invoice to registry: %v", err)
- }
-
- ctx := linkTestContext{
- t: t,
- aliceLink: alice.link,
- aliceMsgs: alice.msgs,
- bobChannel: bobChannel,
- }
-
- // Lock in htlc paying the hodl invoice.
- ctx.sendHtlcBobToAlice(htlc)
- ctx.sendCommitSigBobToAlice(1)
- ctx.receiveRevAndAckAliceToBob()
- ctx.receiveCommitSigAliceToBob(1)
- ctx.sendRevAndAckBobToAlice()
-
- // We expect a call to the invoice registry to notify the arrival of the
- // htlc.
- <-registry.settleChan
-
- // Increase block height. This height will be retrieved by the link
- // after restart.
- coreLink.cfg.Switch.bestHeight++
-
- // Restart link.
- alice.restart(false)
- ctx.aliceLink = alice.link
- ctx.aliceMsgs = alice.msgs
-
- // Expect htlc to be reprocessed.
- <-registry.settleChan
-
- // Settle the invoice with the preimage.
- err = registry.SettleHodlInvoice(*preimage)
- if err != nil {
- t.Fatalf("settle hodl invoice: %v", err)
- }
-
- // Expect alice to send a settle and commitsig message to bob.
- ctx.receiveSettleAliceToBob()
- ctx.receiveCommitSigAliceToBob(0)
-
- // Stop the link
- alice.link.Stop()
-
- // Check that no unexpected messages were sent.
- select {
- case msg := <-alice.msgs:
- t.Fatalf("did not expect message %T", msg)
- default:
- }
-}
-
-// TestChannelLinkRevocationWindowRegular asserts that htlcs paying to a regular
-// invoice are settled even if the revocation window gets exhausted.
-func TestChannelLinkRevocationWindowRegular(t *testing.T) {
- t.Parallel()
-
- chanAmt := btcutil.UnitsPerCoin() * 5
-
- // We'll start by creating a new link with our chanAmt (5 BTC). We will
- // only be testing Alice's behavior, so the reference to Bob's channel
- // state is unnecessary.
- aliceLink, bobChannel, _, start, cleanUp, _, err :=
- newSingleLinkTestHarness(chanAmt, 0)
- if err != nil {
- t.Fatalf("unable to create link: %v", err)
- }
- defer cleanUp()
-
- if err := start(); err != nil {
- t.Fatalf("unable to start test harness: %v", err)
- }
- defer aliceLink.Stop()
-
- var (
- coreLink = aliceLink.(*channelLink)
- registry = coreLink.cfg.Registry.(*mockInvoiceRegistry)
- aliceMsgs = coreLink.cfg.Peer.(*mockPeer).sentMsgs
- )
-
- ctx := linkTestContext{
- t: t,
- aliceLink: aliceLink,
- aliceMsgs: aliceMsgs,
- bobChannel: bobChannel,
- }
-
- registry.settleChan = make(chan lntypes.Hash)
-
- htlc1, invoice1 := generateHtlcAndInvoice(t, 0)
- htlc2, invoice2 := generateHtlcAndInvoice(t, 1)
-
- // We must add the invoice to the registry, such that Alice
- // expects this payment.
- err = registry.AddInvoice(*invoice1, htlc1.PaymentHash)
- if err != nil {
- t.Fatalf("unable to add invoice to registry: %v", err)
- }
- err = registry.AddInvoice(*invoice2, htlc2.PaymentHash)
- if err != nil {
- t.Fatalf("unable to add invoice to registry: %v", err)
- }
-
- // Lock in htlc 1 on both sides.
- ctx.sendHtlcBobToAlice(htlc1)
- ctx.sendCommitSigBobToAlice(1)
- ctx.receiveRevAndAckAliceToBob()
- ctx.receiveCommitSigAliceToBob(1)
- ctx.sendRevAndAckBobToAlice()
-
- // We expect a call to the invoice registry to notify the arrival of the
- // htlc.
- select {
- case <-registry.settleChan:
- case <-time.After(5 * time.Second):
- t.Fatal("expected invoice to be settled")
- }
-
- // Expect alice to send a settle and commitsig message to bob. Bob does
- // not yet send the revocation.
- ctx.receiveSettleAliceToBob()
- ctx.receiveCommitSigAliceToBob(0)
-
- // Pay invoice 2.
- ctx.sendHtlcBobToAlice(htlc2)
- ctx.sendCommitSigBobToAlice(2)
- ctx.receiveRevAndAckAliceToBob()
-
- // At this point, Alice cannot send a new commit sig to bob because the
- // revocation window is exhausted.
-
- // Bob sends revocation and signs commit with htlc1 settled.
- ctx.sendRevAndAckBobToAlice()
-
- // After the revocation, it is again possible for Alice to send a commit
- // sig with htlc2.
- ctx.receiveCommitSigAliceToBob(1)
-}
-
-// TestChannelLinkRevocationWindowHodl asserts that htlcs paying to a hodl
-// invoice are settled even if the revocation window gets exhausted.
-func TestChannelLinkRevocationWindowHodl(t *testing.T) {
- t.Parallel()
-
- chanAmt := btcutil.UnitsPerCoin() * 5
-
- // We'll start by creating a new link with our chanAmt (5 BTC). We will
- // only be testing Alice's behavior, so the reference to Bob's channel
- // state is unnecessary.
- aliceLink, bobChannel, batchTicker, start, cleanUp, _, err :=
- newSingleLinkTestHarness(chanAmt, 0)
- if err != nil {
- t.Fatalf("unable to create link: %v", err)
- }
- defer cleanUp()
-
- if err := start(); err != nil {
- t.Fatalf("unable to start test harness: %v", err)
- }
-
- var (
- coreLink = aliceLink.(*channelLink)
- registry = coreLink.cfg.Registry.(*mockInvoiceRegistry)
- aliceMsgs = coreLink.cfg.Peer.(*mockPeer).sentMsgs
- )
-
- registry.settleChan = make(chan lntypes.Hash)
-
- // Generate two invoice-htlc pairs.
- htlc1, invoice1 := generateHtlcAndInvoice(t, 0)
- htlc2, invoice2 := generateHtlcAndInvoice(t, 1)
-
- // Convert into hodl invoices and save the preimages for later.
- preimage1 := invoice1.Terms.PaymentPreimage
- invoice1.Terms.PaymentPreimage = nil
- invoice1.HodlInvoice = true
-
- preimage2 := invoice2.Terms.PaymentPreimage
- invoice2.Terms.PaymentPreimage = nil
- invoice2.HodlInvoice = true
-
- // We must add the invoices to the registry, such that Alice
- // expects the payments.
- err = registry.AddInvoice(*invoice1, htlc1.PaymentHash)
- if err != nil {
- t.Fatalf("unable to add invoice to registry: %v", err)
- }
- err = registry.AddInvoice(*invoice2, htlc2.PaymentHash)
- if err != nil {
- t.Fatalf("unable to add invoice to registry: %v", err)
- }
-
- ctx := linkTestContext{
- t: t,
- aliceLink: aliceLink,
- aliceMsgs: aliceMsgs,
- bobChannel: bobChannel,
- }
-
- // Lock in htlc 1 on both sides.
- ctx.sendHtlcBobToAlice(htlc1)
- ctx.sendCommitSigBobToAlice(1)
- ctx.receiveRevAndAckAliceToBob()
- ctx.receiveCommitSigAliceToBob(1)
- ctx.sendRevAndAckBobToAlice()
-
- // We expect a call to the invoice registry to notify the arrival of
- // htlc 1.
- select {
- case <-registry.settleChan:
- case <-time.After(15 * time.Second):
- t.Fatal("exit hop notification not received")
- }
-
- // Lock in htlc 2 on both sides.
- ctx.sendHtlcBobToAlice(htlc2)
- ctx.sendCommitSigBobToAlice(2)
- ctx.receiveRevAndAckAliceToBob()
- ctx.receiveCommitSigAliceToBob(2)
- ctx.sendRevAndAckBobToAlice()
-
- select {
- case <-registry.settleChan:
- case <-time.After(15 * time.Second):
- t.Fatal("exit hop notification not received")
- }
-
- // Settle invoice 1 with the preimage.
- err = registry.SettleHodlInvoice(*preimage1)
- if err != nil {
- t.Fatalf("settle hodl invoice: %v", err)
- }
-
- // Expect alice to send a settle and commitsig message to bob. Bob does
- // not yet send the revocation.
- ctx.receiveSettleAliceToBob()
- ctx.receiveCommitSigAliceToBob(1)
-
- // Settle invoice 2 with the preimage.
- err = registry.SettleHodlInvoice(*preimage2)
- if err != nil {
- t.Fatalf("settle hodl invoice: %v", err)
- }
-
- // Expect alice to send a settle for htlc 2.
- ctx.receiveSettleAliceToBob()
-
- // At this point, Alice cannot send a new commit sig to bob because the
- // revocation window is exhausted.
-
- // Sleep to let timer(s) expire.
- time.Sleep(time.Second)
-
- // We don't expect a commitSig from Alice.
- select {
- case msg := <-aliceMsgs:
- t.Fatalf("did not expect message %T", msg)
- default:
- }
-
- // Bob sends revocation and signs commit with htlc 1 settled.
- ctx.sendRevAndAckBobToAlice()
-
- // Allow some time for it to be processed by the link.
- time.Sleep(time.Second)
-
- // Trigger the batch timer as this may trigger Alice to send a commit
- // sig.
- batchTicker <- time.Time{}
-
- // After the revocation, it is again possible for Alice to send a commit
- // sig no more htlcs. Bob acks the update.
- ctx.receiveCommitSigAliceToBob(0)
- ctx.sendRevAndAckBobToAlice()
-
- // Bob updates his remote commit tx.
- ctx.sendCommitSigBobToAlice(0)
- ctx.receiveRevAndAckAliceToBob()
-
- // Stop the link
- aliceLink.Stop()
-
- // Check that no unexpected messages were sent.
- select {
- case msg := <-aliceMsgs:
- t.Fatalf("did not expect message %T", msg)
- default:
- }
-}
-
-// TestChannelLinkReceiveEmptySig tests the response of the link to receiving an
-// empty commit sig. This should be tolerated, but we shouldn't send out an
-// empty sig ourselves.
-func TestChannelLinkReceiveEmptySig(t *testing.T) {
- t.Parallel()
-
- chanAmt := btcutil.UnitsPerCoin() * 5
- chanReserve := btcutil.UnitsPerCoin() * 1
- aliceLink, bobChannel, batchTicker, start, cleanUp, _, err :=
- newSingleLinkTestHarness(chanAmt, chanReserve)
- if err != nil {
- t.Fatalf("unable to create link: %v", err)
- }
- defer cleanUp()
-
- if err := start(); err != nil {
- t.Fatalf("unable to start test harness: %v", err)
- }
-
- var (
- coreLink = aliceLink.(*channelLink)
- aliceMsgs = coreLink.cfg.Peer.(*mockPeer).sentMsgs
- )
-
- ctx := linkTestContext{
- t: t,
- aliceLink: aliceLink,
- aliceMsgs: aliceMsgs,
- bobChannel: bobChannel,
- }
-
- htlc, _ := generateHtlcAndInvoice(t, 0)
-
- // First, send an Add from Alice to Bob.
- ctx.sendHtlcAliceToBob(0, htlc)
- ctx.receiveHtlcAliceToBob()
-
- // Tick the batch ticker to trigger a commitsig from Alice->Bob.
- select {
- case batchTicker <- time.Now():
- case <-time.After(5 * time.Second):
- t.Fatalf("could not force commit sig")
- }
-
- // Make Bob send a CommitSig. Since Bob hasn't received Alice's sig, he
- // cannot add the htlc to his remote tx yet. The commit sig that we
- // force Bob to send will be empty. Note that this normally does not
- // happen, because the link (which is not present for Bob in this test)
- // check whether Bob actually owes a sig first.
- ctx.sendCommitSigBobToAlice(0)
-
- // Receive a CommitSig from Alice covering the htlc from above.
- ctx.receiveCommitSigAliceToBob(1)
-
- // Wait for RevokeAndAck Alice->Bob. Even though Bob sent an empty
- // commit sig, Alice still needs to revoke the previous commitment tx.
- ctx.receiveRevAndAckAliceToBob()
-
- // Send RevokeAndAck Bob->Alice to ack the added htlc.
- ctx.sendRevAndAckBobToAlice()
-
- // We received an empty commit sig, we accepted it, but there is nothing
- // new to sign for us.
-
- // No other messages are expected.
- ctx.assertNoMsgFromAlice(time.Second)
-
- // Stop the link
- aliceLink.Stop()
-}
-
-// TestPendingCommitTicker tests that a link will fail itself after a timeout if
-// the commitment dance stalls out.
-func TestPendingCommitTicker(t *testing.T) {
- t.Parallel()
-
- chanAmt := btcutil.UnitsPerCoin() * 5
- chanReserve := btcutil.UnitsPerCoin() * 1
- aliceLink, bobChannel, batchTicker, start, cleanUp, _, err :=
- newSingleLinkTestHarness(chanAmt, chanReserve)
- if err != nil {
- t.Fatalf("unable to create link: %v", err)
- }
-
- var (
- coreLink = aliceLink.(*channelLink)
- aliceMsgs = coreLink.cfg.Peer.(*mockPeer).sentMsgs
- )
-
- coreLink.cfg.PendingCommitTicker = ticker.NewForce(time.Millisecond)
-
- linkErrs := make(chan LinkFailureError)
- coreLink.cfg.OnChannelFailure = func(_ lnwire.ChannelID,
- _ lnwire.ShortChannelID, linkErr LinkFailureError) {
-
- linkErrs <- linkErr
- }
-
- if err := start(); err != nil {
- t.Fatalf("unable to start test harness: %v", err)
- }
- defer cleanUp()
-
- ctx := linkTestContext{
- t: t,
- aliceLink: aliceLink,
- bobChannel: bobChannel,
- aliceMsgs: aliceMsgs,
- }
-
- // Send an HTLC from Alice to Bob, and signal the batch ticker to signa
- // a commitment.
- htlc, _ := generateHtlcAndInvoice(t, 0)
- ctx.sendHtlcAliceToBob(0, htlc)
- ctx.receiveHtlcAliceToBob()
- batchTicker <- time.Now()
-
- select {
- case msg := <-aliceMsgs:
- if _, ok := msg.(*lnwire.CommitSig); !ok {
- t.Fatalf("expected CommitSig, got: %T", msg)
- }
- case <-time.After(time.Second):
- t.Fatalf("alice did not send commit sig")
- }
-
- // Check that Alice hasn't failed.
- select {
- case linkErr := <-linkErrs:
- t.Fatalf("link failed unexpectedly: %v", linkErr)
- case <-time.After(50 * time.Millisecond):
- }
-
- // Without completing the dance, send another HTLC from Alice to Bob.
- // Since the revocation window has been exhausted, we should see the
- // link fail itself immediately due to the low pending commit timeout.
- // In production this would be much longer, e.g. a minute.
- htlc, _ = generateHtlcAndInvoice(t, 1)
- ctx.sendHtlcAliceToBob(1, htlc)
- ctx.receiveHtlcAliceToBob()
- batchTicker <- time.Now()
-
- // Assert that we get the expected link failure from Alice.
- select {
- case linkErr := <-linkErrs:
- if linkErr.code != ErrRemoteUnresponsive {
- t.Fatalf("error code mismatch, "+
- "want: ErrRemoteUnresponsive, got: %v",
- linkErr.code)
- }
-
- case <-time.After(time.Second):
- t.Fatalf("did not receive failure")
- }
-}
-
-// assertFailureCode asserts that an error is of type ClearTextError and that
-// the failure code is as expected.
-func assertFailureCode(t *testing.T, err er.R, code lnwire.FailCode) {
- errr := er.Wrapped(err)
- rtErr, ok := errr.(ClearTextError)
- if !ok {
- t.Fatalf("expected ClearTextError but got %T", err)
- }
-
- if rtErr.WireMessage().Code() != code {
- t.Fatalf("expected %v but got %v",
- code, rtErr.WireMessage().Code())
- }
-}
-
-func TestMain(m *testing.M) {
- globalcfg.SelectConfig(globalcfg.BitcoinDefaults())
- os.Exit(m.Run())
-}
diff --git a/lnd/htlcswitch/linkfailure.go b/lnd/htlcswitch/linkfailure.go
deleted file mode 100644
index 5a2a02bd..00000000
--- a/lnd/htlcswitch/linkfailure.go
+++ /dev/null
@@ -1,120 +0,0 @@
-package htlcswitch
-
-var (
- // ErrLinkShuttingDown signals that the link is shutting down.
- ErrLinkShuttingDown = Err.CodeWithDetail("ErrLinkShuttingDown", "link shutting down")
-)
-
-// errorCode encodes the possible types of errors that will make us fail the
-// current link.
-type errorCode uint8
-
-const (
- // ErrInternalError indicates that something internal in the link
- // failed. In this case we will send a generic error to our peer.
- ErrInternalError errorCode = iota
-
- // ErrRemoteError indicates that our peer sent an error, prompting up
- // to fail the link.
- ErrRemoteError
-
- // ErrRemoteUnresponsive indicates that our peer took too long to
- // complete a commitment dance.
- ErrRemoteUnresponsive
-
- // ErrSyncError indicates that we failed synchronizing the state of the
- // channel with our peer.
- ErrSyncError
-
- // ErrInvalidUpdate indicates that the peer send us an invalid update.
- ErrInvalidUpdate
-
- // ErrInvalidCommitment indicates that the remote peer sent us an
- // invalid commitment signature.
- ErrInvalidCommitment
-
- // ErrInvalidRevocation indicates that the remote peer send us an
- // invalid revocation message.
- ErrInvalidRevocation
-
- // ErrRecoveryError the channel was unable to be resumed, we need the
- // remote party to force close the channel out on chain now as a
- // result.
- ErrRecoveryError
-)
-
-// LinkFailureError encapsulates an error that will make us fail the current
-// link. It contains the necessary information needed to determine if we should
-// force close the channel in the process, and if any error data should be sent
-// to the peer.
-type LinkFailureError struct {
- // code is the type of error this LinkFailureError encapsulates.
- code errorCode
-
- // ForceClose indicates whether we should force close the channel
- // because of this error.
- ForceClose bool
-
- // PermanentFailure indicates whether this failure is permanent, and
- // the channel should not be attempted loaded again.
- PermanentFailure bool
-
- // SendData is a byte slice that will be sent to the peer. If nil a
- // generic error will be sent.
- SendData []byte
-}
-
-// A compile time check to ensure LinkFailureError implements the error
-// interface.
-var _ error = (*LinkFailureError)(nil)
-
-// Error returns a generic error for the LinkFailureError.
-//
-// NOTE: Part of the error interface.
-func (e LinkFailureError) Error() string {
- switch e.code {
- case ErrInternalError:
- return "internal error"
- case ErrRemoteError:
- return "remote error"
- case ErrRemoteUnresponsive:
- return "remote unresponsive"
- case ErrSyncError:
- return "sync error"
- case ErrInvalidUpdate:
- return "invalid update"
- case ErrInvalidCommitment:
- return "invalid commitment"
- case ErrInvalidRevocation:
- return "invalid revocation"
- case ErrRecoveryError:
- return "unable to resume channel, recovery required"
- default:
- return "unknown error"
- }
-}
-
-// ShouldSendToPeer indicates whether we should send an error to the peer if
-// the link fails with this LinkFailureError.
-func (e LinkFailureError) ShouldSendToPeer() bool {
- switch e.code {
-
- // Since sending an error can lead some nodes to force close the
- // channel, create a whitelist of the failures we want to send so that
- // newly added error codes aren't automatically sent to the remote peer.
- case
- ErrInternalError,
- ErrRemoteError,
- ErrSyncError,
- ErrInvalidUpdate,
- ErrInvalidCommitment,
- ErrInvalidRevocation,
- ErrRecoveryError:
-
- return true
-
- // In all other cases we will not attempt to send our peer an error.
- default:
- return false
- }
-}
diff --git a/lnd/htlcswitch/mailbox.go b/lnd/htlcswitch/mailbox.go
deleted file mode 100644
index a3319d97..00000000
--- a/lnd/htlcswitch/mailbox.go
+++ /dev/null
@@ -1,904 +0,0 @@
-package htlcswitch
-
-import (
- "bytes"
- "container/list"
- "sync"
- "time"
-
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/lnd/clock"
- "github.com/pkt-cash/pktd/lnd/lnwire"
- "github.com/pkt-cash/pktd/pktlog/log"
-)
-
-var (
- // ErrMailBoxShuttingDown is returned when the mailbox is interrupted by
- // a shutdown request.
- ErrMailBoxShuttingDown = Err.CodeWithDetail("ErrMailBoxShuttingDown", "mailbox is shutting down")
-
- // ErrPacketAlreadyExists signals that an attempt to add a packet failed
- // because it already exists in the mailbox.
- ErrPacketAlreadyExists = Err.CodeWithDetail("ErrPacketAlreadyExists", "mailbox already has packet")
-)
-
-// MailBox is an interface which represents a concurrent-safe, in-order
-// delivery queue for messages from the network and also from the main switch.
-// This struct servers as a buffer between incoming messages, and messages to
-// the handled by the link. Each of the mutating methods within this interface
-// should be implemented in a non-blocking manner.
-type MailBox interface {
- // AddMessage appends a new message to the end of the message queue.
- AddMessage(msg lnwire.Message) er.R
-
- // AddPacket appends a new message to the end of the packet queue.
- AddPacket(pkt *htlcPacket) er.R
-
- // HasPacket queries the packets for a circuit key, this is used to drop
- // packets bound for the switch that already have a queued response.
- HasPacket(CircuitKey) bool
-
- // AckPacket removes a packet from the mailboxes in-memory replay
- // buffer. This will prevent a packet from being delivered after a link
- // restarts if the switch has remained online. The returned boolean
- // indicates whether or not a packet with the passed incoming circuit
- // key was removed.
- AckPacket(CircuitKey) bool
-
- // FailAdd fails an UpdateAddHTLC that exists within the mailbox,
- // removing it from the in-memory replay buffer. This will prevent the
- // packet from being delivered after the link restarts if the switch has
- // remained online. The generated LinkError will show an
- // OutgoingFailureDownstreamHtlcAdd FailureDetail.
- FailAdd(pkt *htlcPacket)
-
- // MessageOutBox returns a channel that any new messages ready for
- // delivery will be sent on.
- MessageOutBox() chan lnwire.Message
-
- // PacketOutBox returns a channel that any new packets ready for
- // delivery will be sent on.
- PacketOutBox() chan *htlcPacket
-
- // Clears any pending wire messages from the inbox.
- ResetMessages() er.R
-
- // Reset the packet head to point at the first element in the list.
- ResetPackets() er.R
-
- // Start starts the mailbox and any goroutines it needs to operate
- // properly.
- Start()
-
- // Stop signals the mailbox and its goroutines for a graceful shutdown.
- Stop()
-}
-
-type mailBoxConfig struct {
- // shortChanID is the short channel id of the channel this mailbox
- // belongs to.
- shortChanID lnwire.ShortChannelID
-
- // fetchUpdate retreives the most recent channel update for the channel
- // this mailbox belongs to.
- fetchUpdate func(lnwire.ShortChannelID) (*lnwire.ChannelUpdate, er.R)
-
- // forwardPackets send a varidic number of htlcPackets to the switch to
- // be routed. A quit channel should be provided so that the call can
- // properly exit during shutdown.
- forwardPackets func(chan struct{}, ...*htlcPacket) er.R
-
- // clock is a time source for the mailbox.
- clock clock.Clock
-
- // expiry is the interval after which Adds will be cancelled if they
- // have not been yet been delivered. The computed deadline will expiry
- // this long after the Adds are added via AddPacket.
- expiry time.Duration
-}
-
-// memoryMailBox is an implementation of the MailBox struct backed by purely
-// in-memory queues.
-type memoryMailBox struct {
- started sync.Once
- stopped sync.Once
-
- cfg *mailBoxConfig
-
- wireMessages *list.List
- wireMtx sync.Mutex
- wireCond *sync.Cond
-
- messageOutbox chan lnwire.Message
- msgReset chan chan struct{}
-
- // repPkts is a queue for reply packets, e.g. Settles and Fails.
- repPkts *list.List
- repIndex map[CircuitKey]*list.Element
- repHead *list.Element
-
- // addPkts is a dedicated queue for Adds.
- addPkts *list.List
- addIndex map[CircuitKey]*list.Element
- addHead *list.Element
-
- pktMtx sync.Mutex
- pktCond *sync.Cond
-
- pktOutbox chan *htlcPacket
- pktReset chan chan struct{}
-
- wireShutdown chan struct{}
- pktShutdown chan struct{}
- quit chan struct{}
-}
-
-// newMemoryMailBox creates a new instance of the memoryMailBox.
-func newMemoryMailBox(cfg *mailBoxConfig) *memoryMailBox {
- box := &memoryMailBox{
- cfg: cfg,
- wireMessages: list.New(),
- repPkts: list.New(),
- addPkts: list.New(),
- messageOutbox: make(chan lnwire.Message),
- pktOutbox: make(chan *htlcPacket),
- msgReset: make(chan chan struct{}, 1),
- pktReset: make(chan chan struct{}, 1),
- repIndex: make(map[CircuitKey]*list.Element),
- addIndex: make(map[CircuitKey]*list.Element),
- wireShutdown: make(chan struct{}),
- pktShutdown: make(chan struct{}),
- quit: make(chan struct{}),
- }
- box.wireCond = sync.NewCond(&box.wireMtx)
- box.pktCond = sync.NewCond(&box.pktMtx)
-
- return box
-}
-
-// A compile time assertion to ensure that memoryMailBox meets the MailBox
-// interface.
-var _ MailBox = (*memoryMailBox)(nil)
-
-// courierType is an enum that reflects the distinct types of messages a
-// MailBox can handle. Each type will be placed in an isolated mail box and
-// will have a dedicated goroutine for delivering the messages.
-type courierType uint8
-
-const (
- // wireCourier is a type of courier that handles wire messages.
- wireCourier courierType = iota
-
- // pktCourier is a type of courier that handles htlc packets.
- pktCourier
-)
-
-// Start starts the mailbox and any goroutines it needs to operate properly.
-//
-// NOTE: This method is part of the MailBox interface.
-func (m *memoryMailBox) Start() {
- m.started.Do(func() {
- go m.mailCourier(wireCourier)
- go m.mailCourier(pktCourier)
- })
-}
-
-// ResetMessages blocks until all buffered wire messages are cleared.
-func (m *memoryMailBox) ResetMessages() er.R {
- msgDone := make(chan struct{})
- select {
- case m.msgReset <- msgDone:
- return m.signalUntilReset(wireCourier, msgDone)
- case <-m.quit:
- return ErrMailBoxShuttingDown.Default()
- }
-}
-
-// ResetPackets blocks until the head of packets buffer is reset, causing the
-// packets to be redelivered in order.
-func (m *memoryMailBox) ResetPackets() er.R {
- pktDone := make(chan struct{})
- select {
- case m.pktReset <- pktDone:
- return m.signalUntilReset(pktCourier, pktDone)
- case <-m.quit:
- return ErrMailBoxShuttingDown.Default()
- }
-}
-
-// signalUntilReset strobes the condition variable for the specified inbox type
-// until receiving a response that the mailbox has processed a reset.
-func (m *memoryMailBox) signalUntilReset(cType courierType,
- done chan struct{}) er.R {
-
- for {
-
- switch cType {
- case wireCourier:
- m.wireCond.Signal()
- case pktCourier:
- m.pktCond.Signal()
- }
-
- select {
- case <-time.After(time.Millisecond):
- continue
- case <-done:
- return nil
- case <-m.quit:
- return ErrMailBoxShuttingDown.Default()
- }
- }
-}
-
-// AckPacket removes the packet identified by it's incoming circuit key from the
-// queue of packets to be delivered. The returned boolean indicates whether or
-// not a packet with the passed incoming circuit key was removed.
-//
-// NOTE: It is safe to call this method multiple times for the same circuit key.
-func (m *memoryMailBox) AckPacket(inKey CircuitKey) bool {
- m.pktCond.L.Lock()
- defer m.pktCond.L.Unlock()
-
- if entry, ok := m.repIndex[inKey]; ok {
- // Check whether we are removing the head of the queue. If so,
- // we must advance the head to the next packet before removing.
- // It's possible that the courier has already advanced the
- // repHead, so this check prevents the repHead from getting
- // desynchronized.
- if entry == m.repHead {
- m.repHead = entry.Next()
- }
- m.repPkts.Remove(entry)
- delete(m.repIndex, inKey)
-
- return true
- }
-
- if entry, ok := m.addIndex[inKey]; ok {
- // Check whether we are removing the head of the queue. If so,
- // we must advance the head to the next add before removing.
- // It's possible that the courier has already advanced the
- // addHead, so this check prevents the addHead from getting
- // desynchronized.
- //
- // NOTE: While this event is rare for Settles or Fails, it could
- // be very common for Adds since the mailbox has the ability to
- // cancel Adds before they are delivered. When that occurs, the
- // head of addPkts has only been peeked and we expect to be
- // removing the head of the queue.
- if entry == m.addHead {
- m.addHead = entry.Next()
- }
-
- m.addPkts.Remove(entry)
- delete(m.addIndex, inKey)
-
- return true
- }
-
- return false
-}
-
-// HasPacket queries the packets for a circuit key, this is used to drop packets
-// bound for the switch that already have a queued response.
-func (m *memoryMailBox) HasPacket(inKey CircuitKey) bool {
- m.pktCond.L.Lock()
- _, ok := m.repIndex[inKey]
- m.pktCond.L.Unlock()
-
- return ok
-}
-
-// Stop signals the mailbox and its goroutines for a graceful shutdown.
-//
-// NOTE: This method is part of the MailBox interface.
-func (m *memoryMailBox) Stop() {
- m.stopped.Do(func() {
- close(m.quit)
-
- m.signalUntilShutdown(wireCourier)
- m.signalUntilShutdown(pktCourier)
- })
-}
-
-// signalUntilShutdown strobes the condition variable of the passed courier
-// type, blocking until the worker has exited.
-func (m *memoryMailBox) signalUntilShutdown(cType courierType) {
- var (
- cond *sync.Cond
- shutdown chan struct{}
- )
-
- switch cType {
- case wireCourier:
- cond = m.wireCond
- shutdown = m.wireShutdown
- case pktCourier:
- cond = m.pktCond
- shutdown = m.pktShutdown
- }
-
- for {
- select {
- case <-time.After(time.Millisecond):
- cond.Signal()
- case <-shutdown:
- return
- }
- }
-}
-
-// pktWithExpiry wraps an incoming packet and records the time at which it it
-// should be canceled from the mailbox. This will be used to detect if it gets
-// stuck in the mailbox and inform when to cancel back.
-type pktWithExpiry struct {
- pkt *htlcPacket
- expiry time.Time
-}
-
-func (p *pktWithExpiry) deadline(clock clock.Clock) <-chan time.Time {
- return clock.TickAfter(p.expiry.Sub(clock.Now()))
-}
-
-// mailCourier is a dedicated goroutine whose job is to reliably deliver
-// messages of a particular type. There are two types of couriers: wire
-// couriers, and mail couriers. Depending on the passed courierType, this
-// goroutine will assume one of two roles.
-func (m *memoryMailBox) mailCourier(cType courierType) {
- switch cType {
- case wireCourier:
- defer close(m.wireShutdown)
- case pktCourier:
- defer close(m.pktShutdown)
- }
-
- // TODO(roasbeef): refactor...
-
- for {
- // First, we'll check our condition. If our target mailbox is
- // empty, then we'll wait until a new item is added.
- switch cType {
- case wireCourier:
- m.wireCond.L.Lock()
- for m.wireMessages.Front() == nil {
- m.wireCond.Wait()
-
- select {
- case msgDone := <-m.msgReset:
- m.wireMessages.Init()
-
- close(msgDone)
- case <-m.quit:
- m.wireCond.L.Unlock()
- return
- default:
- }
- }
-
- case pktCourier:
- m.pktCond.L.Lock()
- for m.repHead == nil && m.addHead == nil {
- m.pktCond.Wait()
-
- select {
- // Resetting the packet queue means just moving
- // our pointer to the front. This ensures that
- // any un-ACK'd messages are re-delivered upon
- // reconnect.
- case pktDone := <-m.pktReset:
- m.repHead = m.repPkts.Front()
- m.addHead = m.addPkts.Front()
-
- close(pktDone)
-
- case <-m.quit:
- m.pktCond.L.Unlock()
- return
- default:
- }
- }
- }
-
- var (
- nextRep *htlcPacket
- nextRepEl *list.Element
- nextAdd *pktWithExpiry
- nextAddEl *list.Element
- nextMsg lnwire.Message
- )
- switch cType {
- // Grab the datum off the front of the queue, shifting the
- // slice's reference down one in order to remove the datum from
- // the queue.
- case wireCourier:
- entry := m.wireMessages.Front()
- nextMsg = m.wireMessages.Remove(entry).(lnwire.Message)
-
- // For packets, we actually never remove an item until it has
- // been ACK'd by the link. This ensures that if a read packet
- // doesn't make it into a commitment, then it'll be
- // re-delivered once the link comes back online.
- case pktCourier:
- // Peek at the head of the Settle/Fails and Add queues.
- // We peak both even if there is a Settle/Fail present
- // because we need to set a deadline for the next
- // pending Add if it's present. Due to clock
- // monotonicity, we know that the head of the Adds is
- // the next to expire.
- if m.repHead != nil {
- nextRep = m.repHead.Value.(*htlcPacket)
- nextRepEl = m.repHead
- }
- if m.addHead != nil {
- nextAdd = m.addHead.Value.(*pktWithExpiry)
- nextAddEl = m.addHead
- }
- }
-
- // Now that we're done with the condition, we can unlock it to
- // allow any callers to append to the end of our target queue.
- switch cType {
- case wireCourier:
- m.wireCond.L.Unlock()
- case pktCourier:
- m.pktCond.L.Unlock()
- }
-
- // With the next message obtained, we'll now select to attempt
- // to deliver the message. If we receive a kill signal, then
- // we'll bail out.
- switch cType {
- case wireCourier:
- select {
- case m.messageOutbox <- nextMsg:
- case msgDone := <-m.msgReset:
- m.wireCond.L.Lock()
- m.wireMessages.Init()
- m.wireCond.L.Unlock()
-
- close(msgDone)
- case <-m.quit:
- return
- }
-
- case pktCourier:
- var (
- pktOutbox chan *htlcPacket
- addOutbox chan *htlcPacket
- add *htlcPacket
- deadline <-chan time.Time
- )
-
- // Prioritize delivery of Settle/Fail packets over Adds.
- // This ensures that we actively clear the commitment of
- // existing HTLCs before trying to add new ones. This
- // can help to improve forwarding performance since the
- // time to sign a commitment is linear in the number of
- // HTLCs manifested on the commitments.
- //
- // NOTE: Both types are eventually delivered over the
- // same channel, but we can control which is delivered
- // by exclusively making one nil and the other non-nil.
- // We know from our loop condition that at least one
- // nextRep and nextAdd are non-nil.
- if nextRep != nil {
- pktOutbox = m.pktOutbox
- } else {
- addOutbox = m.pktOutbox
- }
-
- // If we have a pending Add, we'll also construct the
- // deadline so we can fail it back if we are unable to
- // deliver any message in time. We also dereference the
- // nextAdd's packet, since we will need access to it in
- // the case we are delivering it and/or if the deadline
- // expires.
- //
- // NOTE: It's possible after this point for add to be
- // nil, but this can only occur when addOutbox is also
- // nil, hence we won't accidentally deliver a nil
- // packet.
- if nextAdd != nil {
- add = nextAdd.pkt
- deadline = nextAdd.deadline(m.cfg.clock)
- }
-
- select {
- case pktOutbox <- nextRep:
- m.pktCond.L.Lock()
- // Only advance the repHead if this Settle or
- // Fail is still at the head of the queue.
- if m.repHead != nil && m.repHead == nextRepEl {
- m.repHead = m.repHead.Next()
- }
- m.pktCond.L.Unlock()
-
- case addOutbox <- add:
- m.pktCond.L.Lock()
- // Only advance the addHead if this Add is still
- // at the head of the queue.
- if m.addHead != nil && m.addHead == nextAddEl {
- m.addHead = m.addHead.Next()
- }
- m.pktCond.L.Unlock()
-
- case <-deadline:
- m.FailAdd(add)
-
- case pktDone := <-m.pktReset:
- m.pktCond.L.Lock()
- m.repHead = m.repPkts.Front()
- m.addHead = m.addPkts.Front()
- m.pktCond.L.Unlock()
-
- close(pktDone)
-
- case <-m.quit:
- return
- }
- }
-
- }
-}
-
-// AddMessage appends a new message to the end of the message queue.
-//
-// NOTE: This method is safe for concrete use and part of the MailBox
-// interface.
-func (m *memoryMailBox) AddMessage(msg lnwire.Message) er.R {
- // First, we'll lock the condition, and add the message to the end of
- // the wire message inbox.
- m.wireCond.L.Lock()
- m.wireMessages.PushBack(msg)
- m.wireCond.L.Unlock()
-
- // With the message added, we signal to the mailCourier that there are
- // additional messages to deliver.
- m.wireCond.Signal()
-
- return nil
-}
-
-// AddPacket appends a new message to the end of the packet queue.
-//
-// NOTE: This method is safe for concrete use and part of the MailBox
-// interface.
-func (m *memoryMailBox) AddPacket(pkt *htlcPacket) er.R {
- m.pktCond.L.Lock()
- switch htlc := pkt.htlc.(type) {
-
- // Split off Settle/Fail packets into the repPkts queue.
- case *lnwire.UpdateFulfillHTLC, *lnwire.UpdateFailHTLC:
- if _, ok := m.repIndex[pkt.inKey()]; ok {
- m.pktCond.L.Unlock()
- return ErrPacketAlreadyExists.Default()
- }
-
- entry := m.repPkts.PushBack(pkt)
- m.repIndex[pkt.inKey()] = entry
- if m.repHead == nil {
- m.repHead = entry
- }
-
- // Split off Add packets into the addPkts queue.
- case *lnwire.UpdateAddHTLC:
- if _, ok := m.addIndex[pkt.inKey()]; ok {
- m.pktCond.L.Unlock()
- return ErrPacketAlreadyExists.Default()
- }
-
- entry := m.addPkts.PushBack(&pktWithExpiry{
- pkt: pkt,
- expiry: m.cfg.clock.Now().Add(m.cfg.expiry),
- })
- m.addIndex[pkt.inKey()] = entry
- if m.addHead == nil {
- m.addHead = entry
- }
-
- default:
- m.pktCond.L.Unlock()
- return er.Errorf("unknown htlc type: %T", htlc)
- }
- m.pktCond.L.Unlock()
-
- // With the packet added, we signal to the mailCourier that there are
- // additional packets to consume.
- m.pktCond.Signal()
-
- return nil
-}
-
-// FailAdd fails an UpdateAddHTLC that exists within the mailbox, removing it
-// from the in-memory replay buffer. This will prevent the packet from being
-// delivered after the link restarts if the switch has remained online. The
-// generated LinkError will show an OutgoingFailureDownstreamHtlcAdd
-// FailureDetail.
-func (m *memoryMailBox) FailAdd(pkt *htlcPacket) {
- // First, remove the packet from mailbox. If we didn't find the packet
- // because it has already been acked, we'll exit early to avoid sending
- // a duplicate fail message through the switch.
- if !m.AckPacket(pkt.inKey()) {
- return
- }
-
- var (
- localFailure = false
- reason lnwire.OpaqueReason
- )
-
- // Create a temporary channel failure which we will send back to our
- // peer if this is a forward, or report to the user if the failed
- // payment was locally initiated.
- var failure lnwire.FailureMessage
- update, err := m.cfg.fetchUpdate(m.cfg.shortChanID)
- if err != nil {
- failure = &lnwire.FailTemporaryNodeFailure{}
- } else {
- failure = lnwire.NewTemporaryChannelFailure(update)
- }
-
- // If the payment was locally initiated (which is indicated by a nil
- // obfuscator), we do not need to encrypt it back to the sender.
- if pkt.obfuscator == nil {
- var b bytes.Buffer
- err := lnwire.EncodeFailure(&b, failure, 0)
- if err != nil {
- log.Errorf("Unable to encode failure: %v", err)
- return
- }
- reason = lnwire.OpaqueReason(b.Bytes())
- localFailure = true
- } else {
- // If the packet is part of a forward, (identified by a non-nil
- // obfuscator) we need to encrypt the error back to the source.
- var err er.R
- reason, err = pkt.obfuscator.EncryptFirstHop(failure)
- if err != nil {
- log.Errorf("Unable to obfuscate error: %v", err)
- return
- }
- }
-
- // Create a link error containing the temporary channel failure and a
- // detail which indicates the we failed to add the htlc.
- linkError := NewDetailedLinkError(
- failure, OutgoingFailureDownstreamHtlcAdd,
- )
-
- failPkt := &htlcPacket{
- incomingChanID: pkt.incomingChanID,
- incomingHTLCID: pkt.incomingHTLCID,
- circuit: pkt.circuit,
- sourceRef: pkt.sourceRef,
- hasSource: true,
- localFailure: localFailure,
- linkFailure: linkError,
- htlc: &lnwire.UpdateFailHTLC{
- Reason: reason,
- },
- }
-
- if err := m.cfg.forwardPackets(m.quit, failPkt); err != nil {
- log.Errorf("Unhandled error while reforwarding packets "+
- "settle/fail over htlcswitch: %v", err)
- }
-}
-
-// MessageOutBox returns a channel that any new messages ready for delivery
-// will be sent on.
-//
-// NOTE: This method is part of the MailBox interface.
-func (m *memoryMailBox) MessageOutBox() chan lnwire.Message {
- return m.messageOutbox
-}
-
-// PacketOutBox returns a channel that any new packets ready for delivery will
-// be sent on.
-//
-// NOTE: This method is part of the MailBox interface.
-func (m *memoryMailBox) PacketOutBox() chan *htlcPacket {
- return m.pktOutbox
-}
-
-// mailOrchestrator is responsible for coordinating the creation and lifecycle
-// of mailboxes used within the switch. It supports the ability to create
-// mailboxes, reassign their short channel id's, deliver htlc packets, and
-// queue packets for mailboxes that have not been created due to a link's late
-// registration.
-type mailOrchestrator struct {
- mu sync.RWMutex
-
- cfg *mailOrchConfig
-
- // mailboxes caches exactly one mailbox for all known channels.
- mailboxes map[lnwire.ChannelID]MailBox
-
- // liveIndex maps a live short chan id to the primary mailbox key.
- // An index in liveIndex map is only entered under two conditions:
- // 1. A link has a non-zero short channel id at time of AddLink.
- // 2. A link receives a non-zero short channel via UpdateShortChanID.
- liveIndex map[lnwire.ShortChannelID]lnwire.ChannelID
-
- // TODO(conner): add another pair of indexes:
- // chan_id -> short_chan_id
- // short_chan_id -> mailbox
- // so that Deliver can lookup mailbox directly once live,
- // but still queriable by channel_id.
-
- // unclaimedPackets maps a live short chan id to queue of packets if no
- // mailbox has been created.
- unclaimedPackets map[lnwire.ShortChannelID][]*htlcPacket
-}
-
-type mailOrchConfig struct {
- // forwardPackets send a varidic number of htlcPackets to the switch to
- // be routed. A quit channel should be provided so that the call can
- // properly exit during shutdown.
- forwardPackets func(chan struct{}, ...*htlcPacket) er.R
-
- // fetchUpdate retreives the most recent channel update for the channel
- // this mailbox belongs to.
- fetchUpdate func(lnwire.ShortChannelID) (*lnwire.ChannelUpdate, er.R)
-
- // clock is a time source for the generated mailboxes.
- clock clock.Clock
-
- // expiry is the interval after which Adds will be cancelled if they
- // have not been yet been delivered. The computed deadline will expiry
- // this long after the Adds are added to a mailbox via AddPacket.
- expiry time.Duration
-}
-
-// newMailOrchestrator initializes a fresh mailOrchestrator.
-func newMailOrchestrator(cfg *mailOrchConfig) *mailOrchestrator {
- return &mailOrchestrator{
- cfg: cfg,
- mailboxes: make(map[lnwire.ChannelID]MailBox),
- liveIndex: make(map[lnwire.ShortChannelID]lnwire.ChannelID),
- unclaimedPackets: make(map[lnwire.ShortChannelID][]*htlcPacket),
- }
-}
-
-// Stop instructs the orchestrator to stop all active mailboxes.
-func (mo *mailOrchestrator) Stop() {
- for _, mailbox := range mo.mailboxes {
- mailbox.Stop()
- }
-}
-
-// GetOrCreateMailBox returns an existing mailbox belonging to `chanID`, or
-// creates and returns a new mailbox if none is found.
-func (mo *mailOrchestrator) GetOrCreateMailBox(chanID lnwire.ChannelID,
- shortChanID lnwire.ShortChannelID) MailBox {
-
- // First, try lookup the mailbox directly using only the shared mutex.
- mo.mu.RLock()
- mailbox, ok := mo.mailboxes[chanID]
- if ok {
- mo.mu.RUnlock()
- return mailbox
- }
- mo.mu.RUnlock()
-
- // Otherwise, we will try again with exclusive lock, creating a mailbox
- // if one still has not been created.
- mo.mu.Lock()
- mailbox = mo.exclusiveGetOrCreateMailBox(chanID, shortChanID)
- mo.mu.Unlock()
-
- return mailbox
-}
-
-// exclusiveGetOrCreateMailBox checks for the existence of a mailbox for the
-// given channel id. If none is found, a new one is creates, started, and
-// recorded.
-//
-// NOTE: This method MUST be invoked with the mailOrchestrator's exclusive lock.
-func (mo *mailOrchestrator) exclusiveGetOrCreateMailBox(
- chanID lnwire.ChannelID, shortChanID lnwire.ShortChannelID) MailBox {
-
- mailbox, ok := mo.mailboxes[chanID]
- if !ok {
- mailbox = newMemoryMailBox(&mailBoxConfig{
- shortChanID: shortChanID,
- fetchUpdate: mo.cfg.fetchUpdate,
- forwardPackets: mo.cfg.forwardPackets,
- clock: mo.cfg.clock,
- expiry: mo.cfg.expiry,
- })
- mailbox.Start()
- mo.mailboxes[chanID] = mailbox
- }
-
- return mailbox
-}
-
-// BindLiveShortChanID registers that messages bound for a particular short
-// channel id should be forwarded to the mailbox corresponding to the given
-// channel id. This method also checks to see if there are any unclaimed
-// packets for this short_chan_id. If any are found, they are delivered to the
-// mailbox and removed (marked as claimed).
-func (mo *mailOrchestrator) BindLiveShortChanID(mailbox MailBox,
- cid lnwire.ChannelID, sid lnwire.ShortChannelID) {
-
- mo.mu.Lock()
- // Update the mapping from short channel id to mailbox's channel id.
- mo.liveIndex[sid] = cid
-
- // Retrieve any unclaimed packets destined for this mailbox.
- pkts := mo.unclaimedPackets[sid]
- delete(mo.unclaimedPackets, sid)
- mo.mu.Unlock()
-
- // Deliver the unclaimed packets.
- for _, pkt := range pkts {
- mailbox.AddPacket(pkt)
- }
-}
-
-// Deliver lookups the target mailbox using the live index from short_chan_id
-// to channel_id. If the mailbox is found, the message is delivered directly.
-// Otherwise the packet is recorded as unclaimed, and will be delivered to the
-// mailbox upon the subsequent call to BindLiveShortChanID.
-func (mo *mailOrchestrator) Deliver(
- sid lnwire.ShortChannelID, pkt *htlcPacket) er.R {
-
- var (
- mailbox MailBox
- found bool
- )
-
- // First, try to find the channel id for the target short_chan_id. If
- // the link is live, we will also look up the created mailbox.
- mo.mu.RLock()
- chanID, isLive := mo.liveIndex[sid]
- if isLive {
- mailbox, found = mo.mailboxes[chanID]
- }
- mo.mu.RUnlock()
-
- // The link is live and target mailbox was found, deliver immediately.
- if isLive && found {
- return mailbox.AddPacket(pkt)
- }
-
- // If we detected that the link has not been made live, we will acquire
- // the exclusive lock preemptively in order to queue this packet in the
- // list of unclaimed packets.
- mo.mu.Lock()
-
- // Double check to see if the mailbox has been not made live since the
- // release of the shared lock.
- //
- // NOTE: Checking again with the exclusive lock held prevents a race
- // condition where BindLiveShortChanID is interleaved between the
- // release of the shared lock, and acquiring the exclusive lock. The
- // result would be stuck packets, as they wouldn't be redelivered until
- // the next call to BindLiveShortChanID, which is expected to occur
- // infrequently.
- chanID, isLive = mo.liveIndex[sid]
- if isLive {
- // Reaching this point indicates the mailbox is actually live.
- // We'll try to load the mailbox using the fresh channel id.
- //
- // NOTE: This should never create a new mailbox, as the live
- // index should only be set if the mailbox had been initialized
- // beforehand. However, this does ensure that this case is
- // handled properly in the event that it could happen.
- mailbox = mo.exclusiveGetOrCreateMailBox(chanID, sid)
- mo.mu.Unlock()
-
- // Deliver the packet to the mailbox if it was found or created.
- return mailbox.AddPacket(pkt)
- }
-
- // Finally, if the channel id is still not found in the live index,
- // we'll add this to the list of unclaimed packets. These will be
- // delivered upon the next call to BindLiveShortChanID.
- mo.unclaimedPackets[sid] = append(mo.unclaimedPackets[sid], pkt)
- mo.mu.Unlock()
-
- return nil
-}
diff --git a/lnd/htlcswitch/mailbox_test.go b/lnd/htlcswitch/mailbox_test.go
deleted file mode 100644
index 646de57a..00000000
--- a/lnd/htlcswitch/mailbox_test.go
+++ /dev/null
@@ -1,675 +0,0 @@
-package htlcswitch
-
-import (
- prand "math/rand"
- "reflect"
- "testing"
- "time"
-
- "github.com/davecgh/go-spew/spew"
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/lnd/clock"
- "github.com/pkt-cash/pktd/lnd/lnwire"
-)
-
-const testExpiry = time.Minute
-
-// TestMailBoxCouriers tests that both aspects of the mailBox struct works
-// properly. Both packets and messages should be able to added to each
-// respective mailbox concurrently, and also messages/packets should also be
-// able to be received concurrently.
-func TestMailBoxCouriers(t *testing.T) {
- t.Parallel()
-
- // First, we'll create new instance of the current default mailbox
- // type.
- ctx := newMailboxContext(t, time.Now(), testExpiry)
- defer ctx.mailbox.Stop()
-
- // We'll be adding 10 message of both types to the mailbox.
- const numPackets = 10
- const halfPackets = numPackets / 2
-
- // We'll add a set of random packets to the mailbox.
- sentPackets := make([]*htlcPacket, numPackets)
- for i := 0; i < numPackets; i++ {
- pkt := &htlcPacket{
- outgoingChanID: lnwire.NewShortChanIDFromInt(uint64(prand.Int63())),
- incomingChanID: lnwire.NewShortChanIDFromInt(uint64(prand.Int63())),
- amount: lnwire.MilliSatoshi(prand.Int63()),
- htlc: &lnwire.UpdateAddHTLC{
- ID: uint64(i),
- },
- }
- sentPackets[i] = pkt
-
- err := ctx.mailbox.AddPacket(pkt)
- if err != nil {
- t.Fatalf("unable to add packet: %v", err)
- }
- }
-
- // Next, we'll do the same, but this time adding wire messages.
- sentMessages := make([]lnwire.Message, numPackets)
- for i := 0; i < numPackets; i++ {
- msg := &lnwire.UpdateAddHTLC{
- ID: uint64(prand.Int63()),
- Amount: lnwire.MilliSatoshi(prand.Int63()),
- }
- sentMessages[i] = msg
-
- err := ctx.mailbox.AddMessage(msg)
- if err != nil {
- t.Fatalf("unable to add message: %v", err)
- }
- }
-
- // Now we'll attempt to read back the packets/messages we added to the
- // mailbox. We'll alternative reading from the message outbox vs the
- // packet outbox to ensure that they work concurrently properly.
- recvdPackets := make([]*htlcPacket, 0, numPackets)
- recvdMessages := make([]lnwire.Message, 0, numPackets)
- for i := 0; i < numPackets*2; i++ {
- timeout := time.After(time.Second * 5)
- if i%2 == 0 {
- select {
- case <-timeout:
- t.Fatalf("didn't recv pkt after timeout")
- case pkt := <-ctx.mailbox.PacketOutBox():
- recvdPackets = append(recvdPackets, pkt)
- }
- } else {
- select {
- case <-timeout:
- t.Fatalf("didn't recv message after timeout")
- case msg := <-ctx.mailbox.MessageOutBox():
- recvdMessages = append(recvdMessages, msg)
- }
- }
- }
-
- // The number of messages/packets we sent, and the number we received
- // should match exactly.
- if len(sentPackets) != len(recvdPackets) {
- t.Fatalf("expected %v packets instead got %v", len(sentPackets),
- len(recvdPackets))
- }
- if len(sentMessages) != len(recvdMessages) {
- t.Fatalf("expected %v messages instead got %v", len(sentMessages),
- len(recvdMessages))
- }
-
- // Additionally, the set of packets should match exactly, as we should
- // have received the packets in the exact same ordering that we added.
- if !reflect.DeepEqual(sentPackets, recvdPackets) {
- t.Fatalf("recvd packets mismatched: expected %v, got %v",
- spew.Sdump(sentPackets), spew.Sdump(recvdPackets))
- }
- if !reflect.DeepEqual(recvdMessages, recvdMessages) {
- t.Fatalf("recvd messages mismatched: expected %v, got %v",
- spew.Sdump(sentMessages), spew.Sdump(recvdMessages))
- }
-
- // Now that we've received all of the intended msgs/pkts, ack back half
- // of the packets.
- for _, recvdPkt := range recvdPackets[:halfPackets] {
- ctx.mailbox.AckPacket(recvdPkt.inKey())
- }
-
- // With the packets drained and partially acked, we reset the mailbox,
- // simulating a link shutting down and then coming back up.
- err := ctx.mailbox.ResetMessages()
- if err != nil {
- t.Fatalf("unable to reset messages: %v", err)
- }
- err = ctx.mailbox.ResetPackets()
- if err != nil {
- t.Fatalf("unable to reset packets: %v", err)
- }
-
- // Now, we'll use the same alternating strategy to read from our
- // mailbox. All wire messages are dropped on startup, but any unacked
- // packets will be replayed in the same order they were delivered
- // initially.
- recvdPackets2 := make([]*htlcPacket, 0, halfPackets)
- for i := 0; i < 2*halfPackets; i++ {
- timeout := time.After(time.Second * 5)
- if i%2 == 0 {
- select {
- case <-timeout:
- t.Fatalf("didn't recv pkt after timeout")
- case pkt := <-ctx.mailbox.PacketOutBox():
- recvdPackets2 = append(recvdPackets2, pkt)
- }
- } else {
- select {
- case <-ctx.mailbox.MessageOutBox():
- t.Fatalf("should not receive wire msg after reset")
- default:
- }
- }
- }
-
- // The number of packets we received should match the number of unacked
- // packets left in the mailbox.
- if halfPackets != len(recvdPackets2) {
- t.Fatalf("expected %v packets instead got %v", halfPackets,
- len(recvdPackets))
- }
-
- // Additionally, the set of packets should match exactly with the
- // unacked packets, and we should have received the packets in the exact
- // same ordering that we added.
- if !reflect.DeepEqual(recvdPackets[halfPackets:], recvdPackets2) {
- t.Fatalf("recvd packets mismatched: expected %v, got %v",
- spew.Sdump(sentPackets), spew.Sdump(recvdPackets))
- }
-}
-
-// TestMailBoxResetAfterShutdown tests that ResetMessages and ResetPackets
-// return ErrMailBoxShuttingDown after the mailbox has been stopped.
-func TestMailBoxResetAfterShutdown(t *testing.T) {
- t.Parallel()
-
- ctx := newMailboxContext(t, time.Now(), time.Second)
-
- // Stop the mailbox, then try to reset the message and packet couriers.
- ctx.mailbox.Stop()
-
- err := ctx.mailbox.ResetMessages()
- if !ErrMailBoxShuttingDown.Is(err) {
- t.Fatalf("expected ErrMailBoxShuttingDown, got: %v", err)
- }
-
- err = ctx.mailbox.ResetPackets()
- if !ErrMailBoxShuttingDown.Is(err) {
- t.Fatalf("expected ErrMailBoxShuttingDown, got: %v", err)
- }
-}
-
-type mailboxContext struct {
- t *testing.T
- mailbox MailBox
- clock *clock.TestClock
- forwards chan *htlcPacket
-}
-
-func newMailboxContext(t *testing.T, startTime time.Time,
- expiry time.Duration) *mailboxContext {
-
- ctx := &mailboxContext{
- t: t,
- clock: clock.NewTestClock(startTime),
- forwards: make(chan *htlcPacket, 1),
- }
- ctx.mailbox = newMemoryMailBox(&mailBoxConfig{
- fetchUpdate: func(sid lnwire.ShortChannelID) (
- *lnwire.ChannelUpdate, er.R) {
- return &lnwire.ChannelUpdate{
- ShortChannelID: sid,
- }, nil
- },
- forwardPackets: ctx.forward,
- clock: ctx.clock,
- expiry: expiry,
- })
- ctx.mailbox.Start()
-
- return ctx
-}
-
-func (c *mailboxContext) forward(_ chan struct{},
- pkts ...*htlcPacket) er.R {
-
- for _, pkt := range pkts {
- c.forwards <- pkt
- }
-
- return nil
-}
-
-func (c *mailboxContext) sendAdds(start, num int) []*htlcPacket {
- c.t.Helper()
-
- sentPackets := make([]*htlcPacket, num)
- for i := 0; i < num; i++ {
- pkt := &htlcPacket{
- outgoingChanID: lnwire.NewShortChanIDFromInt(
- uint64(prand.Int63())),
- incomingChanID: lnwire.NewShortChanIDFromInt(
- uint64(prand.Int63())),
- incomingHTLCID: uint64(start + i),
- amount: lnwire.MilliSatoshi(prand.Int63()),
- htlc: &lnwire.UpdateAddHTLC{
- ID: uint64(start + i),
- },
- }
- sentPackets[i] = pkt
-
- err := c.mailbox.AddPacket(pkt)
- if err != nil {
- c.t.Fatalf("unable to add packet: %v", err)
- }
- }
-
- return sentPackets
-}
-
-func (c *mailboxContext) receivePkts(pkts []*htlcPacket) {
- c.t.Helper()
-
- for i, expPkt := range pkts {
- select {
- case pkt := <-c.mailbox.PacketOutBox():
- if reflect.DeepEqual(expPkt, pkt) {
- continue
- }
-
- c.t.Fatalf("inkey mismatch #%d, want: %v vs "+
- "got: %v", i, expPkt.inKey(), pkt.inKey())
-
- case <-time.After(50 * time.Millisecond):
- c.t.Fatalf("did not receive fail for index %d", i)
- }
- }
-}
-
-func (c *mailboxContext) checkFails(adds []*htlcPacket) {
- c.t.Helper()
-
- for i, add := range adds {
- select {
- case fail := <-c.forwards:
- if add.inKey() == fail.inKey() {
- continue
- }
- c.t.Fatalf("inkey mismatch #%d, add: %v vs fail: %v",
- i, add.inKey(), fail.inKey())
-
- case <-time.After(50 * time.Millisecond):
- c.t.Fatalf("did not receive fail for index %d", i)
- }
- }
-
- select {
- case pkt := <-c.forwards:
- c.t.Fatalf("unexpected forward: %v", pkt)
- case <-time.After(50 * time.Millisecond):
- }
-}
-
-// TestMailBoxFailAdd asserts that FailAdd returns a response to the switch
-// under various interleavings with other operations on the mailbox.
-func TestMailBoxFailAdd(t *testing.T) {
- var (
- batchDelay = time.Second
- expiry = time.Minute
- firstBatchStart = time.Now()
- secondBatchStart = time.Now().Add(batchDelay)
- thirdBatchStart = time.Now().Add(2 * batchDelay)
- thirdBatchExpiry = thirdBatchStart.Add(expiry)
- )
- ctx := newMailboxContext(t, firstBatchStart, expiry)
- defer ctx.mailbox.Stop()
-
- failAdds := func(adds []*htlcPacket) {
- for _, add := range adds {
- ctx.mailbox.FailAdd(add)
- }
- }
-
- const numBatchPackets = 5
-
- // Send 10 adds, and pull them from the mailbox.
- firstBatch := ctx.sendAdds(0, numBatchPackets)
- ctx.receivePkts(firstBatch)
-
- // Fail all of these adds, simulating an error adding the HTLCs to the
- // commitment. We should see a failure message for each.
- go failAdds(firstBatch)
- ctx.checkFails(firstBatch)
-
- // As a sanity check, Fail all of them again and assert that no
- // duplicate fails are sent.
- go failAdds(firstBatch)
- ctx.checkFails(nil)
-
- // Now, send a second batch of adds after a short delay and deliver them
- // to the link.
- ctx.clock.SetTime(secondBatchStart)
- secondBatch := ctx.sendAdds(numBatchPackets, numBatchPackets)
- ctx.receivePkts(secondBatch)
-
- // Reset the packet queue w/o changing the current time. This simulates
- // the link flapping and coming back up before the second batch's
- // expiries have elapsed. We should see no failures sent back.
- err := ctx.mailbox.ResetPackets()
- if err != nil {
- t.Fatalf("unable to reset packets: %v", err)
- }
- ctx.checkFails(nil)
-
- // Redeliver the second batch to the link and hold them there.
- ctx.receivePkts(secondBatch)
-
- // Send a third batch of adds shortly after the second batch.
- ctx.clock.SetTime(thirdBatchStart)
- thirdBatch := ctx.sendAdds(2*numBatchPackets, numBatchPackets)
-
- // Advance the clock so that the third batch expires. We expect to only
- // see fails for the third batch, since the second batch is still being
- // held by the link.
- ctx.clock.SetTime(thirdBatchExpiry)
- ctx.checkFails(thirdBatch)
-
- // Finally, reset the link which should cause the second batch to be
- // cancelled immediately.
- err = ctx.mailbox.ResetPackets()
- if err != nil {
- t.Fatalf("unable to reset packets: %v", err)
- }
- ctx.checkFails(secondBatch)
-}
-
-// TestMailBoxPacketPrioritization asserts that the mailbox will prioritize
-// delivering Settle and Fail packets over Adds if both are available for
-// delivery at the same time.
-func TestMailBoxPacketPrioritization(t *testing.T) {
- t.Parallel()
-
- // First, we'll create new instance of the current default mailbox
- // type.
- ctx := newMailboxContext(t, time.Now(), testExpiry)
- defer ctx.mailbox.Stop()
-
- const numPackets = 5
-
- _, _, aliceChanID, bobChanID := genIDs()
-
- // Next we'll send the following sequence of packets:
- // - Settle1
- // - Add1
- // - Add2
- // - Fail
- // - Settle2
- sentPackets := make([]*htlcPacket, numPackets)
- for i := 0; i < numPackets; i++ {
- pkt := &htlcPacket{
- outgoingChanID: aliceChanID,
- outgoingHTLCID: uint64(i),
- incomingChanID: bobChanID,
- incomingHTLCID: uint64(i),
- amount: lnwire.MilliSatoshi(prand.Int63()),
- }
-
- switch i {
- case 0, 4:
- // First and last packets are a Settle. A non-Add is
- // sent first to make the test deterministic w/o needing
- // to sleep.
- pkt.htlc = &lnwire.UpdateFulfillHTLC{ID: uint64(i)}
- case 1, 2:
- // Next two packets are Adds.
- pkt.htlc = &lnwire.UpdateAddHTLC{ID: uint64(i)}
- case 3:
- // Last packet is a Fail.
- pkt.htlc = &lnwire.UpdateFailHTLC{ID: uint64(i)}
- }
-
- sentPackets[i] = pkt
-
- err := ctx.mailbox.AddPacket(pkt)
- if err != nil {
- t.Fatalf("failed to add packet: %v", err)
- }
- }
-
- // When dequeueing the packets, we expect the following sequence:
- // - Settle1
- // - Fail
- // - Settle2
- // - Add1
- // - Add2
- //
- // We expect to see Fail and Settle2 to be delivered before either Add1
- // or Add2 due to the prioritization between the split queue.
- for i := 0; i < numPackets; i++ {
- select {
- case pkt := <-ctx.mailbox.PacketOutBox():
- var expPkt *htlcPacket
- switch i {
- case 0:
- // First packet should be Settle1.
- expPkt = sentPackets[0]
- case 1:
- // Second packet should be Fail.
- expPkt = sentPackets[3]
- case 2:
- // Third packet should be Settle2.
- expPkt = sentPackets[4]
- case 3:
- // Fourth packet should be Add1.
- expPkt = sentPackets[1]
- case 4:
- // Last packet should be Add2.
- expPkt = sentPackets[2]
- }
-
- if !reflect.DeepEqual(expPkt, pkt) {
- t.Fatalf("recvd packet mismatch %d, want: %v, got: %v",
- i, spew.Sdump(expPkt), spew.Sdump(pkt))
- }
-
- case <-time.After(50 * time.Millisecond):
- t.Fatalf("didn't receive packet %d before timeout", i)
- }
- }
-}
-
-// TestMailBoxAddExpiry asserts that the mailbox will cancel back Adds that have
-// reached their expiry time.
-func TestMailBoxAddExpiry(t *testing.T) {
- var (
- expiry = time.Minute
- batchDelay = time.Second
- firstBatchStart = time.Now()
- firstBatchExpiry = firstBatchStart.Add(expiry)
- secondBatchStart = firstBatchStart.Add(batchDelay)
- secondBatchExpiry = secondBatchStart.Add(expiry)
- )
-
- ctx := newMailboxContext(t, firstBatchStart, expiry)
- defer ctx.mailbox.Stop()
-
- // Each batch will consist of 10 messages.
- const numBatchPackets = 10
-
- firstBatch := ctx.sendAdds(0, numBatchPackets)
-
- ctx.clock.SetTime(secondBatchStart)
- ctx.checkFails(nil)
-
- secondBatch := ctx.sendAdds(numBatchPackets, numBatchPackets)
-
- ctx.clock.SetTime(firstBatchExpiry)
- ctx.checkFails(firstBatch)
-
- ctx.clock.SetTime(secondBatchExpiry)
- ctx.checkFails(secondBatch)
-}
-
-// TestMailBoxDuplicateAddPacket asserts that the mailbox returns an
-// ErrPacketAlreadyExists failure when two htlcPackets are added with identical
-// incoming circuit keys.
-func TestMailBoxDuplicateAddPacket(t *testing.T) {
- t.Parallel()
-
- ctx := newMailboxContext(t, time.Now(), testExpiry)
- ctx.mailbox.Start()
- defer ctx.mailbox.Stop()
-
- addTwice := func(t *testing.T, pkt *htlcPacket) {
- // The first add should succeed.
- err := ctx.mailbox.AddPacket(pkt)
- if err != nil {
- t.Fatalf("unable to add packet: %v", err)
- }
-
- // Adding again with the same incoming circuit key should fail.
- err = ctx.mailbox.AddPacket(pkt)
- if !ErrPacketAlreadyExists.Is(err) {
- t.Fatalf("expected ErrPacketAlreadyExists, got: %v", err)
- }
- }
-
- // Assert duplicate AddPacket calls fail for all types of HTLCs.
- addTwice(t, &htlcPacket{
- incomingHTLCID: 0,
- htlc: &lnwire.UpdateAddHTLC{},
- })
- addTwice(t, &htlcPacket{
- incomingHTLCID: 1,
- htlc: &lnwire.UpdateFulfillHTLC{},
- })
- addTwice(t, &htlcPacket{
- incomingHTLCID: 2,
- htlc: &lnwire.UpdateFailHTLC{},
- })
-}
-
-// TestMailOrchestrator asserts that the orchestrator properly buffers packets
-// for channels that haven't been made live, such that they are delivered
-// immediately after BindLiveShortChanID. It also tests that packets are delivered
-// readily to mailboxes for channels that are already in the live state.
-func TestMailOrchestrator(t *testing.T) {
- t.Parallel()
-
- // First, we'll create a new instance of our orchestrator.
- mo := newMailOrchestrator(&mailOrchConfig{
- fetchUpdate: func(sid lnwire.ShortChannelID) (
- *lnwire.ChannelUpdate, er.R) {
- return &lnwire.ChannelUpdate{
- ShortChannelID: sid,
- }, nil
- },
- forwardPackets: func(_ chan struct{},
- pkts ...*htlcPacket) er.R {
- return nil
- },
- clock: clock.NewTestClock(time.Now()),
- expiry: testExpiry,
- })
- defer mo.Stop()
-
- // We'll be delivering 10 htlc packets via the orchestrator.
- const numPackets = 10
- const halfPackets = numPackets / 2
-
- // Before any mailbox is created or made live, we will deliver half of
- // the htlcs via the orchestrator.
- chanID1, chanID2, aliceChanID, bobChanID := genIDs()
- sentPackets := make([]*htlcPacket, halfPackets)
- for i := 0; i < halfPackets; i++ {
- pkt := &htlcPacket{
- outgoingChanID: aliceChanID,
- outgoingHTLCID: uint64(i),
- incomingChanID: bobChanID,
- incomingHTLCID: uint64(i),
- amount: lnwire.MilliSatoshi(prand.Int63()),
- htlc: &lnwire.UpdateAddHTLC{
- ID: uint64(i),
- },
- }
- sentPackets[i] = pkt
-
- mo.Deliver(pkt.outgoingChanID, pkt)
- }
-
- // Now, initialize a new mailbox for Alice's chanid.
- mailbox := mo.GetOrCreateMailBox(chanID1, aliceChanID)
-
- // Verify that no messages are received, since Alice's mailbox has not
- // been made live.
- for i := 0; i < halfPackets; i++ {
- timeout := time.After(50 * time.Millisecond)
- select {
- case <-mailbox.MessageOutBox():
- t.Fatalf("should not receive wire msg after reset")
- case <-timeout:
- }
- }
-
- // Assign a short chan id to the existing mailbox, make it available for
- // capturing incoming HTLCs. The HTLCs added above should be delivered
- // immediately.
- mo.BindLiveShortChanID(mailbox, chanID1, aliceChanID)
-
- // Verify that all of the packets are queued and delivered to Alice's
- // mailbox.
- recvdPackets := make([]*htlcPacket, 0, len(sentPackets))
- for i := 0; i < halfPackets; i++ {
- timeout := time.After(5 * time.Second)
- select {
- case <-timeout:
- t.Fatalf("didn't recv pkt %d after timeout", i)
- case pkt := <-mailbox.PacketOutBox():
- recvdPackets = append(recvdPackets, pkt)
- }
- }
-
- // We should have received half of the total number of packets.
- if len(recvdPackets) != halfPackets {
- t.Fatalf("expected %v packets instead got %v",
- halfPackets, len(recvdPackets))
- }
-
- // Check that the received packets are equal to the sent packets.
- if !reflect.DeepEqual(recvdPackets, sentPackets) {
- t.Fatalf("recvd packets mismatched: expected %v, got %v",
- spew.Sdump(sentPackets), spew.Sdump(recvdPackets))
- }
-
- // For the second half of the test, create a new mailbox for Bob and
- // immediately make it live with an assigned short chan id.
- mailbox = mo.GetOrCreateMailBox(chanID2, bobChanID)
- mo.BindLiveShortChanID(mailbox, chanID2, bobChanID)
-
- // Create the second half of our htlcs, and deliver them via the
- // orchestrator. We should be able to receive each of these in order.
- recvdPackets = make([]*htlcPacket, 0, len(sentPackets))
- for i := 0; i < halfPackets; i++ {
- pkt := &htlcPacket{
- outgoingChanID: aliceChanID,
- outgoingHTLCID: uint64(halfPackets + i),
- incomingChanID: bobChanID,
- incomingHTLCID: uint64(halfPackets + i),
- amount: lnwire.MilliSatoshi(prand.Int63()),
- htlc: &lnwire.UpdateAddHTLC{
- ID: uint64(halfPackets + i),
- },
- }
- sentPackets[i] = pkt
-
- mo.Deliver(pkt.incomingChanID, pkt)
-
- timeout := time.After(50 * time.Millisecond)
- select {
- case <-timeout:
- t.Fatalf("didn't recv pkt %d after timeout", halfPackets+i)
- case pkt := <-mailbox.PacketOutBox():
- recvdPackets = append(recvdPackets, pkt)
- }
- }
-
- // Again, we should have received half of the total number of packets.
- if len(recvdPackets) != halfPackets {
- t.Fatalf("expected %v packets instead got %v",
- halfPackets, len(recvdPackets))
- }
-
- // Check that the received packets are equal to the sent packets.
- if !reflect.DeepEqual(recvdPackets, sentPackets) {
- t.Fatalf("recvd packets mismatched: expected %v, got %v",
- spew.Sdump(sentPackets), spew.Sdump(recvdPackets))
- }
-}
diff --git a/lnd/htlcswitch/mock.go b/lnd/htlcswitch/mock.go
deleted file mode 100644
index 60b04534..00000000
--- a/lnd/htlcswitch/mock.go
+++ /dev/null
@@ -1,947 +0,0 @@
-package htlcswitch
-
-import (
- "bytes"
- "crypto/sha256"
- "encoding/binary"
- "io"
- "io/ioutil"
- "net"
- "os"
- "sync"
- "sync/atomic"
- "testing"
- "time"
-
- "github.com/pkt-cash/pktd/btcec"
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/btcutil/util"
- sphinx "github.com/pkt-cash/pktd/lightning-onion"
- "github.com/pkt-cash/pktd/lnd/chainntnfs"
- "github.com/pkt-cash/pktd/lnd/channeldb"
- "github.com/pkt-cash/pktd/lnd/clock"
- "github.com/pkt-cash/pktd/lnd/contractcourt"
- "github.com/pkt-cash/pktd/lnd/htlcswitch/hop"
- "github.com/pkt-cash/pktd/lnd/invoices"
- "github.com/pkt-cash/pktd/lnd/lnpeer"
- "github.com/pkt-cash/pktd/lnd/lntest/mock"
- "github.com/pkt-cash/pktd/lnd/lntypes"
- "github.com/pkt-cash/pktd/lnd/lnwallet/chainfee"
- "github.com/pkt-cash/pktd/lnd/lnwire"
- "github.com/pkt-cash/pktd/lnd/ticker"
- "github.com/pkt-cash/pktd/wire"
-)
-
-type mockPreimageCache struct {
- sync.Mutex
- preimageMap map[lntypes.Hash]lntypes.Preimage
-}
-
-func newMockPreimageCache() *mockPreimageCache {
- return &mockPreimageCache{
- preimageMap: make(map[lntypes.Hash]lntypes.Preimage),
- }
-}
-
-func (m *mockPreimageCache) LookupPreimage(
- hash lntypes.Hash) (lntypes.Preimage, bool) {
-
- m.Lock()
- defer m.Unlock()
-
- p, ok := m.preimageMap[hash]
- return p, ok
-}
-
-func (m *mockPreimageCache) AddPreimages(preimages ...lntypes.Preimage) er.R {
- m.Lock()
- defer m.Unlock()
-
- for _, preimage := range preimages {
- m.preimageMap[preimage.Hash()] = preimage
- }
-
- return nil
-}
-
-func (m *mockPreimageCache) SubscribeUpdates() *contractcourt.WitnessSubscription {
- return nil
-}
-
-type mockFeeEstimator struct {
- byteFeeIn chan chainfee.SatPerKWeight
-
- quit chan struct{}
-}
-
-func (m *mockFeeEstimator) EstimateFeePerKW(
- numBlocks uint32) (chainfee.SatPerKWeight, er.R) {
-
- select {
- case feeRate := <-m.byteFeeIn:
- return feeRate, nil
- case <-m.quit:
- return 0, er.Errorf("exiting")
- }
-}
-
-func (m *mockFeeEstimator) RelayFeePerKW() chainfee.SatPerKWeight {
- return 1e3
-}
-
-func (m *mockFeeEstimator) Start() er.R {
- return nil
-}
-func (m *mockFeeEstimator) Stop() er.R {
- close(m.quit)
- return nil
-}
-
-var _ chainfee.Estimator = (*mockFeeEstimator)(nil)
-
-type mockForwardingLog struct {
- sync.Mutex
-
- events map[time.Time]channeldb.ForwardingEvent
-}
-
-func (m *mockForwardingLog) AddForwardingEvents(events []channeldb.ForwardingEvent) er.R {
- m.Lock()
- defer m.Unlock()
-
- for _, event := range events {
- m.events[event.Timestamp] = event
- }
-
- return nil
-}
-
-type mockServer struct {
- started int32 // To be used atomically.
- shutdown int32 // To be used atomically.
- wg sync.WaitGroup
- quit chan struct{}
-
- t testing.TB
-
- name string
- messages chan lnwire.Message
-
- id [33]byte
- htlcSwitch *Switch
-
- registry *mockInvoiceRegistry
- pCache *mockPreimageCache
- interceptorFuncs []messageInterceptor
-}
-
-var _ lnpeer.Peer = (*mockServer)(nil)
-
-func initDB() (*channeldb.DB, er.R) {
- tempPath, errr := ioutil.TempDir("", "switchdb")
- if errr != nil {
- return nil, er.E(errr)
- }
-
- db, err := channeldb.Open(tempPath)
- if err != nil {
- return nil, err
- }
-
- return db, err
-}
-
-func initSwitchWithDB(startingHeight uint32, db *channeldb.DB) (*Switch, er.R) {
- var err er.R
-
- if db == nil {
- db, err = initDB()
- if err != nil {
- return nil, err
- }
- }
-
- cfg := Config{
- DB: db,
- SwitchPackager: channeldb.NewSwitchPackager(),
- FwdingLog: &mockForwardingLog{
- events: make(map[time.Time]channeldb.ForwardingEvent),
- },
- FetchLastChannelUpdate: func(lnwire.ShortChannelID) (*lnwire.ChannelUpdate, er.R) {
- return nil, nil
- },
- Notifier: &mock.ChainNotifier{
- SpendChan: make(chan *chainntnfs.SpendDetail),
- EpochChan: make(chan *chainntnfs.BlockEpoch),
- ConfChan: make(chan *chainntnfs.TxConfirmation),
- },
- FwdEventTicker: ticker.NewForce(DefaultFwdEventInterval),
- LogEventTicker: ticker.NewForce(DefaultLogInterval),
- AckEventTicker: ticker.NewForce(DefaultAckInterval),
- HtlcNotifier: &mockHTLCNotifier{},
- Clock: clock.NewDefaultClock(),
- HTLCExpiry: time.Hour,
- }
-
- return New(cfg, startingHeight)
-}
-
-func newMockServer(t testing.TB, name string, startingHeight uint32,
- db *channeldb.DB, defaultDelta uint32) (*mockServer, er.R) {
-
- var id [33]byte
- h := sha256.Sum256([]byte(name))
- copy(id[:], h[:])
-
- pCache := newMockPreimageCache()
-
- htlcSwitch, err := initSwitchWithDB(startingHeight, db)
- if err != nil {
- return nil, err
- }
-
- registry := newMockRegistry(defaultDelta)
-
- return &mockServer{
- t: t,
- id: id,
- name: name,
- messages: make(chan lnwire.Message, 3000),
- quit: make(chan struct{}),
- registry: registry,
- htlcSwitch: htlcSwitch,
- pCache: pCache,
- interceptorFuncs: make([]messageInterceptor, 0),
- }, nil
-}
-
-func (s *mockServer) Start() er.R {
- if !atomic.CompareAndSwapInt32(&s.started, 0, 1) {
- return er.New("mock server already started")
- }
-
- if err := s.htlcSwitch.Start(); err != nil {
- return err
- }
-
- s.wg.Add(1)
- go func() {
- defer s.wg.Done()
-
- defer func() {
- s.htlcSwitch.Stop()
- }()
-
- for {
- select {
- case msg := <-s.messages:
- var shouldSkip bool
-
- for _, interceptor := range s.interceptorFuncs {
- skip, err := interceptor(msg)
- if err != nil {
- s.t.Fatalf("%v: error in the "+
- "interceptor: %v", s.name, err)
- return
- }
- shouldSkip = shouldSkip || skip
- }
-
- if shouldSkip {
- continue
- }
-
- if err := s.readHandler(msg); err != nil {
- s.t.Fatal(err)
- return
- }
- case <-s.quit:
- return
- }
- }
- }()
-
- return nil
-}
-
-func (s *mockServer) QuitSignal() <-chan struct{} {
- return s.quit
-}
-
-// mockHopIterator represents the test version of hop iterator which instead
-// of encrypting the path in onion blob just stores the path as a list of hops.
-type mockHopIterator struct {
- hops []*hop.Payload
-}
-
-func newMockHopIterator(hops ...*hop.Payload) hop.Iterator {
- return &mockHopIterator{hops: hops}
-}
-
-func (r *mockHopIterator) HopPayload() (*hop.Payload, er.R) {
- h := r.hops[0]
- r.hops = r.hops[1:]
- return h, nil
-}
-
-func (r *mockHopIterator) ExtraOnionBlob() []byte {
- return nil
-}
-
-func (r *mockHopIterator) ExtractErrorEncrypter(
- extracter hop.ErrorEncrypterExtracter) (hop.ErrorEncrypter,
- lnwire.FailCode) {
-
- return extracter(nil)
-}
-
-func (r *mockHopIterator) EncodeNextHop(w io.Writer) er.R {
- var hopLength [4]byte
- binary.BigEndian.PutUint32(hopLength[:], uint32(len(r.hops)))
-
- if _, err := util.Write(w, hopLength[:]); err != nil {
- return err
- }
-
- for _, hop := range r.hops {
- fwdInfo := hop.ForwardingInfo()
- if err := encodeFwdInfo(w, &fwdInfo); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func encodeFwdInfo(w io.Writer, f *hop.ForwardingInfo) er.R {
- if _, err := util.Write(w, []byte{byte(f.Network)}); err != nil {
- return err
- }
-
- if err := util.WriteBin(w, binary.BigEndian, f.NextHop); err != nil {
- return err
- }
-
- if err := util.WriteBin(w, binary.BigEndian, f.AmountToForward); err != nil {
- return err
- }
-
- if err := util.WriteBin(w, binary.BigEndian, f.OutgoingCTLV); err != nil {
- return err
- }
-
- return nil
-}
-
-var _ hop.Iterator = (*mockHopIterator)(nil)
-
-// mockObfuscator mock implementation of the failure obfuscator which only
-// encodes the failure and do not makes any onion obfuscation.
-type mockObfuscator struct {
- ogPacket *sphinx.OnionPacket
- failure lnwire.FailureMessage
-}
-
-// NewMockObfuscator initializes a dummy mockObfuscator used for testing.
-func NewMockObfuscator() hop.ErrorEncrypter {
- return &mockObfuscator{}
-}
-
-func (o *mockObfuscator) OnionPacket() *sphinx.OnionPacket {
- return o.ogPacket
-}
-
-func (o *mockObfuscator) Type() hop.EncrypterType {
- return hop.EncrypterTypeMock
-}
-
-func (o *mockObfuscator) Encode(w io.Writer) er.R {
- return nil
-}
-
-func (o *mockObfuscator) Decode(r io.Reader) er.R {
- return nil
-}
-
-func (o *mockObfuscator) Reextract(
- extracter hop.ErrorEncrypterExtracter) er.R {
-
- return nil
-}
-
-func (o *mockObfuscator) EncryptFirstHop(failure lnwire.FailureMessage) (
- lnwire.OpaqueReason, er.R) {
-
- o.failure = failure
-
- var b bytes.Buffer
- if err := lnwire.EncodeFailure(&b, failure, 0); err != nil {
- return nil, err
- }
- return b.Bytes(), nil
-}
-
-func (o *mockObfuscator) IntermediateEncrypt(reason lnwire.OpaqueReason) lnwire.OpaqueReason {
- return reason
-}
-
-func (o *mockObfuscator) EncryptMalformedError(reason lnwire.OpaqueReason) lnwire.OpaqueReason {
- return reason
-}
-
-// mockDeobfuscator mock implementation of the failure deobfuscator which
-// only decodes the failure do not makes any onion obfuscation.
-type mockDeobfuscator struct{}
-
-func newMockDeobfuscator() ErrorDecrypter {
- return &mockDeobfuscator{}
-}
-
-func (o *mockDeobfuscator) DecryptError(reason lnwire.OpaqueReason) (*ForwardingError, er.R) {
-
- r := bytes.NewReader(reason)
- failure, err := lnwire.DecodeFailure(r, 0)
- if err != nil {
- return nil, err
- }
-
- return NewForwardingError(failure, 1), nil
-}
-
-var _ ErrorDecrypter = (*mockDeobfuscator)(nil)
-
-// mockIteratorDecoder test version of hop iterator decoder which decodes the
-// encoded array of hops.
-type mockIteratorDecoder struct {
- mu sync.RWMutex
-
- responses map[[32]byte][]hop.DecodeHopIteratorResponse
-
- decodeFail bool
-}
-
-func newMockIteratorDecoder() *mockIteratorDecoder {
- return &mockIteratorDecoder{
- responses: make(map[[32]byte][]hop.DecodeHopIteratorResponse),
- }
-}
-
-func (p *mockIteratorDecoder) DecodeHopIterator(r io.Reader, rHash []byte,
- cltv uint32) (hop.Iterator, lnwire.FailCode) {
-
- var b [4]byte
- _, err := r.Read(b[:])
- if err != nil {
- return nil, lnwire.CodeTemporaryChannelFailure
- }
- hopLength := binary.BigEndian.Uint32(b[:])
-
- hops := make([]*hop.Payload, hopLength)
- for i := uint32(0); i < hopLength; i++ {
- var f hop.ForwardingInfo
- if err := decodeFwdInfo(r, &f); err != nil {
- return nil, lnwire.CodeTemporaryChannelFailure
- }
-
- var nextHopBytes [8]byte
- binary.BigEndian.PutUint64(nextHopBytes[:], f.NextHop.ToUint64())
-
- hops[i] = hop.NewLegacyPayload(&sphinx.HopData{
- Realm: [1]byte{}, // hop.BitcoinNetwork
- NextAddress: nextHopBytes,
- ForwardAmount: uint64(f.AmountToForward),
- OutgoingCltv: f.OutgoingCTLV,
- })
- }
-
- return newMockHopIterator(hops...), lnwire.CodeNone
-}
-
-func (p *mockIteratorDecoder) DecodeHopIterators(id []byte,
- reqs []hop.DecodeHopIteratorRequest) (
- []hop.DecodeHopIteratorResponse, er.R) {
-
- idHash := sha256.Sum256(id)
-
- p.mu.RLock()
- if resps, ok := p.responses[idHash]; ok {
- p.mu.RUnlock()
- return resps, nil
- }
- p.mu.RUnlock()
-
- batchSize := len(reqs)
-
- resps := make([]hop.DecodeHopIteratorResponse, 0, batchSize)
- for _, req := range reqs {
- iterator, failcode := p.DecodeHopIterator(
- req.OnionReader, req.RHash, req.IncomingCltv,
- )
-
- if p.decodeFail {
- failcode = lnwire.CodeTemporaryChannelFailure
- }
-
- resp := hop.DecodeHopIteratorResponse{
- HopIterator: iterator,
- FailCode: failcode,
- }
- resps = append(resps, resp)
- }
-
- p.mu.Lock()
- p.responses[idHash] = resps
- p.mu.Unlock()
-
- return resps, nil
-}
-
-func decodeFwdInfo(r io.Reader, f *hop.ForwardingInfo) er.R {
- var net [1]byte
- if _, err := r.Read(net[:]); err != nil {
- return er.E(err)
- }
- f.Network = hop.Network(net[0])
-
- if err := util.ReadBin(r, binary.BigEndian, &f.NextHop); err != nil {
- return err
- }
-
- if err := util.ReadBin(r, binary.BigEndian, &f.AmountToForward); err != nil {
- return err
- }
-
- if err := util.ReadBin(r, binary.BigEndian, &f.OutgoingCTLV); err != nil {
- return err
- }
-
- return nil
-}
-
-// messageInterceptor is function that handles the incoming peer messages and
-// may decide should the peer skip the message or not.
-type messageInterceptor func(m lnwire.Message) (bool, er.R)
-
-// Record is used to set the function which will be triggered when new
-// lnwire message was received.
-func (s *mockServer) intersect(f messageInterceptor) {
- s.interceptorFuncs = append(s.interceptorFuncs, f)
-}
-
-func (s *mockServer) SendMessage(sync bool, msgs ...lnwire.Message) er.R {
-
- for _, msg := range msgs {
- select {
- case s.messages <- msg:
- case <-s.quit:
- return er.New("server is stopped")
- }
- }
-
- return nil
-}
-
-func (s *mockServer) SendMessageLazy(sync bool, msgs ...lnwire.Message) er.R {
- panic("not implemented")
-}
-
-func (s *mockServer) readHandler(message lnwire.Message) er.R {
- var targetChan lnwire.ChannelID
-
- switch msg := message.(type) {
- case *lnwire.UpdateAddHTLC:
- targetChan = msg.ChanID
- case *lnwire.UpdateFulfillHTLC:
- targetChan = msg.ChanID
- case *lnwire.UpdateFailHTLC:
- targetChan = msg.ChanID
- case *lnwire.UpdateFailMalformedHTLC:
- targetChan = msg.ChanID
- case *lnwire.RevokeAndAck:
- targetChan = msg.ChanID
- case *lnwire.CommitSig:
- targetChan = msg.ChanID
- case *lnwire.FundingLocked:
- // Ignore
- return nil
- case *lnwire.ChannelReestablish:
- targetChan = msg.ChanID
- case *lnwire.UpdateFee:
- targetChan = msg.ChanID
- default:
- return er.Errorf("unknown message type: %T", msg)
- }
-
- // Dispatch the commitment update message to the proper channel link
- // dedicated to this channel. If the link is not found, we will discard
- // the message.
- link, err := s.htlcSwitch.GetLink(targetChan)
- if err != nil {
- return nil
- }
-
- // Create goroutine for this, in order to be able to properly stop
- // the server when handler stacked (server unavailable)
- link.HandleChannelUpdate(message)
-
- return nil
-}
-
-func (s *mockServer) PubKey() [33]byte {
- return s.id
-}
-
-func (s *mockServer) IdentityKey() *btcec.PublicKey {
- pubkey, _ := btcec.ParsePubKey(s.id[:], btcec.S256())
- return pubkey
-}
-
-func (s *mockServer) Address() net.Addr {
- return nil
-}
-
-func (s *mockServer) AddNewChannel(channel *channeldb.OpenChannel,
- cancel <-chan struct{}) er.R {
-
- return nil
-}
-
-func (s *mockServer) WipeChannel(*wire.OutPoint) {}
-
-func (s *mockServer) LocalFeatures() *lnwire.FeatureVector {
- return nil
-}
-
-func (s *mockServer) RemoteFeatures() *lnwire.FeatureVector {
- return nil
-}
-
-func (s *mockServer) Stop() er.R {
- if !atomic.CompareAndSwapInt32(&s.shutdown, 0, 1) {
- return nil
- }
-
- close(s.quit)
- s.wg.Wait()
-
- return nil
-}
-
-func (s *mockServer) String() string {
- return s.name
-}
-
-type mockChannelLink struct {
- htlcSwitch *Switch
-
- shortChanID lnwire.ShortChannelID
-
- chanID lnwire.ChannelID
-
- peer lnpeer.Peer
-
- mailBox MailBox
-
- packets chan *htlcPacket
-
- eligible bool
-
- htlcID uint64
-
- checkHtlcTransitResult *LinkError
-
- checkHtlcForwardResult *LinkError
-}
-
-// completeCircuit is a helper method for adding the finalized payment circuit
-// to the switch's circuit map. In testing, this should be executed after
-// receiving an htlc from the downstream packets channel.
-func (f *mockChannelLink) completeCircuit(pkt *htlcPacket) er.R {
- switch htlc := pkt.htlc.(type) {
- case *lnwire.UpdateAddHTLC:
- pkt.outgoingChanID = f.shortChanID
- pkt.outgoingHTLCID = f.htlcID
- htlc.ID = f.htlcID
-
- keystone := Keystone{pkt.inKey(), pkt.outKey()}
- if err := f.htlcSwitch.openCircuits(keystone); err != nil {
- return err
- }
-
- f.htlcID++
-
- case *lnwire.UpdateFulfillHTLC, *lnwire.UpdateFailHTLC:
- err := f.htlcSwitch.teardownCircuit(pkt)
- if err != nil {
- return err
- }
- }
-
- f.mailBox.AckPacket(pkt.inKey())
-
- return nil
-}
-
-func (f *mockChannelLink) deleteCircuit(pkt *htlcPacket) er.R {
- return f.htlcSwitch.deleteCircuits(pkt.inKey())
-}
-
-func newMockChannelLink(htlcSwitch *Switch, chanID lnwire.ChannelID,
- shortChanID lnwire.ShortChannelID, peer lnpeer.Peer, eligible bool,
-) *mockChannelLink {
-
- return &mockChannelLink{
- htlcSwitch: htlcSwitch,
- chanID: chanID,
- shortChanID: shortChanID,
- peer: peer,
- eligible: eligible,
- }
-}
-
-func (f *mockChannelLink) HandleSwitchPacket(pkt *htlcPacket) er.R {
- f.mailBox.AddPacket(pkt)
- return nil
-}
-
-func (f *mockChannelLink) HandleLocalAddPacket(pkt *htlcPacket) er.R {
- _ = f.mailBox.AddPacket(pkt)
- return nil
-}
-
-func (f *mockChannelLink) HandleChannelUpdate(lnwire.Message) {
-}
-
-func (f *mockChannelLink) UpdateForwardingPolicy(_ ForwardingPolicy) {
-}
-func (f *mockChannelLink) CheckHtlcForward([32]byte, lnwire.MilliSatoshi,
- lnwire.MilliSatoshi, uint32, uint32, uint32) *LinkError {
-
- return f.checkHtlcForwardResult
-}
-
-func (f *mockChannelLink) CheckHtlcTransit(payHash [32]byte,
- amt lnwire.MilliSatoshi, timeout uint32,
- heightNow uint32) *LinkError {
-
- return f.checkHtlcTransitResult
-}
-
-func (f *mockChannelLink) Stats() (uint64, lnwire.MilliSatoshi, lnwire.MilliSatoshi) {
- return 0, 0, 0
-}
-
-func (f *mockChannelLink) AttachMailBox(mailBox MailBox) {
- f.mailBox = mailBox
- f.packets = mailBox.PacketOutBox()
-}
-
-func (f *mockChannelLink) Start() er.R {
- f.mailBox.ResetMessages()
- f.mailBox.ResetPackets()
- return nil
-}
-
-func (f *mockChannelLink) ChanID() lnwire.ChannelID { return f.chanID }
-func (f *mockChannelLink) ShortChanID() lnwire.ShortChannelID { return f.shortChanID }
-func (f *mockChannelLink) Bandwidth() lnwire.MilliSatoshi { return 99999999 }
-func (f *mockChannelLink) Peer() lnpeer.Peer { return f.peer }
-func (f *mockChannelLink) ChannelPoint() *wire.OutPoint { return &wire.OutPoint{} }
-func (f *mockChannelLink) Stop() {}
-func (f *mockChannelLink) EligibleToForward() bool { return f.eligible }
-func (f *mockChannelLink) setLiveShortChanID(sid lnwire.ShortChannelID) { f.shortChanID = sid }
-func (f *mockChannelLink) UpdateShortChanID() (lnwire.ShortChannelID, er.R) {
- f.eligible = true
- return f.shortChanID, nil
-}
-
-var _ ChannelLink = (*mockChannelLink)(nil)
-
-func newDB() (*channeldb.DB, func(), er.R) {
- // First, create a temporary directory to be used for the duration of
- // this test.
- tempDirName, errr := ioutil.TempDir("", "channeldb")
- if errr != nil {
- return nil, nil, er.E(errr)
- }
-
- // Next, create channeldb for the first time.
- cdb, err := channeldb.Open(tempDirName)
- if err != nil {
- os.RemoveAll(tempDirName)
- return nil, nil, err
- }
-
- cleanUp := func() {
- cdb.Close()
- os.RemoveAll(tempDirName)
- }
-
- return cdb, cleanUp, nil
-}
-
-const testInvoiceCltvExpiry = 6
-
-type mockInvoiceRegistry struct {
- settleChan chan lntypes.Hash
-
- registry *invoices.InvoiceRegistry
-
- cleanup func()
-}
-
-func newMockRegistry(minDelta uint32) *mockInvoiceRegistry {
- cdb, cleanup, err := newDB()
- if err != nil {
- panic(err)
- }
-
- registry := invoices.NewRegistry(
- cdb,
- invoices.NewInvoiceExpiryWatcher(clock.NewDefaultClock()),
- &invoices.RegistryConfig{
- FinalCltvRejectDelta: 5,
- },
- )
- registry.Start()
-
- return &mockInvoiceRegistry{
- registry: registry,
- cleanup: cleanup,
- }
-}
-
-func (i *mockInvoiceRegistry) LookupInvoice(rHash lntypes.Hash) (
- channeldb.Invoice, er.R) {
-
- return i.registry.LookupInvoice(rHash)
-}
-
-func (i *mockInvoiceRegistry) SettleHodlInvoice(preimage lntypes.Preimage) er.R {
- return i.registry.SettleHodlInvoice(preimage)
-}
-
-func (i *mockInvoiceRegistry) NotifyExitHopHtlc(rhash lntypes.Hash,
- amt lnwire.MilliSatoshi, expiry uint32, currentHeight int32,
- circuitKey channeldb.CircuitKey, hodlChan chan<- interface{},
- payload invoices.Payload) (invoices.HtlcResolution, er.R) {
-
- event, err := i.registry.NotifyExitHopHtlc(
- rhash, amt, expiry, currentHeight, circuitKey, hodlChan,
- payload,
- )
- if err != nil {
- return nil, err
- }
- if i.settleChan != nil {
- i.settleChan <- rhash
- }
-
- return event, nil
-}
-
-func (i *mockInvoiceRegistry) CancelInvoice(payHash lntypes.Hash) er.R {
- return i.registry.CancelInvoice(payHash)
-}
-
-func (i *mockInvoiceRegistry) AddInvoice(invoice channeldb.Invoice,
- paymentHash lntypes.Hash) er.R {
-
- _, err := i.registry.AddInvoice(&invoice, paymentHash)
- return err
-}
-
-func (i *mockInvoiceRegistry) HodlUnsubscribeAll(subscriber chan<- interface{}) {
- i.registry.HodlUnsubscribeAll(subscriber)
-}
-
-var _ InvoiceDatabase = (*mockInvoiceRegistry)(nil)
-
-type mockCircuitMap struct {
- lookup chan *PaymentCircuit
-}
-
-var _ CircuitMap = (*mockCircuitMap)(nil)
-
-func (m *mockCircuitMap) OpenCircuits(...Keystone) er.R {
- return nil
-}
-
-func (m *mockCircuitMap) TrimOpenCircuits(chanID lnwire.ShortChannelID,
- start uint64) er.R {
- return nil
-}
-
-func (m *mockCircuitMap) DeleteCircuits(inKeys ...CircuitKey) er.R {
- return nil
-}
-
-func (m *mockCircuitMap) CommitCircuits(
- circuit ...*PaymentCircuit) (*CircuitFwdActions, er.R) {
-
- return nil, nil
-}
-
-func (m *mockCircuitMap) CloseCircuit(outKey CircuitKey) (*PaymentCircuit,
- er.R) {
- return nil, nil
-}
-
-func (m *mockCircuitMap) FailCircuit(inKey CircuitKey) (*PaymentCircuit,
- er.R) {
- return nil, nil
-}
-
-func (m *mockCircuitMap) LookupCircuit(inKey CircuitKey) *PaymentCircuit {
- return <-m.lookup
-}
-
-func (m *mockCircuitMap) LookupOpenCircuit(outKey CircuitKey) *PaymentCircuit {
- return nil
-}
-
-func (m *mockCircuitMap) LookupByPaymentHash(hash [32]byte) []*PaymentCircuit {
- return nil
-}
-
-func (m *mockCircuitMap) NumPending() int {
- return 0
-}
-
-func (m *mockCircuitMap) NumOpen() int {
- return 0
-}
-
-type mockOnionErrorDecryptor struct {
- sourceIdx int
- message []byte
- err er.R
-}
-
-func (m *mockOnionErrorDecryptor) DecryptError(encryptedData []byte) (
- *sphinx.DecryptedError, er.R) {
-
- return &sphinx.DecryptedError{
- SenderIdx: m.sourceIdx,
- Message: m.message,
- }, m.err
-}
-
-var _ htlcNotifier = (*mockHTLCNotifier)(nil)
-
-type mockHTLCNotifier struct{}
-
-func (h *mockHTLCNotifier) NotifyForwardingEvent(key HtlcKey, info HtlcInfo,
- eventType HtlcEventType) {
-}
-
-func (h *mockHTLCNotifier) NotifyLinkFailEvent(key HtlcKey, info HtlcInfo,
- eventType HtlcEventType, linkErr *LinkError, incoming bool) {
-}
-
-func (h *mockHTLCNotifier) NotifyForwardingFailEvent(key HtlcKey,
- eventType HtlcEventType) {
-}
-
-func (h *mockHTLCNotifier) NotifySettleEvent(key HtlcKey, eventType HtlcEventType) {
-}
diff --git a/lnd/htlcswitch/packet.go b/lnd/htlcswitch/packet.go
deleted file mode 100644
index a3c1f163..00000000
--- a/lnd/htlcswitch/packet.go
+++ /dev/null
@@ -1,123 +0,0 @@
-package htlcswitch
-
-import (
- "github.com/pkt-cash/pktd/lnd/channeldb"
- "github.com/pkt-cash/pktd/lnd/htlcswitch/hop"
- "github.com/pkt-cash/pktd/lnd/lnwire"
- "github.com/pkt-cash/pktd/lnd/record"
-)
-
-// htlcPacket is a wrapper around htlc lnwire update, which adds additional
-// information which is needed by this package.
-type htlcPacket struct {
- // incomingChanID is the ID of the channel that we have received an incoming
- // HTLC on.
- incomingChanID lnwire.ShortChannelID
-
- // outgoingChanID is the ID of the channel that we have offered or will
- // offer an outgoing HTLC on.
- outgoingChanID lnwire.ShortChannelID
-
- // incomingHTLCID is the ID of the HTLC that we have received from the peer
- // on the incoming channel.
- incomingHTLCID uint64
-
- // outgoingHTLCID is the ID of the HTLC that we offered to the peer on the
- // outgoing channel.
- outgoingHTLCID uint64
-
- // sourceRef is used by forwarded htlcPackets to locate incoming Add
- // entry in a fwdpkg owned by the incoming link. This value can be nil
- // if there is no such entry, e.g. switch initiated payments.
- sourceRef *channeldb.AddRef
-
- // destRef is used to locate a settle/fail entry in the outgoing link's
- // fwdpkg. If sourceRef is non-nil, this reference should be to a
- // settle/fail in response to the sourceRef.
- destRef *channeldb.SettleFailRef
-
- // incomingAmount is the value in milli-satoshis that arrived on an
- // incoming link.
- incomingAmount lnwire.MilliSatoshi
-
- // amount is the value of the HTLC that is being created or modified.
- amount lnwire.MilliSatoshi
-
- // htlc lnwire message type of which depends on switch request type.
- htlc lnwire.Message
-
- // obfuscator contains the necessary state to allow the switch to wrap
- // any forwarded errors in an additional layer of encryption.
- obfuscator hop.ErrorEncrypter
-
- // localFailure is set to true if an HTLC fails for a local payment before
- // the first hop. In this case, the failure reason is simply encoded, not
- // encrypted with any shared secret.
- localFailure bool
-
- // linkFailure is non-nil for htlcs that fail at our node. This may
- // occur for our own payments which fail on the outgoing link,
- // or for forwards which fail in the switch or on the outgoing link.
- linkFailure *LinkError
-
- // convertedError is set to true if this is an HTLC fail that was
- // created using an UpdateFailMalformedHTLC from the remote party. If
- // this is true, then when forwarding this failure packet, we'll need
- // to wrap it as if we were the first hop if it's a multi-hop HTLC. If
- // it's a direct HTLC, then we'll decode the error as no encryption has
- // taken place.
- convertedError bool
-
- // hasSource is set to true if the incomingChanID and incomingHTLCID
- // fields of a forwarded fail packet are already set and do not need to
- // be looked up in the circuit map.
- hasSource bool
-
- // isResolution is set to true if this packet was actually an incoming
- // resolution message from an outside sub-system. We'll treat these as
- // if they emanated directly from the switch. As a result, we'll
- // encrypt all errors related to this packet as if we were the first
- // hop.
- isResolution bool
-
- // circuit holds a reference to an Add's circuit which is persisted in
- // the switch during successful forwarding.
- circuit *PaymentCircuit
-
- // incomingTimeout is the timeout that the incoming HTLC carried. This
- // is the timeout of the HTLC applied to the incoming link.
- incomingTimeout uint32
-
- // outgoingTimeout is the timeout of the proposed outgoing HTLC. This
- // will be extraced from the hop payload recevived by the incoming
- // link.
- outgoingTimeout uint32
-
- // customRecords are user-defined records in the custom type range that
- // were included in the payload.
- customRecords record.CustomSet
-}
-
-// inKey returns the circuit key used to identify the incoming htlc.
-func (p *htlcPacket) inKey() CircuitKey {
- return CircuitKey{
- ChanID: p.incomingChanID,
- HtlcID: p.incomingHTLCID,
- }
-}
-
-// outKey returns the circuit key used to identify the outgoing, forwarded htlc.
-func (p *htlcPacket) outKey() CircuitKey {
- return CircuitKey{
- ChanID: p.outgoingChanID,
- HtlcID: p.outgoingHTLCID,
- }
-}
-
-// keystone returns a tuple containing the incoming and outgoing circuit keys.
-func (p *htlcPacket) keystone() Keystone {
- return Keystone{
- InKey: p.inKey(),
- OutKey: p.outKey(),
- }
-}
diff --git a/lnd/htlcswitch/payment_result.go b/lnd/htlcswitch/payment_result.go
deleted file mode 100644
index f0890cad..00000000
--- a/lnd/htlcswitch/payment_result.go
+++ /dev/null
@@ -1,310 +0,0 @@
-package htlcswitch
-
-import (
- "bytes"
- "encoding/binary"
- "io"
- "sync"
-
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/lnd/channeldb"
- "github.com/pkt-cash/pktd/lnd/channeldb/kvdb"
- "github.com/pkt-cash/pktd/lnd/lnwire"
- "github.com/pkt-cash/pktd/lnd/multimutex"
- "github.com/pkt-cash/pktd/pktlog/log"
-)
-
-var (
-
- // networkResultStoreBucketKey is used for the root level bucket that
- // stores the network result for each payment ID.
- networkResultStoreBucketKey = []byte("network-result-store-bucket")
-
- // ErrPaymentIDNotFound is an error returned if the given paymentID is
- // not found.
- ErrPaymentIDNotFound = Err.CodeWithDetail("ErrPaymentIDNotFound", "paymentID not found")
-
- // ErrPaymentIDAlreadyExists is returned if we try to write a pending
- // payment whose paymentID already exists.
- ErrPaymentIDAlreadyExists = Err.CodeWithDetail("ErrPaymentIDAlreadyExists", "paymentID already exists")
-)
-
-// PaymentResult wraps a decoded result received from the network after a
-// payment attempt was made. This is what is eventually handed to the router
-// for processing.
-type PaymentResult struct {
- // Preimage is set by the switch in case a sent HTLC was settled.
- Preimage [32]byte
-
- // Error is non-nil in case a HTLC send failed, and the HTLC is now
- // irrevocably canceled. If the payment failed during forwarding, this
- // error will be a *ForwardingError.
- Error er.R
-}
-
-// networkResult is the raw result received from the network after a payment
-// attempt has been made. Since the switch doesn't always have the necessary
-// data to decode the raw message, we store it together with some meta data,
-// and decode it when the router query for the final result.
-type networkResult struct {
- // msg is the received result. This should be of type UpdateFulfillHTLC
- // or UpdateFailHTLC.
- msg lnwire.Message
-
- // unencrypted indicates whether the failure encoded in the message is
- // unencrypted, and hence doesn't need to be decrypted.
- unencrypted bool
-
- // isResolution indicates whether this is a resolution message, in
- // which the failure reason might not be included.
- isResolution bool
-}
-
-// serializeNetworkResult serializes the networkResult.
-func serializeNetworkResult(w io.Writer, n *networkResult) er.R {
- if _, err := lnwire.WriteMessage(w, n.msg, 0); err != nil {
- return err
- }
-
- return channeldb.WriteElements(w, n.unencrypted, n.isResolution)
-}
-
-// deserializeNetworkResult deserializes the networkResult.
-func deserializeNetworkResult(r io.Reader) (*networkResult, er.R) {
- var (
- err er.R
- )
-
- n := &networkResult{}
-
- n.msg, err = lnwire.ReadMessage(r, 0)
- if err != nil {
- return nil, err
- }
-
- if err := channeldb.ReadElements(r,
- &n.unencrypted, &n.isResolution,
- ); err != nil {
- return nil, err
- }
-
- return n, nil
-}
-
-// networkResultStore is a persistent store that stores any results of HTLCs in
-// flight on the network. Since payment results are inherently asynchronous, it
-// is used as a common access point for senders of HTLCs, to know when a result
-// is back. The Switch will checkpoint any received result to the store, and
-// the store will keep results and notify the callers about them.
-type networkResultStore struct {
- db *channeldb.DB
-
- // results is a map from paymentIDs to channels where subscribers to
- // payment results will be notified.
- results map[uint64][]chan *networkResult
- resultsMtx sync.Mutex
-
- // paymentIDMtx is a multimutex used to make sure the database and
- // result subscribers map is consistent for each payment ID in case of
- // concurrent callers.
- paymentIDMtx *multimutex.Mutex
-}
-
-func newNetworkResultStore(db *channeldb.DB) *networkResultStore {
- return &networkResultStore{
- db: db,
- results: make(map[uint64][]chan *networkResult),
- paymentIDMtx: multimutex.NewMutex(),
- }
-}
-
-// storeResult stores the networkResult for the given paymentID, and
-// notifies any subscribers.
-func (store *networkResultStore) storeResult(paymentID uint64,
- result *networkResult) er.R {
-
- // We get a mutex for this payment ID. This is needed to ensure
- // consistency between the database state and the subscribers in case
- // of concurrent calls.
- store.paymentIDMtx.Lock(paymentID)
- defer store.paymentIDMtx.Unlock(paymentID)
-
- // Serialize the payment result.
- var b bytes.Buffer
- if err := serializeNetworkResult(&b, result); err != nil {
- return err
- }
-
- var paymentIDBytes [8]byte
- binary.BigEndian.PutUint64(paymentIDBytes[:], paymentID)
-
- err := kvdb.Batch(store.db.Backend, func(tx kvdb.RwTx) er.R {
- networkResults, err := tx.CreateTopLevelBucket(
- networkResultStoreBucketKey,
- )
- if err != nil {
- return err
- }
-
- return networkResults.Put(paymentIDBytes[:], b.Bytes())
- })
- if err != nil {
- return err
- }
-
- // Now that the result is stored in the database, we can notify any
- // active subscribers.
- store.resultsMtx.Lock()
- for _, res := range store.results[paymentID] {
- res <- result
- }
- delete(store.results, paymentID)
- store.resultsMtx.Unlock()
-
- return nil
-}
-
-// subscribeResult is used to get the payment result for the given
-// payment ID. It returns a channel on which the result will be delivered when
-// ready.
-func (store *networkResultStore) subscribeResult(paymentID uint64) (
- <-chan *networkResult, er.R) {
-
- // We get a mutex for this payment ID. This is needed to ensure
- // consistency between the database state and the subscribers in case
- // of concurrent calls.
- store.paymentIDMtx.Lock(paymentID)
- defer store.paymentIDMtx.Unlock(paymentID)
-
- var (
- result *networkResult
- resultChan = make(chan *networkResult, 1)
- )
-
- err := kvdb.View(store.db, func(tx kvdb.RTx) er.R {
- var err er.R
- result, err = fetchResult(tx, paymentID)
- switch {
-
- // Result not yet available, we will notify once a result is
- // available.
- case ErrPaymentIDNotFound.Is(err):
- return nil
-
- case err != nil:
- return err
-
- // The result was found, and will be returned immediately.
- default:
- return nil
- }
- }, func() {
- result = nil
- })
- if err != nil {
- return nil, err
- }
-
- // If the result was found, we can send it on the result channel
- // imemdiately.
- if result != nil {
- resultChan <- result
- return resultChan, nil
- }
-
- // Otherwise we store the result channel for when the result is
- // available.
- store.resultsMtx.Lock()
- store.results[paymentID] = append(
- store.results[paymentID], resultChan,
- )
- store.resultsMtx.Unlock()
-
- return resultChan, nil
-}
-
-// getResult attempts to immediately fetch the result for the given pid from
-// the store. If no result is available, ErrPaymentIDNotFound is returned.
-func (store *networkResultStore) getResult(pid uint64) (
- *networkResult, er.R) {
-
- var result *networkResult
- err := kvdb.View(store.db, func(tx kvdb.RTx) er.R {
- var err er.R
- result, err = fetchResult(tx, pid)
- return err
- }, func() {
- result = nil
- })
- if err != nil {
- return nil, err
- }
-
- return result, nil
-}
-
-func fetchResult(tx kvdb.RTx, pid uint64) (*networkResult, er.R) {
- var paymentIDBytes [8]byte
- binary.BigEndian.PutUint64(paymentIDBytes[:], pid)
-
- networkResults := tx.ReadBucket(networkResultStoreBucketKey)
- if networkResults == nil {
- return nil, ErrPaymentIDNotFound.Default()
- }
-
- // Check whether a result is already available.
- resultBytes := networkResults.Get(paymentIDBytes[:])
- if resultBytes == nil {
- return nil, ErrPaymentIDNotFound.Default()
- }
-
- // Decode the result we found.
- r := bytes.NewReader(resultBytes)
-
- return deserializeNetworkResult(r)
-}
-
-// cleanStore removes all entries from the store, except the payment IDs given.
-// NOTE: Since every result not listed in the keep map will be deleted, care
-// should be taken to ensure no new payment attempts are being made
-// concurrently while this process is ongoing, as its result might end up being
-// deleted.
-func (store *networkResultStore) cleanStore(keep map[uint64]struct{}) er.R {
- return kvdb.Update(store.db.Backend, func(tx kvdb.RwTx) er.R {
- networkResults, err := tx.CreateTopLevelBucket(
- networkResultStoreBucketKey,
- )
- if err != nil {
- return err
- }
-
- // Iterate through the bucket, deleting all items not in the
- // keep map.
- var toClean [][]byte
- if err := networkResults.ForEach(func(k, _ []byte) er.R {
- pid := binary.BigEndian.Uint64(k)
- if _, ok := keep[pid]; ok {
- return nil
- }
-
- toClean = append(toClean, k)
- return nil
- }); err != nil {
- return err
- }
-
- for _, k := range toClean {
- err := networkResults.Delete(k)
- if err != nil {
- return err
- }
- }
-
- if len(toClean) > 0 {
- log.Infof("Removed %d stale entries from network "+
- "result store", len(toClean))
- }
-
- return nil
- }, func() {})
-}
diff --git a/lnd/htlcswitch/payment_result_test.go b/lnd/htlcswitch/payment_result_test.go
deleted file mode 100644
index 3162aac7..00000000
--- a/lnd/htlcswitch/payment_result_test.go
+++ /dev/null
@@ -1,217 +0,0 @@
-package htlcswitch
-
-import (
- "bytes"
- "io/ioutil"
- "math/rand"
- "reflect"
- "testing"
- "time"
-
- "github.com/davecgh/go-spew/spew"
- "github.com/pkt-cash/pktd/btcutil/util"
- "github.com/pkt-cash/pktd/lnd/channeldb"
- "github.com/pkt-cash/pktd/lnd/lntypes"
- "github.com/pkt-cash/pktd/lnd/lnwire"
-)
-
-// TestNetworkResultSerialization checks that NetworkResults are properly
-// (de)serialized.
-func TestNetworkResultSerialization(t *testing.T) {
- t.Parallel()
-
- var preimage lntypes.Preimage
- if _, err := rand.Read(preimage[:]); err != nil {
- t.Fatalf("unable gen rand preimag: %v", err)
- }
-
- var chanID lnwire.ChannelID
- if _, err := rand.Read(chanID[:]); err != nil {
- t.Fatalf("unable gen rand chanid: %v", err)
- }
-
- var reason [256]byte
- if _, err := rand.Read(reason[:]); err != nil {
- t.Fatalf("unable gen rand reason: %v", err)
- }
-
- settle := &lnwire.UpdateFulfillHTLC{
- ChanID: chanID,
- ID: 2,
- PaymentPreimage: preimage,
- }
-
- fail := &lnwire.UpdateFailHTLC{
- ChanID: chanID,
- ID: 1,
- Reason: []byte{},
- }
-
- fail2 := &lnwire.UpdateFailHTLC{
- ChanID: chanID,
- ID: 1,
- Reason: reason[:],
- }
-
- testCases := []*networkResult{
- {
- msg: settle,
- },
- {
- msg: fail,
- unencrypted: false,
- isResolution: false,
- },
- {
- msg: fail,
- unencrypted: false,
- isResolution: true,
- },
- {
- msg: fail2,
- unencrypted: true,
- isResolution: false,
- },
- }
-
- for _, p := range testCases {
- var buf bytes.Buffer
- if err := serializeNetworkResult(&buf, p); err != nil {
- t.Fatalf("serialize failed: %v", err)
- }
-
- r := bytes.NewReader(buf.Bytes())
- p1, err := deserializeNetworkResult(r)
- if err != nil {
- t.Fatalf("unable to deserizlize: %v", err)
- }
-
- if !reflect.DeepEqual(p, p1) {
- t.Fatalf("not equal. %v vs %v", spew.Sdump(p),
- spew.Sdump(p1))
- }
- }
-}
-
-// TestNetworkResultStore tests that the networkResult store behaves as
-// expected, and that we can store, get and subscribe to results.
-func TestNetworkResultStore(t *testing.T) {
- t.Parallel()
-
- const numResults = 4
-
- tempDir, errr := ioutil.TempDir("", "testdb")
- if errr != nil {
- t.Fatal(errr)
- }
-
- db, err := channeldb.Open(tempDir)
- if err != nil {
- t.Fatal(err)
- }
-
- store := newNetworkResultStore(db)
-
- var results []*networkResult
- for i := 0; i < numResults; i++ {
- n := &networkResult{
- msg: &lnwire.UpdateAddHTLC{},
- unencrypted: true,
- isResolution: true,
- }
- results = append(results, n)
- }
-
- // Subscribe to 2 of them.
- var subs []<-chan *networkResult
- for i := uint64(0); i < 2; i++ {
- sub, err := store.subscribeResult(i)
- if err != nil {
- t.Fatalf("unable to subscribe: %v", err)
- }
- subs = append(subs, sub)
- }
-
- // Store three of them.
- for i := uint64(0); i < 3; i++ {
- err := store.storeResult(i, results[i])
- if err != nil {
- t.Fatalf("unable to store result: %v", err)
- }
- }
-
- // The two subscribers should be notified.
- for _, sub := range subs {
- select {
- case <-sub:
- case <-time.After(1 * time.Second):
- t.Fatalf("no result received")
- }
- }
-
- // Let the third one subscribe now. THe result should be received
- // immediately.
- sub, err := store.subscribeResult(2)
- if err != nil {
- t.Fatalf("unable to subscribe: %v", err)
- }
- select {
- case <-sub:
- case <-time.After(1 * time.Second):
- t.Fatalf("no result received")
- }
-
- // Try fetching the result directly for the non-stored one. This should
- // fail.
- _, err = store.getResult(3)
- if !ErrPaymentIDNotFound.Is(err) {
- t.Fatalf("expected ErrPaymentIDNotFound, got %v", err)
- }
-
- // Add the result and try again.
- err = store.storeResult(3, results[3])
- if err != nil {
- t.Fatalf("unable to store result: %v", err)
- }
-
- _, err = store.getResult(3)
- if err != nil {
- t.Fatalf("unable to get result: %v", err)
- }
-
- // Since we don't delete results from the store (yet), make sure we
- // will get subscriptions for all of them.
- for i := uint64(0); i < numResults; i++ {
- sub, err := store.subscribeResult(i)
- if err != nil {
- t.Fatalf("unable to subscribe: %v", err)
- }
-
- select {
- case <-sub:
- case <-time.After(1 * time.Second):
- t.Fatalf("no result received")
- }
- }
-
- // Clean the store keeping the first two results.
- toKeep := map[uint64]struct{}{
- 0: {},
- 1: {},
- }
- // Finally, delete the result.
- err = store.cleanStore(toKeep)
- util.RequireNoErr(t, err)
-
- // Payment IDs 0 and 1 should be found, 2 and 3 should be deleted.
- for i := uint64(0); i < numResults; i++ {
- _, err = store.getResult(i)
- if i <= 1 {
- util.RequireNoErr(t, err, "unable to get result")
- }
- if i >= 2 && !ErrPaymentIDNotFound.Is(err) {
- t.Fatalf("expected ErrPaymentIDNotFound, got %v", err)
- }
-
- }
-}
diff --git a/lnd/htlcswitch/sequencer.go b/lnd/htlcswitch/sequencer.go
deleted file mode 100644
index fc1cfa90..00000000
--- a/lnd/htlcswitch/sequencer.go
+++ /dev/null
@@ -1,130 +0,0 @@
-package htlcswitch
-
-import (
- "sync"
-
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/lnd/channeldb"
- "github.com/pkt-cash/pktd/lnd/channeldb/kvdb"
-)
-
-// defaultSequenceBatchSize specifies the window of sequence numbers that are
-// allocated for each write to disk made by the sequencer.
-const defaultSequenceBatchSize = 1000
-
-// Sequencer emits sequence numbers for locally initiated HTLCs. These are
-// only used internally for tracking pending payments, however they must be
-// unique in order to avoid circuit key collision in the circuit map.
-type Sequencer interface {
- // NextID returns a unique sequence number for each invocation.
- NextID() (uint64, er.R)
-}
-
-var (
- // nextPaymentIDKey identifies the bucket that will keep track of the
- // persistent sequence numbers for payments.
- nextPaymentIDKey = []byte("next-payment-id-key")
-
- // ErrSequencerCorrupted signals that the persistence engine was not
- // initialized, or has been corrupted since startup.
- ErrSequencerCorrupted = Err.CodeWithDetail("ErrSequencerCorrupted",
- "sequencer database has been corrupted")
-)
-
-// persistentSequencer is a concrete implementation of IDGenerator, that uses
-// channeldb to allocate sequence numbers.
-type persistentSequencer struct {
- db *channeldb.DB
-
- mu sync.Mutex
-
- nextID uint64
- horizonID uint64
-}
-
-// NewPersistentSequencer initializes a new sequencer using a channeldb backend.
-func NewPersistentSequencer(db *channeldb.DB) (Sequencer, er.R) {
- g := &persistentSequencer{
- db: db,
- }
-
- // Ensure the database bucket is created before any updates are
- // performed.
- if err := g.initDB(); err != nil {
- return nil, err
- }
-
- return g, nil
-}
-
-// NextID returns a unique sequence number for every invocation, persisting the
-// assignment to avoid reuse.
-func (s *persistentSequencer) NextID() (uint64, er.R) {
-
- // nextID will be the unique sequence number returned if no errors are
- // encountered.
- var nextID uint64
-
- // If our sequence batch has not been exhausted, we can allocate the
- // next identifier in the range.
- s.mu.Lock()
- defer s.mu.Unlock()
-
- if s.nextID < s.horizonID {
- nextID = s.nextID
- s.nextID++
-
- return nextID, nil
- }
-
- // Otherwise, our sequence batch has been exhausted. We use the last
- // known sequence number on disk to mark the beginning of the next
- // sequence batch, and allocate defaultSequenceBatchSize (1000) at a
- // time.
- //
- // NOTE: This also will happen on the first invocation after startup,
- // i.e. when nextID and horizonID are both 0. The next sequence batch to be
- // allocated will start from the last known tip on disk, which is fine
- // as we only require uniqueness of the allocated numbers.
- var nextHorizonID uint64
- if err := kvdb.Update(s.db, func(tx kvdb.RwTx) er.R {
- nextIDBkt := tx.ReadWriteBucket(nextPaymentIDKey)
- if nextIDBkt == nil {
- return ErrSequencerCorrupted.Default()
- }
-
- nextID = nextIDBkt.Sequence()
- nextHorizonID = nextID + defaultSequenceBatchSize
-
- // Cannot fail when used in Update.
- nextIDBkt.SetSequence(nextHorizonID)
-
- return nil
- }, func() {
- nextHorizonID = 0
- }); err != nil {
- return 0, err
- }
-
- // Never assign index zero, to avoid collisions with the EmptyKeystone.
- if nextID == 0 {
- nextID++
- }
-
- // If our batch sequence allocation succeed, update our in-memory values
- // so we can continue to allocate sequence numbers without hitting disk.
- // The nextID is incremented by one in memory so the in can be used
- // issued directly on the next invocation.
- s.nextID = nextID + 1
- s.horizonID = nextHorizonID
-
- return nextID, nil
-}
-
-// initDB populates the bucket used to generate payment sequence numbers.
-func (s *persistentSequencer) initDB() er.R {
- return kvdb.Update(s.db, func(tx kvdb.RwTx) er.R {
- _, err := tx.CreateTopLevelBucket(nextPaymentIDKey)
- return err
- }, func() {})
-}
diff --git a/lnd/htlcswitch/switch.go b/lnd/htlcswitch/switch.go
deleted file mode 100644
index 4a15ec80..00000000
--- a/lnd/htlcswitch/switch.go
+++ /dev/null
@@ -1,2249 +0,0 @@
-package htlcswitch
-
-import (
- "bytes"
- "math/rand"
- "sync"
- "sync/atomic"
- "time"
-
- "github.com/davecgh/go-spew/spew"
- "github.com/pkt-cash/pktd/btcutil"
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/lnd/chainntnfs"
- "github.com/pkt-cash/pktd/lnd/channeldb"
- "github.com/pkt-cash/pktd/lnd/channeldb/kvdb"
- "github.com/pkt-cash/pktd/lnd/clock"
- "github.com/pkt-cash/pktd/lnd/contractcourt"
- "github.com/pkt-cash/pktd/lnd/htlcswitch/hop"
- "github.com/pkt-cash/pktd/lnd/lntypes"
- "github.com/pkt-cash/pktd/lnd/lnwallet"
- "github.com/pkt-cash/pktd/lnd/lnwallet/chainfee"
- "github.com/pkt-cash/pktd/lnd/lnwire"
- "github.com/pkt-cash/pktd/lnd/ticker"
- "github.com/pkt-cash/pktd/pktlog/log"
- "github.com/pkt-cash/pktd/wire"
-)
-
-const (
- // DefaultFwdEventInterval is the duration between attempts to flush
- // pending forwarding events to disk.
- DefaultFwdEventInterval = 15 * time.Second
-
- // DefaultLogInterval is the duration between attempts to log statistics
- // about forwarding events.
- DefaultLogInterval = 10 * time.Second
-
- // DefaultAckInterval is the duration between attempts to ack any settle
- // fails in a forwarding package.
- DefaultAckInterval = 15 * time.Second
-
- // DefaultHTLCExpiry is the duration after which Adds will be cancelled
- // if they could not get added to an outgoing commitment.
- DefaultHTLCExpiry = time.Minute
-)
-
-var (
- Err = er.NewErrorType("lnd.htlcswitch")
- // ErrChannelLinkNotFound is used when channel link hasn't been found.
- ErrChannelLinkNotFound = Err.CodeWithDetail("ErrChannelLinkNotFound", "channel link not found")
-
- // ErrDuplicateAdd signals that the ADD htlc was already forwarded
- // through the switch and is locked into another commitment txn.
- ErrDuplicateAdd = Err.CodeWithDetail("ErrDuplicateAdd", "duplicate add HTLC detected")
-
- // ErrUnknownErrorDecryptor signals that we were unable to locate the
- // error decryptor for this payment. This is likely due to restarting
- // the daemon.
- ErrUnknownErrorDecryptor = Err.CodeWithDetail("ErrUnknownErrorDecryptor", "unknown error decryptor")
-
- // ErrSwitchExiting signaled when the switch has received a shutdown
- // request.
- ErrSwitchExiting = Err.CodeWithDetail("ErrSwitchExiting", "htlcswitch shutting down")
-
- // ErrNoLinksFound is an error returned when we attempt to retrieve the
- // active links in the switch for a specific destination.
- ErrNoLinksFound = Err.CodeWithDetail("ErrNoLinksFound", "no channel links found")
-
- // ErrUnreadableFailureMessage is returned when the failure message
- // cannot be decrypted.
- ErrUnreadableFailureMessage = Err.CodeWithDetail("ErrUnreadableFailureMessage", "unreadable failure message")
-
- // ErrLocalAddFailed signals that the ADD htlc for a local payment
- // failed to be processed.
- ErrLocalAddFailed = Err.CodeWithDetail("ErrLocalAddFailed", "local add HTLC failed")
-)
-
-// plexPacket encapsulates switch packet and adds error channel to receive
-// error from request handler.
-type plexPacket struct {
- pkt *htlcPacket
- err chan er.R
-}
-
-// ChannelCloseType is an enum which signals the type of channel closure the
-// peer should execute.
-type ChannelCloseType uint8
-
-const (
- // CloseRegular indicates a regular cooperative channel closure
- // should be attempted.
- CloseRegular ChannelCloseType = iota
-
- // CloseBreach indicates that a channel breach has been detected, and
- // the link should immediately be marked as unavailable.
- CloseBreach
-)
-
-// ChanClose represents a request which close a particular channel specified by
-// its id.
-type ChanClose struct {
- // CloseType is a variable which signals the type of channel closure the
- // peer should execute.
- CloseType ChannelCloseType
-
- // ChanPoint represent the id of the channel which should be closed.
- ChanPoint *wire.OutPoint
-
- // TargetFeePerKw is the ideal fee that was specified by the caller.
- // This value is only utilized if the closure type is CloseRegular.
- // This will be the starting offered fee when the fee negotiation
- // process for the cooperative closure transaction kicks off.
- TargetFeePerKw chainfee.SatPerKWeight
-
- // DeliveryScript is an optional delivery script to pay funds out to.
- DeliveryScript lnwire.DeliveryAddress
-
- // Updates is used by request creator to receive the notifications about
- // execution of the close channel request.
- Updates chan interface{}
-
- // Err is used by request creator to receive request execution error.
- Err chan er.R
-}
-
-// Config defines the configuration for the service. ALL elements within the
-// configuration MUST be non-nil for the service to carry out its duties.
-type Config struct {
- // FwdingLog is an interface that will be used by the switch to log
- // forwarding events. A forwarding event happens each time a payment
- // circuit is successfully completed. So when we forward an HTLC, and a
- // settle is eventually received.
- FwdingLog ForwardingLog
-
- // LocalChannelClose kicks-off the workflow to execute a cooperative or
- // forced unilateral closure of the channel initiated by a local
- // subsystem.
- LocalChannelClose func(pubKey []byte, request *ChanClose)
-
- // DB is the channeldb instance that will be used to back the switch's
- // persistent circuit map.
- DB *channeldb.DB
-
- // SwitchPackager provides access to the forwarding packages of all
- // active channels. This gives the switch the ability to read arbitrary
- // forwarding packages, and ack settles and fails contained within them.
- SwitchPackager channeldb.FwdOperator
-
- // ExtractErrorEncrypter is an interface allowing switch to reextract
- // error encrypters stored in the circuit map on restarts, since they
- // are not stored directly within the database.
- ExtractErrorEncrypter hop.ErrorEncrypterExtracter
-
- // FetchLastChannelUpdate retrieves the latest routing policy for a
- // target channel. This channel will typically be the outgoing channel
- // specified when we receive an incoming HTLC. This will be used to
- // provide payment senders our latest policy when sending encrypted
- // error messages.
- FetchLastChannelUpdate func(lnwire.ShortChannelID) (*lnwire.ChannelUpdate, er.R)
-
- // Notifier is an instance of a chain notifier that we'll use to signal
- // the switch when a new block has arrived.
- Notifier chainntnfs.ChainNotifier
-
- // HtlcNotifier is an instance of a htlcNotifier which we will pipe htlc
- // events through.
- HtlcNotifier htlcNotifier
-
- // FwdEventTicker is a signal that instructs the htlcswitch to flush any
- // pending forwarding events.
- FwdEventTicker ticker.Ticker
-
- // LogEventTicker is a signal instructing the htlcswitch to log
- // aggregate stats about it's forwarding during the last interval.
- LogEventTicker ticker.Ticker
-
- // AckEventTicker is a signal instructing the htlcswitch to ack any settle
- // fails in forwarding packages.
- AckEventTicker ticker.Ticker
-
- // AllowCircularRoute is true if the user has configured their node to
- // allow forwards that arrive and depart our node over the same channel.
- AllowCircularRoute bool
-
- // RejectHTLC is a flag that instructs the htlcswitch to reject any
- // HTLCs that are not from the source hop.
- RejectHTLC bool
-
- // Clock is a time source for the switch.
- Clock clock.Clock
-
- // HTLCExpiry is the interval after which Adds will be cancelled if they
- // have not been yet been delivered to a link. The computed deadline
- // will expiry this long after the Adds are added to a mailbox via
- // AddPacket.
- HTLCExpiry time.Duration
-}
-
-// Switch is the central messaging bus for all incoming/outgoing HTLCs.
-// Connected peers with active channels are treated as named interfaces which
-// refer to active channels as links. A link is the switch's message
-// communication point with the goroutine that manages an active channel. New
-// links are registered each time a channel is created, and unregistered once
-// the channel is closed. The switch manages the hand-off process for multi-hop
-// HTLCs, forwarding HTLCs initiated from within the daemon, and finally
-// notifies users local-systems concerning their outstanding payment requests.
-type Switch struct {
- started int32 // To be used atomically.
- shutdown int32 // To be used atomically.
-
- // bestHeight is the best known height of the main chain. The links will
- // be used this information to govern decisions based on HTLC timeouts.
- // This will be retrieved by the registered links atomically.
- bestHeight uint32
-
- wg sync.WaitGroup
- quit chan struct{}
-
- // cfg is a copy of the configuration struct that the htlc switch
- // service was initialized with.
- cfg *Config
-
- // networkResults stores the results of payments initiated by the user.
- // results. The store is used to later look up the payments and notify
- // the user of the result when they are complete. Each payment attempt
- // should be given a unique integer ID when it is created, otherwise
- // results might be overwritten.
- networkResults *networkResultStore
-
- // circuits is storage for payment circuits which are used to
- // forward the settle/fail htlc updates back to the add htlc initiator.
- circuits CircuitMap
-
- // mailOrchestrator manages the lifecycle of mailboxes used throughout
- // the switch, and facilitates delayed delivery of packets to links that
- // later come online.
- mailOrchestrator *mailOrchestrator
-
- // indexMtx is a read/write mutex that protects the set of indexes
- // below.
- indexMtx sync.RWMutex
-
- // pendingLinkIndex holds links that have not had their final, live
- // short_chan_id assigned. These links can be transitioned into the
- // primary linkIndex by using UpdateShortChanID to load their live id.
- pendingLinkIndex map[lnwire.ChannelID]ChannelLink
-
- // links is a map of channel id and channel link which manages
- // this channel.
- linkIndex map[lnwire.ChannelID]ChannelLink
-
- // forwardingIndex is an index which is consulted by the switch when it
- // needs to locate the next hop to forward an incoming/outgoing HTLC
- // update to/from.
- //
- // TODO(roasbeef): eventually add a NetworkHop mapping before the
- // ChannelLink
- forwardingIndex map[lnwire.ShortChannelID]ChannelLink
-
- // interfaceIndex maps the compressed public key of a peer to all the
- // channels that the switch maintains with that peer.
- interfaceIndex map[[33]byte]map[lnwire.ChannelID]ChannelLink
-
- // htlcPlex is the channel which all connected links use to coordinate
- // the setup/teardown of Sphinx (onion routing) payment circuits.
- // Active links forward any add/settle messages over this channel each
- // state transition, sending new adds/settles which are fully locked
- // in.
- htlcPlex chan *plexPacket
-
- // chanCloseRequests is used to transfer the channel close request to
- // the channel close handler.
- chanCloseRequests chan *ChanClose
-
- // resolutionMsgs is the channel that all external contract resolution
- // messages will be sent over.
- resolutionMsgs chan *resolutionMsg
-
- // pendingFwdingEvents is the set of forwarding events which have been
- // collected during the current interval, but hasn't yet been written
- // to the forwarding log.
- fwdEventMtx sync.Mutex
- pendingFwdingEvents []channeldb.ForwardingEvent
-
- // blockEpochStream is an active block epoch event stream backed by an
- // active ChainNotifier instance. This will be used to retrieve the
- // lastest height of the chain.
- blockEpochStream *chainntnfs.BlockEpochEvent
-
- // pendingSettleFails is the set of settle/fail entries that we need to
- // ack in the forwarding package of the outgoing link. This was added to
- // make pipelining settles more efficient.
- pendingSettleFails []channeldb.SettleFailRef
-}
-
-// New creates the new instance of htlc switch.
-func New(cfg Config, currentHeight uint32) (*Switch, er.R) {
- circuitMap, err := NewCircuitMap(&CircuitMapConfig{
- DB: cfg.DB,
- ExtractErrorEncrypter: cfg.ExtractErrorEncrypter,
- })
- if err != nil {
- return nil, err
- }
-
- s := &Switch{
- bestHeight: currentHeight,
- cfg: &cfg,
- circuits: circuitMap,
- linkIndex: make(map[lnwire.ChannelID]ChannelLink),
- forwardingIndex: make(map[lnwire.ShortChannelID]ChannelLink),
- interfaceIndex: make(map[[33]byte]map[lnwire.ChannelID]ChannelLink),
- pendingLinkIndex: make(map[lnwire.ChannelID]ChannelLink),
- networkResults: newNetworkResultStore(cfg.DB),
- htlcPlex: make(chan *plexPacket),
- chanCloseRequests: make(chan *ChanClose),
- resolutionMsgs: make(chan *resolutionMsg),
- quit: make(chan struct{}),
- }
-
- s.mailOrchestrator = newMailOrchestrator(&mailOrchConfig{
- fetchUpdate: s.cfg.FetchLastChannelUpdate,
- forwardPackets: s.ForwardPackets,
- clock: s.cfg.Clock,
- expiry: s.cfg.HTLCExpiry,
- })
-
- return s, nil
-}
-
-// resolutionMsg is a struct that wraps an existing ResolutionMsg with a done
-// channel. We'll use this channel to synchronize delivery of the message with
-// the caller.
-type resolutionMsg struct {
- contractcourt.ResolutionMsg
-
- doneChan chan struct{}
-}
-
-// ProcessContractResolution is called by active contract resolvers once a
-// contract they are watching over has been fully resolved. The message carries
-// an external signal that *would* have been sent if the outgoing channel
-// didn't need to go to the chain in order to fulfill a contract. We'll process
-// this message just as if it came from an active outgoing channel.
-func (s *Switch) ProcessContractResolution(msg contractcourt.ResolutionMsg) er.R {
-
- done := make(chan struct{})
-
- select {
- case s.resolutionMsgs <- &resolutionMsg{
- ResolutionMsg: msg,
- doneChan: done,
- }:
- case <-s.quit:
- return ErrSwitchExiting.Default()
- }
-
- select {
- case <-done:
- case <-s.quit:
- return ErrSwitchExiting.Default()
- }
-
- return nil
-}
-
-// GetPaymentResult returns the the result of the payment attempt with the
-// given paymentID. The method returns a channel where the payment result will
-// be sent when available, or an error is encountered during forwarding. When a
-// result is received on the channel, the HTLC is guaranteed to no longer be in
-// flight. The switch shutting down is signaled by closing the channel. If the
-// paymentID is unknown, ErrPaymentIDNotFound will be returned.
-func (s *Switch) GetPaymentResult(paymentID uint64, paymentHash lntypes.Hash,
- deobfuscator ErrorDecrypter) (<-chan *PaymentResult, er.R) {
-
- var (
- nChan <-chan *networkResult
- err er.R
- outKey = CircuitKey{
- ChanID: hop.Source,
- HtlcID: paymentID,
- }
- )
-
- // If the payment is not found in the circuit map, check whether a
- // result is already available.
- // Assumption: no one will add this payment ID other than the caller.
- if s.circuits.LookupCircuit(outKey) == nil {
- res, err := s.networkResults.getResult(paymentID)
- if err != nil {
- return nil, err
- }
- c := make(chan *networkResult, 1)
- c <- res
- nChan = c
- } else {
- // The payment was committed to the circuits, subscribe for a
- // result.
- nChan, err = s.networkResults.subscribeResult(paymentID)
- if err != nil {
- return nil, err
- }
- }
-
- resultChan := make(chan *PaymentResult, 1)
-
- // Since the payment was known, we can start a goroutine that can
- // extract the result when it is available, and pass it on to the
- // caller.
- s.wg.Add(1)
- go func() {
- defer s.wg.Done()
-
- var n *networkResult
- select {
- case n = <-nChan:
- case <-s.quit:
- // We close the result channel to signal a shutdown. We
- // don't send any result in this case since the HTLC is
- // still in flight.
- close(resultChan)
- return
- }
-
- // Extract the result and pass it to the result channel.
- result, err := s.extractResult(
- deobfuscator, n, paymentID, paymentHash,
- )
- if err != nil {
- e := er.Errorf("unable to extract result: %v", err)
- log.Error(e)
- resultChan <- &PaymentResult{
- Error: e,
- }
- return
- }
- resultChan <- result
- }()
-
- return resultChan, nil
-}
-
-// CleanStore calls the underlying result store, telling it is safe to delete
-// all entries except the ones in the keepPids map. This should be called
-// preiodically to let the switch clean up payment results that we have
-// handled.
-func (s *Switch) CleanStore(keepPids map[uint64]struct{}) er.R {
- return s.networkResults.cleanStore(keepPids)
-}
-
-// SendHTLC is used by other subsystems which aren't belong to htlc switch
-// package in order to send the htlc update. The paymentID used MUST be unique
-// for this HTLC, and MUST be used only once, otherwise the switch might reject
-// it.
-func (s *Switch) SendHTLC(firstHop lnwire.ShortChannelID, paymentID uint64,
- htlc *lnwire.UpdateAddHTLC) er.R {
-
- // Generate and send new update packet, if error will be received on
- // this stage it means that packet haven't left boundaries of our
- // system and something wrong happened.
- packet := &htlcPacket{
- incomingChanID: hop.Source,
- incomingHTLCID: paymentID,
- outgoingChanID: firstHop,
- htlc: htlc,
- }
-
- circuit := newPaymentCircuit(&htlc.PaymentHash, packet)
- actions, err := s.circuits.CommitCircuits(circuit)
- if err != nil {
- log.Errorf("unable to commit circuit in switch: %v", err)
- return err
- }
-
- // Drop duplicate packet if it has already been seen.
- switch {
- case len(actions.Drops) == 1:
- return ErrDuplicateAdd.Default()
-
- case len(actions.Fails) == 1:
- return ErrLocalAddFailed.Default()
- }
-
- // Send packet to link.
- packet.circuit = circuit
-
- // User has created the htlc update therefore we should find the
- // appropriate channel link and send the payment over this link.
- link, linkErr := s.getLocalLink(packet, htlc)
- if linkErr != nil {
- // Notify the htlc notifier of a link failure on our
- // outgoing link. Incoming timelock/amount values are
- // not set because they are not present for local sends.
- s.cfg.HtlcNotifier.NotifyLinkFailEvent(
- newHtlcKey(packet),
- HtlcInfo{
- OutgoingTimeLock: htlc.Expiry,
- OutgoingAmt: htlc.Amount,
- },
- HtlcEventTypeSend,
- linkErr,
- false,
- )
-
- return er.E(linkErr)
- }
-
- return link.HandleLocalAddPacket(packet)
-}
-
-// UpdateForwardingPolicies sends a message to the switch to update the
-// forwarding policies for the set of target channels, keyed in chanPolicies.
-//
-// NOTE: This function is synchronous and will block until either the
-// forwarding policies for all links have been updated, or the switch shuts
-// down.
-func (s *Switch) UpdateForwardingPolicies(
- chanPolicies map[wire.OutPoint]ForwardingPolicy) {
-
- log.Tracef("Updating link policies: %v", log.C(func() string {
- return spew.Sdump(chanPolicies)
- }))
-
- s.indexMtx.RLock()
-
- // Update each link in chanPolicies.
- for targetLink, policy := range chanPolicies {
- cid := lnwire.NewChanIDFromOutPoint(&targetLink)
-
- link, ok := s.linkIndex[cid]
- if !ok {
- log.Debugf("Unable to find ChannelPoint(%v) to update "+
- "link policy", targetLink)
- continue
- }
-
- link.UpdateForwardingPolicy(policy)
- }
-
- s.indexMtx.RUnlock()
-}
-
-// IsForwardedHTLC checks for a given channel and htlc index if it is related
-// to an opened circuit that represents a forwarded payment.
-func (s *Switch) IsForwardedHTLC(chanID lnwire.ShortChannelID,
- htlcIndex uint64) bool {
-
- circuit := s.circuits.LookupOpenCircuit(channeldb.CircuitKey{
- ChanID: chanID,
- HtlcID: htlcIndex,
- })
- return circuit != nil && circuit.Incoming.ChanID != hop.Source
-}
-
-// ForwardPackets adds a list of packets to the switch for processing. Fails
-// and settles are added on a first past, simultaneously constructing circuits
-// for any adds. After persisting the circuits, another pass of the adds is
-// given to forward them through the router. The sending link's quit channel is
-// used to prevent deadlocks when the switch stops a link in the midst of
-// forwarding.
-func (s *Switch) ForwardPackets(linkQuit chan struct{},
- packets ...*htlcPacket) er.R {
-
- var (
- // fwdChan is a buffered channel used to receive err msgs from
- // the htlcPlex when forwarding this batch.
- fwdChan = make(chan er.R, len(packets))
-
- // numSent keeps a running count of how many packets are
- // forwarded to the switch, which determines how many responses
- // we will wait for on the fwdChan..
- numSent int
- )
-
- // No packets, nothing to do.
- if len(packets) == 0 {
- return nil
- }
-
- // Setup a barrier to prevent the background tasks from processing
- // responses until this function returns to the user.
- var wg sync.WaitGroup
- wg.Add(1)
- defer wg.Done()
-
- // Before spawning the following goroutine to proxy our error responses,
- // check to see if we have already been issued a shutdown request. If
- // so, we exit early to avoid incrementing the switch's waitgroup while
- // it is already in the process of shutting down.
- select {
- case <-linkQuit:
- return nil
- case <-s.quit:
- return nil
- default:
- // Spawn a goroutine to log the errors returned from failed packets.
- s.wg.Add(1)
- go s.logFwdErrs(&numSent, &wg, fwdChan)
- }
-
- // Make a first pass over the packets, forwarding any settles or fails.
- // As adds are found, we create a circuit and append it to our set of
- // circuits to be written to disk.
- var circuits []*PaymentCircuit
- var addBatch []*htlcPacket
- for _, packet := range packets {
- switch htlc := packet.htlc.(type) {
- case *lnwire.UpdateAddHTLC:
- circuit := newPaymentCircuit(&htlc.PaymentHash, packet)
- packet.circuit = circuit
- circuits = append(circuits, circuit)
- addBatch = append(addBatch, packet)
- default:
- err := s.routeAsync(packet, fwdChan, linkQuit)
- if err != nil {
- return er.Errorf("failed to forward packet %v", err)
- }
- numSent++
- }
- }
-
- // If this batch did not contain any circuits to commit, we can return
- // early.
- if len(circuits) == 0 {
- return nil
- }
-
- // Write any circuits that we found to disk.
- actions, err := s.circuits.CommitCircuits(circuits...)
- if err != nil {
- log.Errorf("unable to commit circuits in switch: %v", err)
- }
-
- // Split the htlc packets by comparing an in-order seek to the head of
- // the added, dropped, or failed circuits.
- //
- // NOTE: This assumes each list is guaranteed to be a subsequence of the
- // circuits, and that the union of the sets results in the original set
- // of circuits.
- var addedPackets, failedPackets []*htlcPacket
- for _, packet := range addBatch {
- switch {
- case len(actions.Adds) > 0 && packet.circuit == actions.Adds[0]:
- addedPackets = append(addedPackets, packet)
- actions.Adds = actions.Adds[1:]
-
- case len(actions.Drops) > 0 && packet.circuit == actions.Drops[0]:
- actions.Drops = actions.Drops[1:]
-
- case len(actions.Fails) > 0 && packet.circuit == actions.Fails[0]:
- failedPackets = append(failedPackets, packet)
- actions.Fails = actions.Fails[1:]
- }
- }
-
- // Now, forward any packets for circuits that were successfully added to
- // the switch's circuit map.
- for _, packet := range addedPackets {
- err := s.routeAsync(packet, fwdChan, linkQuit)
- if err != nil {
- return er.Errorf("failed to forward packet %v", err)
- }
- numSent++
- }
-
- // Lastly, for any packets that failed, this implies that they were
- // left in a half added state, which can happen when recovering from
- // failures.
- if len(failedPackets) > 0 {
- var failure lnwire.FailureMessage
- update, err := s.cfg.FetchLastChannelUpdate(
- failedPackets[0].incomingChanID,
- )
- if err != nil {
- failure = &lnwire.FailTemporaryNodeFailure{}
- } else {
- failure = lnwire.NewTemporaryChannelFailure(update)
- }
- linkError := NewDetailedLinkError(
- failure, OutgoingFailureIncompleteForward,
- )
-
- for _, packet := range failedPackets {
- // We don't handle the error here since this method
- // always returns an error.
- _ = s.failAddPacket(packet, linkError)
- }
- }
-
- return nil
-}
-
-// logFwdErrs logs any errors received on `fwdChan`
-func (s *Switch) logFwdErrs(num *int, wg *sync.WaitGroup, fwdChan chan er.R) {
- defer s.wg.Done()
-
- // Wait here until the outer function has finished persisting
- // and routing the packets. This guarantees we don't read from num until
- // the value is accurate.
- wg.Wait()
-
- numSent := *num
- for i := 0; i < numSent; i++ {
- select {
- case err := <-fwdChan:
- if err != nil {
- log.Errorf("Unhandled error while reforwarding htlc "+
- "settle/fail over htlcswitch: %v", err)
- }
- case <-s.quit:
- log.Errorf("unable to forward htlc packet " +
- "htlc switch was stopped")
- return
- }
- }
-}
-
-// routeAsync sends a packet through the htlc switch, using the provided err
-// chan to propagate errors back to the caller. The link's quit channel is
-// provided so that the send can be canceled if either the link or the switch
-// receive a shutdown requuest. This method does not wait for a response from
-// the htlcForwarder before returning.
-func (s *Switch) routeAsync(packet *htlcPacket, errChan chan er.R,
- linkQuit chan struct{}) er.R {
-
- command := &plexPacket{
- pkt: packet,
- err: errChan,
- }
-
- select {
- case s.htlcPlex <- command:
- return nil
- case <-linkQuit:
- return ErrLinkShuttingDown.Default()
- case <-s.quit:
- return er.New("htlc switch was stopped")
- }
-}
-
-// getLocalLink handles the addition of a htlc for a send that originates from
-// our node. It returns the link that the htlc should be forwarded outwards on,
-// and a link error if the htlc cannot be forwarded.
-func (s *Switch) getLocalLink(pkt *htlcPacket, htlc *lnwire.UpdateAddHTLC) (
- ChannelLink, *LinkError) {
-
- // Try to find links by node destination.
- s.indexMtx.RLock()
- link, err := s.getLinkByShortID(pkt.outgoingChanID)
- s.indexMtx.RUnlock()
- if err != nil {
- log.Errorf("Link %v not found", pkt.outgoingChanID)
- return nil, NewLinkError(&lnwire.FailUnknownNextPeer{})
- }
-
- if !link.EligibleToForward() {
- log.Errorf("Link %v is not available to forward",
- pkt.outgoingChanID)
-
- // The update does not need to be populated as the error
- // will be returned back to the router.
- return nil, NewDetailedLinkError(
- lnwire.NewTemporaryChannelFailure(nil),
- OutgoingFailureLinkNotEligible,
- )
- }
-
- // Ensure that the htlc satisfies the outgoing channel policy.
- currentHeight := atomic.LoadUint32(&s.bestHeight)
- htlcErr := link.CheckHtlcTransit(
- htlc.PaymentHash, htlc.Amount, htlc.Expiry, currentHeight,
- )
- if htlcErr != nil {
- log.Errorf("Link %v policy for local forward not "+
- "satisfied", pkt.outgoingChanID)
- return nil, htlcErr
- }
- return link, nil
-}
-
-// handleLocalResponse processes a Settle or Fail responding to a
-// locally-initiated payment. This is handled asynchronously to avoid blocking
-// the main event loop within the switch, as these operations can require
-// multiple db transactions. The guarantees of the circuit map are stringent
-// enough such that we are able to tolerate reordering of these operations
-// without side effects. The primary operations handled are:
-// 1. Save the payment result to the pending payment store.
-// 2. Notify subscribers about the payment result.
-// 3. Ack settle/fail references, to avoid resending this response internally
-// 4. Teardown the closing circuit in the circuit map
-//
-// NOTE: This method MUST be spawned as a goroutine.
-func (s *Switch) handleLocalResponse(pkt *htlcPacket) {
- defer s.wg.Done()
-
- paymentID := pkt.incomingHTLCID
-
- // The error reason will be unencypted in case this a local
- // failure or a converted error.
- unencrypted := pkt.localFailure || pkt.convertedError
- n := &networkResult{
- msg: pkt.htlc,
- unencrypted: unencrypted,
- isResolution: pkt.isResolution,
- }
-
- // Store the result to the db. This will also notify subscribers about
- // the result.
- if err := s.networkResults.storeResult(paymentID, n); err != nil {
- log.Errorf("Unable to complete payment for pid=%v: %v",
- paymentID, err)
- return
- }
-
- // First, we'll clean up any fwdpkg references, circuit entries, and
- // mark in our db that the payment for this payment hash has either
- // succeeded or failed.
- //
- // If this response is contained in a forwarding package, we'll start by
- // acking the settle/fail so that we don't continue to retransmit the
- // HTLC internally.
- if pkt.destRef != nil {
- if err := s.ackSettleFail(*pkt.destRef); err != nil {
- log.Warnf("Unable to ack settle/fail reference: %s: %v",
- *pkt.destRef, err)
- return
- }
- }
-
- // Next, we'll remove the circuit since we are about to complete an
- // fulfill/fail of this HTLC. Since we've already removed the
- // settle/fail fwdpkg reference, the response from the peer cannot be
- // replayed internally if this step fails. If this happens, this logic
- // will be executed when a provided resolution message comes through.
- // This can only happen if the circuit is still open, which is why this
- // ordering is chosen.
- if err := s.teardownCircuit(pkt); err != nil {
- log.Warnf("Unable to teardown circuit %s: %v",
- pkt.inKey(), err)
- return
- }
-
- // Finally, notify on the htlc failure or success that has been handled.
- key := newHtlcKey(pkt)
- eventType := getEventType(pkt)
-
- switch pkt.htlc.(type) {
- case *lnwire.UpdateFulfillHTLC:
- s.cfg.HtlcNotifier.NotifySettleEvent(key, eventType)
-
- case *lnwire.UpdateFailHTLC:
- s.cfg.HtlcNotifier.NotifyForwardingFailEvent(key, eventType)
- }
-}
-
-// extractResult uses the given deobfuscator to extract the payment result from
-// the given network message.
-func (s *Switch) extractResult(deobfuscator ErrorDecrypter, n *networkResult,
- paymentID uint64, paymentHash lntypes.Hash) (*PaymentResult, er.R) {
-
- switch htlc := n.msg.(type) {
-
- // We've received a settle update which means we can finalize the user
- // payment and return successful response.
- case *lnwire.UpdateFulfillHTLC:
- return &PaymentResult{
- Preimage: htlc.PaymentPreimage,
- }, nil
-
- // We've received a fail update which means we can finalize the
- // user payment and return fail response.
- case *lnwire.UpdateFailHTLC:
- paymentErr := s.parseFailedPayment(
- deobfuscator, paymentID, paymentHash, n.unencrypted,
- n.isResolution, htlc,
- )
-
- return &PaymentResult{
- Error: paymentErr,
- }, nil
-
- default:
- return nil, er.Errorf("received unknown response type: %T",
- htlc)
- }
-}
-
-// parseFailedPayment determines the appropriate failure message to return to
-// a user initiated payment. The three cases handled are:
-// 1) An unencrypted failure, which should already plaintext.
-// 2) A resolution from the chain arbitrator, which possibly has no failure
-// reason attached.
-// 3) A failure from the remote party, which will need to be decrypted using
-// the payment deobfuscator.
-func (s *Switch) parseFailedPayment(deobfuscator ErrorDecrypter,
- paymentID uint64, paymentHash lntypes.Hash, unencrypted,
- isResolution bool, htlc *lnwire.UpdateFailHTLC) er.R {
-
- switch {
-
- // The payment never cleared the link, so we don't need to
- // decrypt the error, simply decode it them report back to the
- // user.
- case unencrypted:
- r := bytes.NewReader(htlc.Reason)
- failureMsg, err := lnwire.DecodeFailure(r, 0)
- if err != nil {
- // If we could not decode the failure reason, return a link
- // error indicating that we failed to decode the onion.
- linkError := NewDetailedLinkError(
- // As this didn't even clear the link, we don't
- // need to apply an update here since it goes
- // directly to the router.
- lnwire.NewTemporaryChannelFailure(nil),
- OutgoingFailureDecodeError,
- )
-
- log.Errorf("%v: (hash=%v, pid=%d): %v",
- linkError.FailureDetail.FailureString(),
- paymentHash, paymentID, err)
-
- return er.E(linkError)
- }
-
- // If we successfully decoded the failure reason, return it.
- return er.E(NewLinkError(failureMsg))
-
- // A payment had to be timed out on chain before it got past
- // the first hop. In this case, we'll report a permanent
- // channel failure as this means us, or the remote party had to
- // go on chain.
- case isResolution && htlc.Reason == nil:
- linkError := NewDetailedLinkError(
- &lnwire.FailPermanentChannelFailure{},
- OutgoingFailureOnChainTimeout,
- )
-
- log.Infof("%v: hash=%v, pid=%d",
- linkError.FailureDetail.FailureString(),
- paymentHash, paymentID)
-
- return er.E(linkError)
-
- // A regular multi-hop payment error that we'll need to
- // decrypt.
- default:
- // We'll attempt to fully decrypt the onion encrypted
- // error. If we're unable to then we'll bail early.
- failure, err := deobfuscator.DecryptError(htlc.Reason)
- if err != nil {
- log.Errorf("unable to de-obfuscate onion failure "+
- "(hash=%v, pid=%d): %v",
- paymentHash, paymentID, err)
-
- return ErrUnreadableFailureMessage.Default()
- }
-
- return er.E(failure)
- }
-}
-
-// handlePacketForward is used in cases when we need forward the htlc update
-// from one channel link to another and be able to propagate the settle/fail
-// updates back. This behaviour is achieved by creation of payment circuits.
-func (s *Switch) handlePacketForward(packet *htlcPacket) er.R {
- switch htlc := packet.htlc.(type) {
-
- // Channel link forwarded us a new htlc, therefore we initiate the
- // payment circuit within our internal state so we can properly forward
- // the ultimate settle message back latter.
- case *lnwire.UpdateAddHTLC:
- // Check if the node is set to reject all onward HTLCs and also make
- // sure that HTLC is not from the source node.
- if s.cfg.RejectHTLC {
- failure := NewDetailedLinkError(
- &lnwire.FailChannelDisabled{},
- OutgoingFailureForwardsDisabled,
- )
-
- return s.failAddPacket(packet, failure)
- }
-
- // Before we attempt to find a non-strict forwarding path for
- // this htlc, check whether the htlc is being routed over the
- // same incoming and outgoing channel. If our node does not
- // allow forwards of this nature, we fail the htlc early. This
- // check is in place to disallow inefficiently routed htlcs from
- // locking up our balance.
- linkErr := checkCircularForward(
- packet.incomingChanID, packet.outgoingChanID,
- s.cfg.AllowCircularRoute, htlc.PaymentHash,
- )
- if linkErr != nil {
- return s.failAddPacket(packet, linkErr)
- }
-
- s.indexMtx.RLock()
- targetLink, err := s.getLinkByShortID(packet.outgoingChanID)
- if err != nil {
- s.indexMtx.RUnlock()
-
- log.Debugf("unable to find link with "+
- "destination %v", packet.outgoingChanID)
-
- // If packet was forwarded from another channel link
- // than we should notify this link that some error
- // occurred.
- linkError := NewLinkError(
- &lnwire.FailUnknownNextPeer{},
- )
-
- return s.failAddPacket(packet, linkError)
- }
- targetPeerKey := targetLink.Peer().PubKey()
- interfaceLinks, _ := s.getLinks(targetPeerKey)
- s.indexMtx.RUnlock()
-
- // We'll keep track of any HTLC failures during the link
- // selection process. This way we can return the error for
- // precise link that the sender selected, while optimistically
- // trying all links to utilize our available bandwidth.
- linkErrs := make(map[lnwire.ShortChannelID]*LinkError)
-
- // Find all destination channel links with appropriate
- // bandwidth.
- var destinations []ChannelLink
- for _, link := range interfaceLinks {
- var failure *LinkError
-
- // We'll skip any links that aren't yet eligible for
- // forwarding.
- if !link.EligibleToForward() {
- failure = NewDetailedLinkError(
- &lnwire.FailUnknownNextPeer{},
- OutgoingFailureLinkNotEligible,
- )
- } else {
- // We'll ensure that the HTLC satisfies the
- // current forwarding conditions of this target
- // link.
- currentHeight := atomic.LoadUint32(&s.bestHeight)
- failure = link.CheckHtlcForward(
- htlc.PaymentHash, packet.incomingAmount,
- packet.amount, packet.incomingTimeout,
- packet.outgoingTimeout, currentHeight,
- )
- }
-
- // If this link can forward the htlc, add it to the set
- // of destinations.
- if failure == nil {
- destinations = append(destinations, link)
- continue
- }
-
- linkErrs[link.ShortChanID()] = failure
- }
-
- // If we had a forwarding failure due to the HTLC not
- // satisfying the current policy, then we'll send back an
- // error, but ensure we send back the error sourced at the
- // *target* link.
- if len(destinations) == 0 {
- // At this point, some or all of the links rejected the
- // HTLC so we couldn't forward it. So we'll try to look
- // up the error that came from the source.
- linkErr, ok := linkErrs[packet.outgoingChanID]
- if !ok {
- // If we can't find the error of the source,
- // then we'll return an unknown next peer,
- // though this should never happen.
- linkErr = NewLinkError(
- &lnwire.FailUnknownNextPeer{},
- )
- log.Warnf("unable to find err source for "+
- "outgoing_link=%v, errors=%v",
- packet.outgoingChanID, log.C(func() string {
- return spew.Sdump(linkErrs)
- }))
- }
-
- log.Tracef("incoming HTLC(%x) violated "+
- "target outgoing link (id=%v) policy: %v",
- htlc.PaymentHash[:], packet.outgoingChanID,
- linkErr)
-
- return s.failAddPacket(packet, linkErr)
- }
-
- // Choose a random link out of the set of links that can forward
- // this htlc. The reason for randomization is to evenly
- // distribute the htlc load without making assumptions about
- // what the best channel is.
- destination := destinations[rand.Intn(len(destinations))]
-
- // Send the packet to the destination channel link which
- // manages the channel.
- packet.outgoingChanID = destination.ShortChanID()
- return destination.HandleSwitchPacket(packet)
-
- case *lnwire.UpdateFailHTLC, *lnwire.UpdateFulfillHTLC:
- // If the source of this packet has not been set, use the
- // circuit map to lookup the origin.
- circuit, err := s.closeCircuit(packet)
- if err != nil {
- return err
- }
-
- // closeCircuit returns a nil circuit when a settle packet returns an
- // ErrUnknownCircuit error upon the inner call to CloseCircuit.
- if circuit == nil {
- return nil
- }
-
- fail, isFail := htlc.(*lnwire.UpdateFailHTLC)
- if isFail && !packet.hasSource {
- switch {
- // No message to encrypt, locally sourced payment.
- case circuit.ErrorEncrypter == nil:
-
- // If this is a resolution message, then we'll need to
- // encrypt it as it's actually internally sourced.
- case packet.isResolution:
- var err er.R
- // TODO(roasbeef): don't need to pass actually?
- failure := &lnwire.FailPermanentChannelFailure{}
- fail.Reason, err = circuit.ErrorEncrypter.EncryptFirstHop(
- failure,
- )
- if err != nil {
- err = er.Errorf("unable to obfuscate "+
- "error: %v", err)
- log.Error(err)
- }
-
- // Alternatively, if the remote party send us an
- // UpdateFailMalformedHTLC, then we'll need to convert
- // this into a proper well formatted onion error as
- // there's no HMAC currently.
- case packet.convertedError:
- log.Infof("Converting malformed HTLC error "+
- "for circuit for Circuit(%x: "+
- "(%s, %d) <-> (%s, %d))", packet.circuit.PaymentHash,
- packet.incomingChanID, packet.incomingHTLCID,
- packet.outgoingChanID, packet.outgoingHTLCID)
-
- fail.Reason = circuit.ErrorEncrypter.EncryptMalformedError(
- fail.Reason,
- )
-
- default:
- // Otherwise, it's a forwarded error, so we'll perform a
- // wrapper encryption as normal.
- fail.Reason = circuit.ErrorEncrypter.IntermediateEncrypt(
- fail.Reason,
- )
- }
- } else if !isFail && circuit.Outgoing != nil {
- // If this is an HTLC settle, and it wasn't from a
- // locally initiated HTLC, then we'll log a forwarding
- // event so we can flush it to disk later.
- //
- // TODO(roasbeef): only do this once link actually
- // fully settles?
- localHTLC := packet.incomingChanID == hop.Source
- if !localHTLC {
- log.Infof("Forwarded HTLC(%x) of %v (fee: %v) "+
- "from IncomingChanID(%v) to OutgoingChanID(%v)",
- circuit.PaymentHash[:], circuit.OutgoingAmount,
- circuit.IncomingAmount-circuit.OutgoingAmount,
- circuit.Incoming.ChanID, circuit.Outgoing.ChanID)
- s.fwdEventMtx.Lock()
- s.pendingFwdingEvents = append(
- s.pendingFwdingEvents,
- channeldb.ForwardingEvent{
- Timestamp: time.Now(),
- IncomingChanID: circuit.Incoming.ChanID,
- OutgoingChanID: circuit.Outgoing.ChanID,
- AmtIn: circuit.IncomingAmount,
- AmtOut: circuit.OutgoingAmount,
- },
- )
- s.fwdEventMtx.Unlock()
- }
- }
-
- // A blank IncomingChanID in a circuit indicates that it is a pending
- // user-initiated payment.
- if packet.incomingChanID == hop.Source {
- s.wg.Add(1)
- go s.handleLocalResponse(packet)
- return nil
- }
-
- // Check to see that the source link is online before removing
- // the circuit.
- return s.mailOrchestrator.Deliver(packet.incomingChanID, packet)
-
- default:
- return er.New("wrong update type")
- }
-}
-
-// checkCircularForward checks whether a forward is circular (arrives and
-// departs on the same link) and returns a link error if the switch is
-// configured to disallow this behaviour.
-func checkCircularForward(incoming, outgoing lnwire.ShortChannelID,
- allowCircular bool, paymentHash lntypes.Hash) *LinkError {
-
- // If the route is not circular we do not need to perform any further
- // checks.
- if incoming != outgoing {
- return nil
- }
-
- // If the incoming and outgoing link are equal, the htlc is part of a
- // circular route which may be used to lock up our liquidity. If the
- // switch is configured to allow circular routes, log that we are
- // allowing the route then return nil.
- if allowCircular {
- log.Debugf("allowing circular route over link: %v "+
- "(payment hash: %x)", incoming, paymentHash)
- return nil
- }
-
- // If our node disallows circular routes, return a temporary channel
- // failure. There is nothing wrong with the policy used by the remote
- // node, so we do not include a channel update.
- return NewDetailedLinkError(
- lnwire.NewTemporaryChannelFailure(nil),
- OutgoingFailureCircularRoute,
- )
-}
-
-// failAddPacket encrypts a fail packet back to an add packet's source.
-// The ciphertext will be derived from the failure message proivded by context.
-// This method returns the failErr if all other steps complete successfully.
-func (s *Switch) failAddPacket(packet *htlcPacket, failure *LinkError) er.R {
- // Encrypt the failure so that the sender will be able to read the error
- // message. Since we failed this packet, we use EncryptFirstHop to
- // obfuscate the failure for their eyes only.
- reason, err := packet.obfuscator.EncryptFirstHop(failure.WireMessage())
- if err != nil {
- err := er.Errorf("unable to obfuscate "+
- "error: %v", err)
- log.Error(err)
- return err
- }
-
- log.Error(failure.Error())
-
- // Create a failure packet for this htlc. The the full set of
- // information about the htlc failure is included so that they can
- // be included in link failure notifications.
- failPkt := &htlcPacket{
- sourceRef: packet.sourceRef,
- incomingChanID: packet.incomingChanID,
- incomingHTLCID: packet.incomingHTLCID,
- outgoingChanID: packet.outgoingChanID,
- outgoingHTLCID: packet.outgoingHTLCID,
- incomingAmount: packet.incomingAmount,
- amount: packet.amount,
- incomingTimeout: packet.incomingTimeout,
- outgoingTimeout: packet.outgoingTimeout,
- circuit: packet.circuit,
- linkFailure: failure,
- htlc: &lnwire.UpdateFailHTLC{
- Reason: reason,
- },
- }
-
- // Route a fail packet back to the source link.
- err = s.mailOrchestrator.Deliver(failPkt.incomingChanID, failPkt)
- if err != nil {
- err = er.Errorf("source chanid=%v unable to "+
- "handle switch packet: %v",
- packet.incomingChanID, err)
- log.Error(err)
- return err
- }
-
- return er.E(failure)
-}
-
-// closeCircuit accepts a settle or fail htlc and the associated htlc packet and
-// attempts to determine the source that forwarded this htlc. This method will
-// set the incoming chan and htlc ID of the given packet if the source was
-// found, and will properly [re]encrypt any failure messages.
-func (s *Switch) closeCircuit(pkt *htlcPacket) (*PaymentCircuit, er.R) {
- // If the packet has its source, that means it was failed locally by
- // the outgoing link. We fail it here to make sure only one response
- // makes it through the switch.
- if pkt.hasSource {
- circuit, err := s.circuits.FailCircuit(pkt.inKey())
- switch {
-
- // Circuit successfully closed.
- case err == nil:
- return circuit, nil
-
- // Circuit was previously closed, but has not been deleted.
- // We'll just drop this response until the circuit has been
- // fully removed.
- case ErrCircuitClosing.Is(err):
- return nil, err
-
- // Failed to close circuit because it does not exist. This is
- // likely because the circuit was already successfully closed.
- // Since this packet failed locally, there is no forwarding
- // package entry to acknowledge.
- case ErrUnknownCircuit.Is(err):
- return nil, err
-
- // Unexpected error.
- default:
- return nil, err
- }
- }
-
- // Otherwise, this is packet was received from the remote party. Use
- // circuit map to find the incoming link to receive the settle/fail.
- circuit, err := s.circuits.CloseCircuit(pkt.outKey())
- switch {
-
- // Open circuit successfully closed.
- case err == nil:
- pkt.incomingChanID = circuit.Incoming.ChanID
- pkt.incomingHTLCID = circuit.Incoming.HtlcID
- pkt.circuit = circuit
- pkt.sourceRef = &circuit.AddRef
-
- pktType := "SETTLE"
- if _, ok := pkt.htlc.(*lnwire.UpdateFailHTLC); ok {
- pktType = "FAIL"
- }
-
- log.Debugf("Closed completed %s circuit for %x: "+
- "(%s, %d) <-> (%s, %d)", pktType, pkt.circuit.PaymentHash,
- pkt.incomingChanID, pkt.incomingHTLCID,
- pkt.outgoingChanID, pkt.outgoingHTLCID)
-
- return circuit, nil
-
- // Circuit was previously closed, but has not been deleted. We'll just
- // drop this response until the circuit has been removed.
- case ErrCircuitClosing.Is(err):
- return nil, err
-
- // Failed to close circuit because it does not exist. This is likely
- // because the circuit was already successfully closed.
- case ErrUnknownCircuit.Is(err):
- if pkt.destRef != nil {
- // Add this SettleFailRef to the set of pending settle/fail entries
- // awaiting acknowledgement.
- s.pendingSettleFails = append(s.pendingSettleFails, *pkt.destRef)
- }
-
- // If this is a settle, we will not log an error message as settles
- // are expected to hit the ErrUnknownCircuit case. The only way fails
- // can hit this case if the link restarts after having just sent a fail
- // to the switch.
- _, isSettle := pkt.htlc.(*lnwire.UpdateFulfillHTLC)
- if !isSettle {
- err := er.Errorf("unable to find target channel "+
- "for HTLC fail: channel ID = %s, "+
- "HTLC ID = %d", pkt.outgoingChanID,
- pkt.outgoingHTLCID)
- log.Error(err)
-
- return nil, err
- }
-
- return nil, nil
-
- // Unexpected error.
- default:
- return nil, err
- }
-}
-
-// ackSettleFail is used by the switch to ACK any settle/fail entries in the
-// forwarding package of the outgoing link for a payment circuit. We do this if
-// we're the originator of the payment, so the link stops attempting to
-// re-broadcast.
-func (s *Switch) ackSettleFail(settleFailRefs ...channeldb.SettleFailRef) er.R {
- return kvdb.Batch(s.cfg.DB.Backend, func(tx kvdb.RwTx) er.R {
- return s.cfg.SwitchPackager.AckSettleFails(tx, settleFailRefs...)
- })
-}
-
-// teardownCircuit removes a pending or open circuit from the switch's circuit
-// map and prints useful logging statements regarding the outcome.
-func (s *Switch) teardownCircuit(pkt *htlcPacket) er.R {
- var pktType string
- switch htlc := pkt.htlc.(type) {
- case *lnwire.UpdateFulfillHTLC:
- pktType = "SETTLE"
- case *lnwire.UpdateFailHTLC:
- pktType = "FAIL"
- default:
- err := er.Errorf("cannot tear down packet of type: %T", htlc)
- log.Errorf(err.String())
- return err
- }
-
- switch {
- case pkt.circuit.HasKeystone():
- log.Debugf("Tearing down open circuit with %s pkt, removing circuit=%v "+
- "with keystone=%v", pktType, pkt.inKey(), pkt.outKey())
-
- err := s.circuits.DeleteCircuits(pkt.inKey())
- if err != nil {
- log.Warnf("Failed to tear down open circuit (%s, %d) <-> (%s, %d) "+
- "with payment_hash-%v using %s pkt",
- pkt.incomingChanID, pkt.incomingHTLCID,
- pkt.outgoingChanID, pkt.outgoingHTLCID,
- pkt.circuit.PaymentHash, pktType)
- return err
- }
-
- log.Debugf("Closed completed %s circuit for %x: "+
- "(%s, %d) <-> (%s, %d)", pktType, pkt.circuit.PaymentHash,
- pkt.incomingChanID, pkt.incomingHTLCID,
- pkt.outgoingChanID, pkt.outgoingHTLCID)
-
- default:
- log.Debugf("Tearing down incomplete circuit with %s for inkey=%v",
- pktType, pkt.inKey())
-
- err := s.circuits.DeleteCircuits(pkt.inKey())
- if err != nil {
- log.Warnf("Failed to tear down pending %s circuit for %x: "+
- "(%s, %d)", pktType, pkt.circuit.PaymentHash,
- pkt.incomingChanID, pkt.incomingHTLCID)
- return err
- }
-
- log.Debugf("Removed pending onion circuit for %x: "+
- "(%s, %d)", pkt.circuit.PaymentHash,
- pkt.incomingChanID, pkt.incomingHTLCID)
- }
-
- return nil
-}
-
-// CloseLink creates and sends the close channel command to the target link
-// directing the specified closure type. If the closure type is CloseRegular,
-// targetFeePerKw parameter should be the ideal fee-per-kw that will be used as
-// a starting point for close negotiation. The deliveryScript parameter is an
-// optional parameter which sets a user specified script to close out to.
-func (s *Switch) CloseLink(chanPoint *wire.OutPoint,
- closeType ChannelCloseType, targetFeePerKw chainfee.SatPerKWeight,
- deliveryScript lnwire.DeliveryAddress) (chan interface{}, chan er.R) {
-
- // TODO(roasbeef) abstract out the close updates.
- updateChan := make(chan interface{}, 2)
- errChan := make(chan er.R, 1)
-
- command := &ChanClose{
- CloseType: closeType,
- ChanPoint: chanPoint,
- Updates: updateChan,
- TargetFeePerKw: targetFeePerKw,
- DeliveryScript: deliveryScript,
- Err: errChan,
- }
-
- select {
- case s.chanCloseRequests <- command:
- return updateChan, errChan
-
- case <-s.quit:
- errChan <- ErrSwitchExiting.Default()
- close(updateChan)
- return updateChan, errChan
- }
-}
-
-// htlcForwarder is responsible for optimally forwarding (and possibly
-// fragmenting) incoming/outgoing HTLCs amongst all active interfaces and their
-// links. The duties of the forwarder are similar to that of a network switch,
-// in that it facilitates multi-hop payments by acting as a central messaging
-// bus. The switch communicates will active links to create, manage, and tear
-// down active onion routed payments. Each active channel is modeled as
-// networked device with metadata such as the available payment bandwidth, and
-// total link capacity.
-//
-// NOTE: This MUST be run as a goroutine.
-func (s *Switch) htlcForwarder() {
- defer s.wg.Done()
-
- defer func() {
- s.blockEpochStream.Cancel()
-
- // Remove all links once we've been signalled for shutdown.
- var linksToStop []ChannelLink
- s.indexMtx.Lock()
- for _, link := range s.linkIndex {
- activeLink := s.removeLink(link.ChanID())
- if activeLink == nil {
- log.Errorf("unable to remove ChannelLink(%v) "+
- "on stop", link.ChanID())
- continue
- }
- linksToStop = append(linksToStop, activeLink)
- }
- for _, link := range s.pendingLinkIndex {
- pendingLink := s.removeLink(link.ChanID())
- if pendingLink == nil {
- log.Errorf("unable to remove ChannelLink(%v) "+
- "on stop", link.ChanID())
- continue
- }
- linksToStop = append(linksToStop, pendingLink)
- }
- s.indexMtx.Unlock()
-
- // Now that all pending and live links have been removed from
- // the forwarding indexes, stop each one before shutting down.
- // We'll shut them down in parallel to make exiting as fast as
- // possible.
- var wg sync.WaitGroup
- for _, link := range linksToStop {
- wg.Add(1)
- go func(l ChannelLink) {
- defer wg.Done()
- l.Stop()
- }(link)
- }
- wg.Wait()
-
- // Before we exit fully, we'll attempt to flush out any
- // forwarding events that may still be lingering since the last
- // batch flush.
- if err := s.FlushForwardingEvents(); err != nil {
- log.Errorf("unable to flush forwarding events: %v", err)
- }
- }()
-
- // TODO(roasbeef): cleared vs settled distinction
- var (
- totalNumUpdates uint64
- totalSatSent btcutil.Amount
- totalSatRecv btcutil.Amount
- )
- s.cfg.LogEventTicker.Resume()
- defer s.cfg.LogEventTicker.Stop()
-
- // Every 15 seconds, we'll flush out the forwarding events that
- // occurred during that period.
- s.cfg.FwdEventTicker.Resume()
- defer s.cfg.FwdEventTicker.Stop()
-
- defer s.cfg.AckEventTicker.Stop()
-
-out:
- for {
-
- // If the set of pending settle/fail entries is non-zero,
- // reinstate the ack ticker so we can batch ack them.
- if len(s.pendingSettleFails) > 0 {
- s.cfg.AckEventTicker.Resume()
- }
-
- select {
- case blockEpoch, ok := <-s.blockEpochStream.Epochs:
- if !ok {
- break out
- }
-
- atomic.StoreUint32(&s.bestHeight, uint32(blockEpoch.Height))
-
- // A local close request has arrived, we'll forward this to the
- // relevant link (if it exists) so the channel can be
- // cooperatively closed (if possible).
- case req := <-s.chanCloseRequests:
- chanID := lnwire.NewChanIDFromOutPoint(req.ChanPoint)
-
- s.indexMtx.RLock()
- link, ok := s.linkIndex[chanID]
- if !ok {
- s.indexMtx.RUnlock()
-
- req.Err <- er.Errorf("no peer for channel with "+
- "chan_id=%x", chanID[:])
- continue
- }
- s.indexMtx.RUnlock()
-
- peerPub := link.Peer().PubKey()
- log.Debugf("Requesting local channel close: peer=%v, "+
- "chan_id=%x", link.Peer(), chanID[:])
-
- go s.cfg.LocalChannelClose(peerPub[:], req)
-
- case resolutionMsg := <-s.resolutionMsgs:
- pkt := &htlcPacket{
- outgoingChanID: resolutionMsg.SourceChan,
- outgoingHTLCID: resolutionMsg.HtlcIndex,
- isResolution: true,
- }
-
- // Resolution messages will either be cancelling
- // backwards an existing HTLC, or settling a previously
- // outgoing HTLC. Based on this, we'll map the message
- // to the proper htlcPacket.
- if resolutionMsg.Failure != nil {
- pkt.htlc = &lnwire.UpdateFailHTLC{}
- } else {
- pkt.htlc = &lnwire.UpdateFulfillHTLC{
- PaymentPreimage: *resolutionMsg.PreImage,
- }
- }
-
- log.Infof("Received outside contract resolution, "+
- "mapping to: %v", spew.Sdump(pkt))
-
- // We don't check the error, as the only failure we can
- // encounter is due to the circuit already being
- // closed. This is fine, as processing this message is
- // meant to be idempotent.
- err := s.handlePacketForward(pkt)
- if err != nil {
- log.Errorf("Unable to forward resolution msg: %v", err)
- }
-
- // With the message processed, we'll now close out
- close(resolutionMsg.doneChan)
-
- // A new packet has arrived for forwarding, we'll interpret the
- // packet concretely, then either forward it along, or
- // interpret a return packet to a locally initialized one.
- case cmd := <-s.htlcPlex:
- cmd.err <- s.handlePacketForward(cmd.pkt)
-
- // When this time ticks, then it indicates that we should
- // collect all the forwarding events since the last internal,
- // and write them out to our log.
- case <-s.cfg.FwdEventTicker.Ticks():
- s.wg.Add(1)
- go func() {
- defer s.wg.Done()
-
- if err := s.FlushForwardingEvents(); err != nil {
- log.Errorf("unable to flush "+
- "forwarding events: %v", err)
- }
- }()
-
- // The log ticker has fired, so we'll calculate some forwarding
- // stats for the last 10 seconds to display within the logs to
- // users.
- case <-s.cfg.LogEventTicker.Ticks():
- // First, we'll collate the current running tally of
- // our forwarding stats.
- prevSatSent := totalSatSent
- prevSatRecv := totalSatRecv
- prevNumUpdates := totalNumUpdates
-
- var (
- newNumUpdates uint64
- newSatSent btcutil.Amount
- newSatRecv btcutil.Amount
- )
-
- // Next, we'll run through all the registered links and
- // compute their up-to-date forwarding stats.
- s.indexMtx.RLock()
- for _, link := range s.linkIndex {
- // TODO(roasbeef): when links first registered
- // stats printed.
- updates, sent, recv := link.Stats()
- newNumUpdates += updates
- newSatSent += sent.ToSatoshis()
- newSatRecv += recv.ToSatoshis()
- }
- s.indexMtx.RUnlock()
-
- var (
- diffNumUpdates uint64
- diffSatSent btcutil.Amount
- diffSatRecv btcutil.Amount
- )
-
- // If this is the first time we're computing these
- // stats, then the diff is just the new value. We do
- // this in order to avoid integer underflow issues.
- if prevNumUpdates == 0 {
- diffNumUpdates = newNumUpdates
- diffSatSent = newSatSent
- diffSatRecv = newSatRecv
- } else {
- diffNumUpdates = newNumUpdates - prevNumUpdates
- diffSatSent = newSatSent - prevSatSent
- diffSatRecv = newSatRecv - prevSatRecv
- }
-
- // If the diff of num updates is zero, then we haven't
- // forwarded anything in the last 10 seconds, so we can
- // skip this update.
- if diffNumUpdates == 0 {
- continue
- }
-
- // If the diff of num updates is negative, then some
- // links may have been unregistered from the switch, so
- // we'll update our stats to only include our registered
- // links.
- if int64(diffNumUpdates) < 0 {
- totalNumUpdates = newNumUpdates
- totalSatSent = newSatSent
- totalSatRecv = newSatRecv
- continue
- }
-
- // Otherwise, we'll log this diff, then accumulate the
- // new stats into the running total.
- log.Debugf("Sent %d satoshis and received %d satoshis "+
- "in the last 10 seconds (%f tx/sec)",
- diffSatSent, diffSatRecv,
- float64(diffNumUpdates)/10)
-
- totalNumUpdates += diffNumUpdates
- totalSatSent += diffSatSent
- totalSatRecv += diffSatRecv
-
- // The ack ticker has fired so if we have any settle/fail entries
- // for a forwarding package to ack, we will do so here in a batch
- // db call.
- case <-s.cfg.AckEventTicker.Ticks():
- // If the current set is empty, pause the ticker.
- if len(s.pendingSettleFails) == 0 {
- s.cfg.AckEventTicker.Pause()
- continue
- }
-
- // Batch ack the settle/fail entries.
- if err := s.ackSettleFail(s.pendingSettleFails...); err != nil {
- log.Errorf("Unable to ack batch of settle/fails: %v", err)
- continue
- }
-
- log.Tracef("Acked %d settle fails: %v", len(s.pendingSettleFails),
- log.C(func() string {
- return spew.Sdump(s.pendingSettleFails)
- }))
-
- // Reset the pendingSettleFails buffer while keeping acquired
- // memory.
- s.pendingSettleFails = s.pendingSettleFails[:0]
-
- case <-s.quit:
- return
- }
- }
-}
-
-// Start starts all helper goroutines required for the operation of the switch.
-func (s *Switch) Start() er.R {
- if !atomic.CompareAndSwapInt32(&s.started, 0, 1) {
- log.Warn("Htlc Switch already started")
- return er.New("htlc switch already started")
- }
-
- log.Infof("Starting HTLC Switch")
-
- blockEpochStream, err := s.cfg.Notifier.RegisterBlockEpochNtfn(nil)
- if err != nil {
- return err
- }
- s.blockEpochStream = blockEpochStream
-
- s.wg.Add(1)
- go s.htlcForwarder()
-
- if err := s.reforwardResponses(); err != nil {
- s.Stop()
- log.Errorf("unable to reforward responses: %v", err)
- return err
- }
-
- return nil
-}
-
-// reforwardResponses for every known, non-pending channel, loads all associated
-// forwarding packages and reforwards any Settle or Fail HTLCs found. This is
-// used to resurrect the switch's mailboxes after a restart.
-func (s *Switch) reforwardResponses() er.R {
- openChannels, err := s.cfg.DB.FetchAllOpenChannels()
- if err != nil {
- return err
- }
-
- for _, openChannel := range openChannels {
- shortChanID := openChannel.ShortChanID()
-
- // Locally-initiated payments never need reforwarding.
- if shortChanID == hop.Source {
- continue
- }
-
- // If the channel is pending, it should have no forwarding
- // packages, and nothing to reforward.
- if openChannel.IsPending {
- continue
- }
-
- // Channels in open or waiting-close may still have responses in
- // their forwarding packages. We will continue to reattempt
- // forwarding on startup until the channel is fully-closed.
- //
- // Load this channel's forwarding packages, and deliver them to
- // the switch.
- fwdPkgs, err := s.loadChannelFwdPkgs(shortChanID)
- if err != nil {
- log.Errorf("unable to load forwarding "+
- "packages for %v: %v", shortChanID, err)
- return err
- }
-
- s.reforwardSettleFails(fwdPkgs)
- }
-
- return nil
-}
-
-// loadChannelFwdPkgs loads all forwarding packages owned by the `source` short
-// channel identifier.
-func (s *Switch) loadChannelFwdPkgs(source lnwire.ShortChannelID) ([]*channeldb.FwdPkg, er.R) {
-
- var fwdPkgs []*channeldb.FwdPkg
- if err := kvdb.View(s.cfg.DB, func(tx kvdb.RTx) er.R {
- var err er.R
- fwdPkgs, err = s.cfg.SwitchPackager.LoadChannelFwdPkgs(
- tx, source,
- )
- return err
- }, func() {
- fwdPkgs = nil
- }); err != nil {
- return nil, err
- }
-
- return fwdPkgs, nil
-}
-
-// reforwardSettleFails parses the Settle and Fail HTLCs from the list of
-// forwarding packages, and reforwards those that have not been acknowledged.
-// This is intended to occur on startup, in order to recover the switch's
-// mailboxes, and to ensure that responses can be propagated in case the
-// outgoing link never comes back online.
-//
-// NOTE: This should mimic the behavior processRemoteSettleFails.
-func (s *Switch) reforwardSettleFails(fwdPkgs []*channeldb.FwdPkg) {
- for _, fwdPkg := range fwdPkgs {
- settleFails, err := lnwallet.PayDescsFromRemoteLogUpdates(
- fwdPkg.Source, fwdPkg.Height, fwdPkg.SettleFails,
- )
- if err != nil {
- log.Errorf("Unable to process remote log updates: %v",
- err)
- continue
- }
-
- switchPackets := make([]*htlcPacket, 0, len(settleFails))
- for i, pd := range settleFails {
-
- // Skip any settles or fails that have already been
- // acknowledged by the incoming link that originated the
- // forwarded Add.
- if fwdPkg.SettleFailFilter.Contains(uint16(i)) {
- continue
- }
-
- switch pd.EntryType {
-
- // A settle for an HTLC we previously forwarded HTLC has
- // been received. So we'll forward the HTLC to the
- // switch which will handle propagating the settle to
- // the prior hop.
- case lnwallet.Settle:
- settlePacket := &htlcPacket{
- outgoingChanID: fwdPkg.Source,
- outgoingHTLCID: pd.ParentIndex,
- destRef: pd.DestRef,
- htlc: &lnwire.UpdateFulfillHTLC{
- PaymentPreimage: pd.RPreimage,
- },
- }
-
- // Add the packet to the batch to be forwarded, and
- // notify the overflow queue that a spare spot has been
- // freed up within the commitment state.
- switchPackets = append(switchPackets, settlePacket)
-
- // A failureCode message for a previously forwarded HTLC has been
- // received. As a result a new slot will be freed up in our
- // commitment state, so we'll forward this to the switch so the
- // backwards undo can continue.
- case lnwallet.Fail:
- // Fetch the reason the HTLC was canceled so
- // we can continue to propagate it. This
- // failure originated from another node, so
- // the linkFailure field is not set on this
- // packet.
- failPacket := &htlcPacket{
- outgoingChanID: fwdPkg.Source,
- outgoingHTLCID: pd.ParentIndex,
- destRef: pd.DestRef,
- htlc: &lnwire.UpdateFailHTLC{
- Reason: lnwire.OpaqueReason(pd.FailReason),
- },
- }
-
- // Add the packet to the batch to be forwarded, and
- // notify the overflow queue that a spare spot has been
- // freed up within the commitment state.
- switchPackets = append(switchPackets, failPacket)
- }
- }
-
- // Since this send isn't tied to a specific link, we pass a nil
- // link quit channel, meaning the send will fail only if the
- // switch receives a shutdown request.
- if err := s.ForwardPackets(nil, switchPackets...); err != nil {
- log.Errorf("Unhandled error while reforwarding packets "+
- "settle/fail over htlcswitch: %v", err)
- }
- }
-}
-
-// Stop gracefully stops all active helper goroutines, then waits until they've
-// exited.
-func (s *Switch) Stop() er.R {
- if !atomic.CompareAndSwapInt32(&s.shutdown, 0, 1) {
- log.Warn("Htlc Switch already stopped")
- return er.New("htlc switch already shutdown")
- }
-
- log.Infof("HTLC Switch shutting down")
-
- close(s.quit)
-
- s.wg.Wait()
-
- // Wait until all active goroutines have finished exiting before
- // stopping the mailboxes, otherwise the mailbox map could still be
- // accessed and modified.
- s.mailOrchestrator.Stop()
-
- return nil
-}
-
-// AddLink is used to initiate the handling of the add link command. The
-// request will be propagated and handled in the main goroutine.
-func (s *Switch) AddLink(link ChannelLink) er.R {
- s.indexMtx.Lock()
- defer s.indexMtx.Unlock()
-
- chanID := link.ChanID()
-
- // First, ensure that this link is not already active in the switch.
- _, err := s.getLink(chanID)
- if err == nil {
- return er.Errorf("unable to add ChannelLink(%v), already "+
- "active", chanID)
- }
-
- // Get and attach the mailbox for this link, which buffers packets in
- // case there packets that we tried to deliver while this link was
- // offline.
- shortChanID := link.ShortChanID()
- mailbox := s.mailOrchestrator.GetOrCreateMailBox(chanID, shortChanID)
- link.AttachMailBox(mailbox)
-
- if err := link.Start(); err != nil {
- s.removeLink(chanID)
- return err
- }
-
- if shortChanID == hop.Source {
- log.Infof("Adding pending link chan_id=%v, short_chan_id=%v",
- chanID, shortChanID)
-
- s.pendingLinkIndex[chanID] = link
- } else {
- log.Infof("Adding live link chan_id=%v, short_chan_id=%v",
- chanID, shortChanID)
-
- s.addLiveLink(link)
- s.mailOrchestrator.BindLiveShortChanID(
- mailbox, chanID, shortChanID,
- )
- }
-
- return nil
-}
-
-// addLiveLink adds a link to all associated forwarding index, this makes it a
-// candidate for forwarding HTLCs.
-func (s *Switch) addLiveLink(link ChannelLink) {
- // We'll add the link to the linkIndex which lets us quickly
- // look up a channel when we need to close or register it, and
- // the forwarding index which'll be used when forwarding HTLC's
- // in the multi-hop setting.
- s.linkIndex[link.ChanID()] = link
- s.forwardingIndex[link.ShortChanID()] = link
-
- // Next we'll add the link to the interface index so we can
- // quickly look up all the channels for a particular node.
- peerPub := link.Peer().PubKey()
- if _, ok := s.interfaceIndex[peerPub]; !ok {
- s.interfaceIndex[peerPub] = make(map[lnwire.ChannelID]ChannelLink)
- }
- s.interfaceIndex[peerPub][link.ChanID()] = link
-}
-
-// GetLink is used to initiate the handling of the get link command. The
-// request will be propagated/handled to/in the main goroutine.
-func (s *Switch) GetLink(chanID lnwire.ChannelID) (ChannelLink, er.R) {
- s.indexMtx.RLock()
- defer s.indexMtx.RUnlock()
-
- return s.getLink(chanID)
-}
-
-// getLink returns the link stored in either the pending index or the live
-// lindex.
-func (s *Switch) getLink(chanID lnwire.ChannelID) (ChannelLink, er.R) {
- link, ok := s.linkIndex[chanID]
- if !ok {
- link, ok = s.pendingLinkIndex[chanID]
- if !ok {
- return nil, ErrChannelLinkNotFound.Default()
- }
- }
-
- return link, nil
-}
-
-// getLinkByShortID attempts to return the link which possesses the target
-// short channel ID.
-//
-// NOTE: This MUST be called with the indexMtx held.
-func (s *Switch) getLinkByShortID(chanID lnwire.ShortChannelID) (ChannelLink, er.R) {
- link, ok := s.forwardingIndex[chanID]
- if !ok {
- return nil, ErrChannelLinkNotFound.Default()
- }
-
- return link, nil
-}
-
-// HasActiveLink returns true if the given channel ID has a link in the link
-// index AND the link is eligible to forward.
-func (s *Switch) HasActiveLink(chanID lnwire.ChannelID) bool {
- s.indexMtx.RLock()
- defer s.indexMtx.RUnlock()
-
- if link, ok := s.linkIndex[chanID]; ok {
- return link.EligibleToForward()
- }
-
- return false
-}
-
-// RemoveLink purges the switch of any link associated with chanID. If a pending
-// or active link is not found, this method does nothing. Otherwise, the method
-// returns after the link has been completely shutdown.
-func (s *Switch) RemoveLink(chanID lnwire.ChannelID) {
- s.indexMtx.Lock()
- link := s.removeLink(chanID)
- s.indexMtx.Unlock()
-
- if link != nil {
- link.Stop()
- }
-}
-
-// removeLink is used to remove and stop the channel link.
-//
-// NOTE: This MUST be called with the indexMtx held.
-func (s *Switch) removeLink(chanID lnwire.ChannelID) ChannelLink {
- log.Infof("Removing channel link with ChannelID(%v)", chanID)
-
- link, err := s.getLink(chanID)
- if err != nil {
- return nil
- }
-
- // Remove the channel from live link indexes.
- delete(s.pendingLinkIndex, link.ChanID())
- delete(s.linkIndex, link.ChanID())
- delete(s.forwardingIndex, link.ShortChanID())
-
- // If the link has been added to the peer index, then we'll move to
- // delete the entry within the index.
- peerPub := link.Peer().PubKey()
- if peerIndex, ok := s.interfaceIndex[peerPub]; ok {
- delete(peerIndex, link.ChanID())
-
- // If after deletion, there are no longer any links, then we'll
- // remove the interface map all together.
- if len(peerIndex) == 0 {
- delete(s.interfaceIndex, peerPub)
- }
- }
-
- return link
-}
-
-// UpdateShortChanID updates the short chan ID for an existing channel. This is
-// required in the case of a re-org and re-confirmation or a channel, or in the
-// case that a link was added to the switch before its short chan ID was known.
-func (s *Switch) UpdateShortChanID(chanID lnwire.ChannelID) er.R {
- s.indexMtx.Lock()
- defer s.indexMtx.Unlock()
-
- // Locate the target link in the pending link index. If no such link
- // exists, then we will ignore the request.
- link, ok := s.pendingLinkIndex[chanID]
- if !ok {
- return er.Errorf("link %v not found", chanID)
- }
-
- oldShortChanID := link.ShortChanID()
-
- // Try to update the link's short channel ID, returning early if this
- // update failed.
- shortChanID, err := link.UpdateShortChanID()
- if err != nil {
- return err
- }
-
- // Reject any blank short channel ids.
- if shortChanID == hop.Source {
- return er.Errorf("refusing trivial short_chan_id for chan_id=%v"+
- "live link", chanID)
- }
-
- log.Infof("Updated short_chan_id for ChannelLink(%v): old=%v, new=%v",
- chanID, oldShortChanID, shortChanID)
-
- // Since the link was in the pending state before, we will remove it
- // from the pending link index and add it to the live link index so that
- // it can be available in forwarding.
- delete(s.pendingLinkIndex, chanID)
- s.addLiveLink(link)
-
- // Finally, alert the mail orchestrator to the change of short channel
- // ID, and deliver any unclaimed packets to the link.
- mailbox := s.mailOrchestrator.GetOrCreateMailBox(chanID, shortChanID)
- s.mailOrchestrator.BindLiveShortChanID(
- mailbox, chanID, shortChanID,
- )
-
- return nil
-}
-
-// GetLinksByInterface fetches all the links connected to a particular node
-// identified by the serialized compressed form of its public key.
-func (s *Switch) GetLinksByInterface(hop [33]byte) ([]ChannelLink, er.R) {
- s.indexMtx.RLock()
- defer s.indexMtx.RUnlock()
-
- return s.getLinks(hop)
-}
-
-// getLinks is function which returns the channel links of the peer by hop
-// destination id.
-//
-// NOTE: This MUST be called with the indexMtx held.
-func (s *Switch) getLinks(destination [33]byte) ([]ChannelLink, er.R) {
- links, ok := s.interfaceIndex[destination]
- if !ok {
- return nil, ErrNoLinksFound.Default()
- }
-
- channelLinks := make([]ChannelLink, 0, len(links))
- for _, link := range links {
- channelLinks = append(channelLinks, link)
- }
-
- return channelLinks, nil
-}
-
-// CircuitModifier returns a reference to subset of the interfaces provided by
-// the circuit map, to allow links to open and close circuits.
-func (s *Switch) CircuitModifier() CircuitModifier {
- return s.circuits
-}
-
-// CircuitLookup returns a reference to subset of the interfaces provided by the
-// circuit map, to allow looking up circuits.
-func (s *Switch) CircuitLookup() CircuitLookup {
- return s.circuits
-}
-
-// commitCircuits persistently adds a circuit to the switch's circuit map.
-func (s *Switch) commitCircuits(circuits ...*PaymentCircuit) (
- *CircuitFwdActions, er.R) {
-
- return s.circuits.CommitCircuits(circuits...)
-}
-
-// openCircuits preemptively writes the keystones for Adds that are about to be
-// added to a commitment txn.
-func (s *Switch) openCircuits(keystones ...Keystone) er.R {
- return s.circuits.OpenCircuits(keystones...)
-}
-
-// deleteCircuits persistently removes the circuit, and keystone if present,
-// from the circuit map.
-func (s *Switch) deleteCircuits(inKeys ...CircuitKey) er.R {
- return s.circuits.DeleteCircuits(inKeys...)
-}
-
-// FlushForwardingEvents flushes out the set of pending forwarding events to
-// the persistent log. This will be used by the switch to periodically flush
-// out the set of forwarding events to disk. External callers can also use this
-// method to ensure all data is flushed to dis before querying the log.
-func (s *Switch) FlushForwardingEvents() er.R {
- // First, we'll obtain a copy of the current set of pending forwarding
- // events.
- s.fwdEventMtx.Lock()
-
- // If we won't have any forwarding events, then we can exit early.
- if len(s.pendingFwdingEvents) == 0 {
- s.fwdEventMtx.Unlock()
- return nil
- }
-
- events := make([]channeldb.ForwardingEvent, len(s.pendingFwdingEvents))
- copy(events[:], s.pendingFwdingEvents[:])
-
- // With the copy obtained, we can now clear out the header pointer of
- // the current slice. This way, we can re-use the underlying storage
- // allocated for the slice.
- s.pendingFwdingEvents = s.pendingFwdingEvents[:0]
- s.fwdEventMtx.Unlock()
-
- // Finally, we'll write out the copied events to the persistent
- // forwarding log.
- return s.cfg.FwdingLog.AddForwardingEvents(events)
-}
-
-// BestHeight returns the best height known to the switch.
-func (s *Switch) BestHeight() uint32 {
- return atomic.LoadUint32(&s.bestHeight)
-}
diff --git a/lnd/htlcswitch/switch_test.go b/lnd/htlcswitch/switch_test.go
deleted file mode 100644
index e1216be5..00000000
--- a/lnd/htlcswitch/switch_test.go
+++ /dev/null
@@ -1,3303 +0,0 @@
-package htlcswitch
-
-import (
- "crypto/rand"
- "crypto/sha256"
- "io/ioutil"
- "reflect"
- "testing"
- "time"
-
- "github.com/davecgh/go-spew/spew"
- "github.com/pkt-cash/pktd/btcutil"
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/btcutil/util"
- "github.com/pkt-cash/pktd/lnd/channeldb"
- "github.com/pkt-cash/pktd/lnd/htlcswitch/hop"
- "github.com/pkt-cash/pktd/lnd/lntypes"
- "github.com/pkt-cash/pktd/lnd/lnwire"
- "github.com/pkt-cash/pktd/lnd/ticker"
-)
-
-var zeroCircuit = channeldb.CircuitKey{}
-
-func genPreimage() ([32]byte, er.R) {
- var preimage [32]byte
- if _, err := util.ReadFull(rand.Reader, preimage[:]); err != nil {
- return preimage, err
- }
- return preimage, nil
-}
-
-// TestSwitchAddDuplicateLink tests that the switch will reject duplicate links
-// for both pending and live links. It also tests that we can successfully
-// add a link after having removed it.
-func TestSwitchAddDuplicateLink(t *testing.T) {
- t.Parallel()
-
- alicePeer, err := newMockServer(
- t, "alice", testStartingHeight, nil, testDefaultDelta,
- )
- if err != nil {
- t.Fatalf("unable to create alice server: %v", err)
- }
-
- s, err := initSwitchWithDB(testStartingHeight, nil)
- if err != nil {
- t.Fatalf("unable to init switch: %v", err)
- }
- if err := s.Start(); err != nil {
- t.Fatalf("unable to start switch: %v", err)
- }
- defer s.Stop()
-
- chanID1, _, aliceChanID, _ := genIDs()
-
- pendingChanID := lnwire.ShortChannelID{}
-
- aliceChannelLink := newMockChannelLink(
- s, chanID1, pendingChanID, alicePeer, false,
- )
- if err := s.AddLink(aliceChannelLink); err != nil {
- t.Fatalf("unable to add alice link: %v", err)
- }
-
- // Alice should have a pending link, adding again should fail.
- if err := s.AddLink(aliceChannelLink); err == nil {
- t.Fatalf("adding duplicate link should have failed")
- }
-
- // Update the short chan id of the channel, so that the link goes live.
- aliceChannelLink.setLiveShortChanID(aliceChanID)
- err = s.UpdateShortChanID(chanID1)
- if err != nil {
- t.Fatalf("unable to update alice short_chan_id: %v", err)
- }
-
- // Alice should have a live link, adding again should fail.
- if err := s.AddLink(aliceChannelLink); err == nil {
- t.Fatalf("adding duplicate link should have failed")
- }
-
- // Remove the live link to ensure the indexes are cleared.
- s.RemoveLink(chanID1)
-
- // Alice has no links, adding should succeed.
- if err := s.AddLink(aliceChannelLink); err != nil {
- t.Fatalf("unable to add alice link: %v", err)
- }
-}
-
-// TestSwitchHasActiveLink tests the behavior of HasActiveLink, and asserts that
-// it only returns true if a link's short channel id has confirmed (meaning the
-// channel is no longer pending) and it's EligibleToForward method returns true,
-// i.e. it has received FundingLocked from the remote peer.
-func TestSwitchHasActiveLink(t *testing.T) {
- t.Parallel()
-
- alicePeer, err := newMockServer(
- t, "alice", testStartingHeight, nil, testDefaultDelta,
- )
- if err != nil {
- t.Fatalf("unable to create alice server: %v", err)
- }
-
- s, err := initSwitchWithDB(testStartingHeight, nil)
- if err != nil {
- t.Fatalf("unable to init switch: %v", err)
- }
- if err := s.Start(); err != nil {
- t.Fatalf("unable to start switch: %v", err)
- }
- defer s.Stop()
-
- chanID1, _, aliceChanID, _ := genIDs()
-
- pendingChanID := lnwire.ShortChannelID{}
-
- aliceChannelLink := newMockChannelLink(
- s, chanID1, pendingChanID, alicePeer, false,
- )
- if err := s.AddLink(aliceChannelLink); err != nil {
- t.Fatalf("unable to add alice link: %v", err)
- }
-
- // The link has been added, but it's still pending. HasActiveLink should
- // return false since the link has not been added to the linkIndex
- // containing live links.
- if s.HasActiveLink(chanID1) {
- t.Fatalf("link should not be active yet, still pending")
- }
-
- // Update the short chan id of the channel, so that the link goes live.
- aliceChannelLink.setLiveShortChanID(aliceChanID)
- err = s.UpdateShortChanID(chanID1)
- if err != nil {
- t.Fatalf("unable to update alice short_chan_id: %v", err)
- }
-
- // UpdateShortChanID will cause the mock link to become eligible to
- // forward. However, we can simulate the event where the short chan id
- // is confirmed, but funding locked has yet to be received by resetting
- // the mock link's eligibility to false.
- aliceChannelLink.eligible = false
-
- // Now, even though the link has been added to the linkIndex because the
- // short channel id has confirmed, we should still see HasActiveLink
- // fail because EligibleToForward should return false.
- if s.HasActiveLink(chanID1) {
- t.Fatalf("link should not be active yet, still ineligible")
- }
-
- // Finally, simulate the link receiving funding locked by setting its
- // eligibility to true.
- aliceChannelLink.eligible = true
-
- // The link should now be reported as active, since EligibleToForward
- // returns true and the link is in the linkIndex.
- if !s.HasActiveLink(chanID1) {
- t.Fatalf("link should not be active now")
- }
-}
-
-// TestSwitchSendPending checks the inability of htlc switch to forward adds
-// over pending links, and the UpdateShortChanID makes a pending link live.
-func TestSwitchSendPending(t *testing.T) {
- t.Parallel()
-
- alicePeer, err := newMockServer(
- t, "alice", testStartingHeight, nil, testDefaultDelta,
- )
- if err != nil {
- t.Fatalf("unable to create alice server: %v", err)
- }
-
- bobPeer, err := newMockServer(
- t, "bob", testStartingHeight, nil, testDefaultDelta,
- )
- if err != nil {
- t.Fatalf("unable to create bob server: %v", err)
- }
-
- s, err := initSwitchWithDB(testStartingHeight, nil)
- if err != nil {
- t.Fatalf("unable to init switch: %v", err)
- }
- if err := s.Start(); err != nil {
- t.Fatalf("unable to start switch: %v", err)
- }
- defer s.Stop()
-
- chanID1, chanID2, aliceChanID, bobChanID := genIDs()
-
- pendingChanID := lnwire.ShortChannelID{}
-
- aliceChannelLink := newMockChannelLink(
- s, chanID1, pendingChanID, alicePeer, false,
- )
- if err := s.AddLink(aliceChannelLink); err != nil {
- t.Fatalf("unable to add alice link: %v", err)
- }
-
- bobChannelLink := newMockChannelLink(
- s, chanID2, bobChanID, bobPeer, true,
- )
- if err := s.AddLink(bobChannelLink); err != nil {
- t.Fatalf("unable to add bob link: %v", err)
- }
-
- // Create request which should is being forwarded from Bob channel
- // link to Alice channel link.
- preimage, err := genPreimage()
- if err != nil {
- t.Fatalf("unable to generate preimage: %v", err)
- }
- rhash := sha256.Sum256(preimage[:])
- packet := &htlcPacket{
- incomingChanID: bobChanID,
- incomingHTLCID: 0,
- outgoingChanID: aliceChanID,
- obfuscator: NewMockObfuscator(),
- htlc: &lnwire.UpdateAddHTLC{
- PaymentHash: rhash,
- Amount: 1,
- },
- }
-
- // Send the ADD packet, this should not be forwarded out to the link
- // since there are no eligible links.
- if err = s.ForwardPackets(nil, packet); err != nil {
- t.Fatal(err)
- }
- select {
- case p := <-bobChannelLink.packets:
- if p.linkFailure != nil {
- err = er.E(p.linkFailure)
- }
- case <-time.After(time.Second):
- t.Fatal("no timely reply from switch")
- }
- errr := er.Wrapped(err)
- linkErr, ok := errr.(*LinkError)
- if !ok {
- t.Fatalf("expected link error, got: %T", err)
- }
- if linkErr.WireMessage().Code() != lnwire.CodeUnknownNextPeer {
- t.Fatalf("expected fail unknown next peer, got: %T",
- linkErr.WireMessage().Code())
- }
-
- // No message should be sent, since the packet was failed.
- select {
- case <-aliceChannelLink.packets:
- t.Fatal("expected not to receive message")
- case <-time.After(time.Second):
- }
-
- // Since the packet should have been failed, there should be no active
- // circuits.
- if s.circuits.NumOpen() != 0 {
- t.Fatal("wrong amount of circuits")
- }
-
- // Now, update Alice's link with her final short channel id. This should
- // move the link to the live state.
- aliceChannelLink.setLiveShortChanID(aliceChanID)
- err = s.UpdateShortChanID(chanID1)
- if err != nil {
- t.Fatalf("unable to update alice short_chan_id: %v", err)
- }
-
- // Increment the packet's HTLC index, so that it does not collide with
- // the prior attempt.
- packet.incomingHTLCID++
-
- // Handle the request and checks that bob channel link received it.
- if err := s.ForwardPackets(nil, packet); err != nil {
- t.Fatalf("unexpected forward failure: %v", err)
- }
-
- // Since Alice's link is now active, this packet should succeed.
- select {
- case <-aliceChannelLink.packets:
- case <-time.After(time.Second):
- t.Fatal("request was not propagated to alice")
- }
-}
-
-// TestSwitchForward checks the ability of htlc switch to forward add/settle
-// requests.
-func TestSwitchForward(t *testing.T) {
- t.Parallel()
-
- alicePeer, err := newMockServer(
- t, "alice", testStartingHeight, nil, testDefaultDelta,
- )
- if err != nil {
- t.Fatalf("unable to create alice server: %v", err)
- }
- bobPeer, err := newMockServer(
- t, "bob", testStartingHeight, nil, testDefaultDelta,
- )
- if err != nil {
- t.Fatalf("unable to create bob server: %v", err)
- }
-
- s, err := initSwitchWithDB(testStartingHeight, nil)
- if err != nil {
- t.Fatalf("unable to init switch: %v", err)
- }
- if err := s.Start(); err != nil {
- t.Fatalf("unable to start switch: %v", err)
- }
- defer s.Stop()
-
- chanID1, chanID2, aliceChanID, bobChanID := genIDs()
-
- aliceChannelLink := newMockChannelLink(
- s, chanID1, aliceChanID, alicePeer, true,
- )
- bobChannelLink := newMockChannelLink(
- s, chanID2, bobChanID, bobPeer, true,
- )
- if err := s.AddLink(aliceChannelLink); err != nil {
- t.Fatalf("unable to add alice link: %v", err)
- }
- if err := s.AddLink(bobChannelLink); err != nil {
- t.Fatalf("unable to add bob link: %v", err)
- }
-
- // Create request which should be forwarded from Alice channel link to
- // bob channel link.
- preimage, err := genPreimage()
- if err != nil {
- t.Fatalf("unable to generate preimage: %v", err)
- }
- rhash := sha256.Sum256(preimage[:])
- packet := &htlcPacket{
- incomingChanID: aliceChannelLink.ShortChanID(),
- incomingHTLCID: 0,
- outgoingChanID: bobChannelLink.ShortChanID(),
- obfuscator: NewMockObfuscator(),
- htlc: &lnwire.UpdateAddHTLC{
- PaymentHash: rhash,
- Amount: 1,
- },
- }
-
- // Handle the request and checks that bob channel link received it.
- if err := s.ForwardPackets(nil, packet); err != nil {
- t.Fatal(err)
- }
-
- select {
- case <-bobChannelLink.packets:
- if err := bobChannelLink.completeCircuit(packet); err != nil {
- t.Fatalf("unable to complete payment circuit: %v", err)
- }
- case <-time.After(time.Second):
- t.Fatal("request was not propagated to destination")
- }
-
- if s.circuits.NumOpen() != 1 {
- t.Fatal("wrong amount of circuits")
- }
-
- if !s.IsForwardedHTLC(bobChannelLink.ShortChanID(), 0) {
- t.Fatal("htlc should be identified as forwarded")
- }
-
- // Create settle request pretending that bob link handled the add htlc
- // request and sent the htlc settle request back. This request should
- // be forwarder back to Alice link.
- packet = &htlcPacket{
- outgoingChanID: bobChannelLink.ShortChanID(),
- outgoingHTLCID: 0,
- amount: 1,
- htlc: &lnwire.UpdateFulfillHTLC{
- PaymentPreimage: preimage,
- },
- }
-
- // Handle the request and checks that payment circuit works properly.
- if err := s.ForwardPackets(nil, packet); err != nil {
- t.Fatal(err)
- }
-
- select {
- case pkt := <-aliceChannelLink.packets:
- if err := aliceChannelLink.deleteCircuit(pkt); err != nil {
- t.Fatalf("unable to remove circuit: %v", err)
- }
- case <-time.After(time.Second):
- t.Fatal("request was not propagated to channelPoint")
- }
-
- if s.circuits.NumOpen() != 0 {
- t.Fatal("wrong amount of circuits")
- }
-}
-
-func TestSwitchForwardFailAfterFullAdd(t *testing.T) {
- t.Parallel()
-
- chanID1, chanID2, aliceChanID, bobChanID := genIDs()
-
- alicePeer, err := newMockServer(
- t, "alice", testStartingHeight, nil, testDefaultDelta,
- )
- if err != nil {
- t.Fatalf("unable to create alice server: %v", err)
- }
- bobPeer, err := newMockServer(
- t, "bob", testStartingHeight, nil, testDefaultDelta,
- )
- if err != nil {
- t.Fatalf("unable to create bob server: %v", err)
- }
-
- tempPath, errr := ioutil.TempDir("", "circuitdb")
- if errr != nil {
- t.Fatalf("unable to temporary path: %v", errr)
- }
-
- cdb, err := channeldb.Open(tempPath)
- if err != nil {
- t.Fatalf("unable to open channeldb: %v", err)
- }
-
- s, err := initSwitchWithDB(testStartingHeight, cdb)
- if err != nil {
- t.Fatalf("unable to init switch: %v", err)
- }
- if err := s.Start(); err != nil {
- t.Fatalf("unable to start switch: %v", err)
- }
-
- // Even though we intend to Stop s later in the test, it is safe to
- // defer this Stop since its execution it is protected by an atomic
- // guard, guaranteeing it executes at most once.
- defer s.Stop()
-
- aliceChannelLink := newMockChannelLink(
- s, chanID1, aliceChanID, alicePeer, true,
- )
- bobChannelLink := newMockChannelLink(
- s, chanID2, bobChanID, bobPeer, true,
- )
- if err := s.AddLink(aliceChannelLink); err != nil {
- t.Fatalf("unable to add alice link: %v", err)
- }
- if err := s.AddLink(bobChannelLink); err != nil {
- t.Fatalf("unable to add bob link: %v", err)
- }
-
- // Create request which should be forwarded from Alice channel link to
- // bob channel link.
- preimage := [sha256.Size]byte{1}
- rhash := sha256.Sum256(preimage[:])
- ogPacket := &htlcPacket{
- incomingChanID: aliceChannelLink.ShortChanID(),
- incomingHTLCID: 0,
- outgoingChanID: bobChannelLink.ShortChanID(),
- obfuscator: NewMockObfuscator(),
- htlc: &lnwire.UpdateAddHTLC{
- PaymentHash: rhash,
- Amount: 1,
- },
- }
-
- if s.circuits.NumPending() != 0 {
- t.Fatalf("wrong amount of half circuits")
- }
- if s.circuits.NumOpen() != 0 {
- t.Fatalf("wrong amount of circuits")
- }
-
- // Handle the request and checks that bob channel link received it.
- if err := s.ForwardPackets(nil, ogPacket); err != nil {
- t.Fatal(err)
- }
-
- if s.circuits.NumPending() != 1 {
- t.Fatalf("wrong amount of half circuits")
- }
- if s.circuits.NumOpen() != 0 {
- t.Fatalf("wrong amount of circuits")
- }
-
- // Pull packet from bob's link, but do not perform a full add.
- select {
- case packet := <-bobChannelLink.packets:
- // Complete the payment circuit and assign the outgoing htlc id
- // before restarting.
- if err := bobChannelLink.completeCircuit(packet); err != nil {
- t.Fatalf("unable to complete payment circuit: %v", err)
- }
-
- case <-time.After(time.Second):
- t.Fatal("request was not propagated to destination")
- }
-
- if s.circuits.NumPending() != 1 {
- t.Fatalf("wrong amount of half circuits")
- }
- if s.circuits.NumOpen() != 1 {
- t.Fatalf("wrong amount of circuits")
- }
-
- // Now we will restart bob, leaving the forwarding decision for this
- // htlc is in the half-added state.
- if err := s.Stop(); err != nil {
- t.Fatalf(err.String())
- }
-
- if err := cdb.Close(); err != nil {
- t.Fatalf(err.String())
- }
-
- cdb2, err := channeldb.Open(tempPath)
- if err != nil {
- t.Fatalf("unable to reopen channeldb: %v", err)
- }
-
- s2, err := initSwitchWithDB(testStartingHeight, cdb2)
- if err != nil {
- t.Fatalf("unable reinit switch: %v", err)
- }
- if err := s2.Start(); err != nil {
- t.Fatalf("unable to restart switch: %v", err)
- }
-
- // Even though we intend to Stop s2 later in the test, it is safe to
- // defer this Stop since its execution it is protected by an atomic
- // guard, guaranteeing it executes at most once.
- defer s2.Stop()
-
- aliceChannelLink = newMockChannelLink(
- s2, chanID1, aliceChanID, alicePeer, true,
- )
- bobChannelLink = newMockChannelLink(
- s2, chanID2, bobChanID, bobPeer, true,
- )
- if err := s2.AddLink(aliceChannelLink); err != nil {
- t.Fatalf("unable to add alice link: %v", err)
- }
- if err := s2.AddLink(bobChannelLink); err != nil {
- t.Fatalf("unable to add bob link: %v", err)
- }
-
- if s2.circuits.NumPending() != 1 {
- t.Fatalf("wrong amount of half circuits")
- }
- if s2.circuits.NumOpen() != 1 {
- t.Fatalf("wrong amount of circuits")
- }
-
- // Craft a failure message from the remote peer.
- fail := &htlcPacket{
- outgoingChanID: bobChannelLink.ShortChanID(),
- outgoingHTLCID: 0,
- amount: 1,
- htlc: &lnwire.UpdateFailHTLC{},
- }
-
- // Send the fail packet from the remote peer through the switch.
- if err := s2.ForwardPackets(nil, fail); err != nil {
- t.Fatalf(err.String())
- }
-
- // Pull packet from alice's link, as it should have gone through
- // successfully.
- select {
- case pkt := <-aliceChannelLink.packets:
- if err := aliceChannelLink.completeCircuit(pkt); err != nil {
- t.Fatalf("unable to remove circuit: %v", err)
- }
- case <-time.After(time.Second):
- t.Fatal("request was not propagated to destination")
- }
-
- // Circuit map should be empty now.
- if s2.circuits.NumPending() != 0 {
- t.Fatalf("wrong amount of half circuits")
- }
- if s2.circuits.NumOpen() != 0 {
- t.Fatalf("wrong amount of circuits")
- }
-
- // Send the fail packet from the remote peer through the switch.
- if err := s.ForwardPackets(nil, fail); err != nil {
- t.Fatal(err)
- }
- select {
- case <-aliceChannelLink.packets:
- t.Fatalf("expected duplicate fail to not arrive at the destination")
- case <-time.After(time.Second):
- }
-}
-
-func TestSwitchForwardSettleAfterFullAdd(t *testing.T) {
- t.Parallel()
-
- chanID1, chanID2, aliceChanID, bobChanID := genIDs()
-
- alicePeer, err := newMockServer(
- t, "alice", testStartingHeight, nil, testDefaultDelta,
- )
- if err != nil {
- t.Fatalf("unable to create alice server: %v", err)
- }
- bobPeer, err := newMockServer(
- t, "bob", testStartingHeight, nil, testDefaultDelta,
- )
- if err != nil {
- t.Fatalf("unable to create bob server: %v", err)
- }
-
- tempPath, errr := ioutil.TempDir("", "circuitdb")
- if errr != nil {
- t.Fatalf("unable to temporary path: %v", errr)
- }
-
- cdb, err := channeldb.Open(tempPath)
- if err != nil {
- t.Fatalf("unable to open channeldb: %v", err)
- }
-
- s, err := initSwitchWithDB(testStartingHeight, cdb)
- if err != nil {
- t.Fatalf("unable to init switch: %v", err)
- }
- if err := s.Start(); err != nil {
- t.Fatalf("unable to start switch: %v", err)
- }
-
- // Even though we intend to Stop s later in the test, it is safe to
- // defer this Stop since its execution it is protected by an atomic
- // guard, guaranteeing it executes at most once.
- defer s.Stop()
-
- aliceChannelLink := newMockChannelLink(
- s, chanID1, aliceChanID, alicePeer, true,
- )
- bobChannelLink := newMockChannelLink(
- s, chanID2, bobChanID, bobPeer, true,
- )
- if err := s.AddLink(aliceChannelLink); err != nil {
- t.Fatalf("unable to add alice link: %v", err)
- }
- if err := s.AddLink(bobChannelLink); err != nil {
- t.Fatalf("unable to add bob link: %v", err)
- }
-
- // Create request which should be forwarded from Alice channel link to
- // bob channel link.
- preimage := [sha256.Size]byte{1}
- rhash := sha256.Sum256(preimage[:])
- ogPacket := &htlcPacket{
- incomingChanID: aliceChannelLink.ShortChanID(),
- incomingHTLCID: 0,
- outgoingChanID: bobChannelLink.ShortChanID(),
- obfuscator: NewMockObfuscator(),
- htlc: &lnwire.UpdateAddHTLC{
- PaymentHash: rhash,
- Amount: 1,
- },
- }
-
- if s.circuits.NumPending() != 0 {
- t.Fatalf("wrong amount of half circuits")
- }
- if s.circuits.NumOpen() != 0 {
- t.Fatalf("wrong amount of circuits")
- }
-
- // Handle the request and checks that bob channel link received it.
- if err := s.ForwardPackets(nil, ogPacket); err != nil {
- t.Fatal(err)
- }
-
- if s.circuits.NumPending() != 1 {
- t.Fatalf("wrong amount of half circuits")
- }
- if s.circuits.NumOpen() != 0 {
- t.Fatalf("wrong amount of circuits")
- }
-
- // Pull packet from bob's link, but do not perform a full add.
- select {
- case packet := <-bobChannelLink.packets:
- // Complete the payment circuit and assign the outgoing htlc id
- // before restarting.
- if err := bobChannelLink.completeCircuit(packet); err != nil {
- t.Fatalf("unable to complete payment circuit: %v", err)
- }
-
- case <-time.After(time.Second):
- t.Fatal("request was not propagated to destination")
- }
-
- if s.circuits.NumPending() != 1 {
- t.Fatalf("wrong amount of half circuits")
- }
- if s.circuits.NumOpen() != 1 {
- t.Fatalf("wrong amount of circuits")
- }
-
- // Now we will restart bob, leaving the forwarding decision for this
- // htlc is in the half-added state.
- if err := s.Stop(); err != nil {
- t.Fatalf(err.String())
- }
-
- if err := cdb.Close(); err != nil {
- t.Fatalf(err.String())
- }
-
- cdb2, err := channeldb.Open(tempPath)
- if err != nil {
- t.Fatalf("unable to reopen channeldb: %v", err)
- }
-
- s2, err := initSwitchWithDB(testStartingHeight, cdb2)
- if err != nil {
- t.Fatalf("unable reinit switch: %v", err)
- }
- if err := s2.Start(); err != nil {
- t.Fatalf("unable to restart switch: %v", err)
- }
-
- // Even though we intend to Stop s2 later in the test, it is safe to
- // defer this Stop since its execution it is protected by an atomic
- // guard, guaranteeing it executes at most once.
- defer s2.Stop()
-
- aliceChannelLink = newMockChannelLink(
- s2, chanID1, aliceChanID, alicePeer, true,
- )
- bobChannelLink = newMockChannelLink(
- s2, chanID2, bobChanID, bobPeer, true,
- )
- if err := s2.AddLink(aliceChannelLink); err != nil {
- t.Fatalf("unable to add alice link: %v", err)
- }
- if err := s2.AddLink(bobChannelLink); err != nil {
- t.Fatalf("unable to add bob link: %v", err)
- }
-
- if s2.circuits.NumPending() != 1 {
- t.Fatalf("wrong amount of half circuits")
- }
- if s2.circuits.NumOpen() != 1 {
- t.Fatalf("wrong amount of circuits")
- }
-
- // Craft a settle message from the remote peer.
- settle := &htlcPacket{
- outgoingChanID: bobChannelLink.ShortChanID(),
- outgoingHTLCID: 0,
- amount: 1,
- htlc: &lnwire.UpdateFulfillHTLC{
- PaymentPreimage: preimage,
- },
- }
-
- // Send the settle packet from the remote peer through the switch.
- if err := s2.ForwardPackets(nil, settle); err != nil {
- t.Fatalf(err.String())
- }
-
- // Pull packet from alice's link, as it should have gone through
- // successfully.
- select {
- case packet := <-aliceChannelLink.packets:
- if err := aliceChannelLink.completeCircuit(packet); err != nil {
- t.Fatalf("unable to complete circuit with in key=%s: %v",
- packet.inKey(), err)
- }
- case <-time.After(time.Second):
- t.Fatal("request was not propagated to destination")
- }
-
- // Circuit map should be empty now.
- if s2.circuits.NumPending() != 0 {
- t.Fatalf("wrong amount of half circuits")
- }
- if s2.circuits.NumOpen() != 0 {
- t.Fatalf("wrong amount of circuits")
- }
-
- // Send the settle packet again, which not arrive at destination.
- if err := s2.ForwardPackets(nil, settle); err != nil {
- t.Fatal(err)
- }
- select {
- case <-bobChannelLink.packets:
- t.Fatalf("expected duplicate fail to not arrive at the destination")
- case <-time.After(time.Second):
- }
-}
-
-func TestSwitchForwardDropAfterFullAdd(t *testing.T) {
- t.Parallel()
-
- chanID1, chanID2, aliceChanID, bobChanID := genIDs()
-
- alicePeer, err := newMockServer(
- t, "alice", testStartingHeight, nil, testDefaultDelta,
- )
- if err != nil {
- t.Fatalf("unable to create alice server: %v", err)
- }
- bobPeer, err := newMockServer(
- t, "bob", testStartingHeight, nil, testDefaultDelta,
- )
- if err != nil {
- t.Fatalf("unable to create bob server: %v", err)
- }
-
- tempPath, errr := ioutil.TempDir("", "circuitdb")
- if errr != nil {
- t.Fatalf("unable to temporary path: %v", errr)
- }
-
- cdb, err := channeldb.Open(tempPath)
- if err != nil {
- t.Fatalf("unable to open channeldb: %v", err)
- }
-
- s, err := initSwitchWithDB(testStartingHeight, cdb)
- if err != nil {
- t.Fatalf("unable to init switch: %v", err)
- }
- if err := s.Start(); err != nil {
- t.Fatalf("unable to start switch: %v", err)
- }
-
- // Even though we intend to Stop s later in the test, it is safe to
- // defer this Stop since its execution it is protected by an atomic
- // guard, guaranteeing it executes at most once.
- defer s.Stop()
-
- aliceChannelLink := newMockChannelLink(
- s, chanID1, aliceChanID, alicePeer, true,
- )
- bobChannelLink := newMockChannelLink(
- s, chanID2, bobChanID, bobPeer, true,
- )
- if err := s.AddLink(aliceChannelLink); err != nil {
- t.Fatalf("unable to add alice link: %v", err)
- }
- if err := s.AddLink(bobChannelLink); err != nil {
- t.Fatalf("unable to add bob link: %v", err)
- }
-
- // Create request which should be forwarded from Alice channel link to
- // bob channel link.
- preimage := [sha256.Size]byte{1}
- rhash := sha256.Sum256(preimage[:])
- ogPacket := &htlcPacket{
- incomingChanID: aliceChannelLink.ShortChanID(),
- incomingHTLCID: 0,
- outgoingChanID: bobChannelLink.ShortChanID(),
- obfuscator: NewMockObfuscator(),
- htlc: &lnwire.UpdateAddHTLC{
- PaymentHash: rhash,
- Amount: 1,
- },
- }
-
- if s.circuits.NumPending() != 0 {
- t.Fatalf("wrong amount of half circuits")
- }
- if s.circuits.NumOpen() != 0 {
- t.Fatalf("wrong amount of circuits")
- }
-
- // Handle the request and checks that bob channel link received it.
- if err := s.ForwardPackets(nil, ogPacket); err != nil {
- t.Fatal(err)
- }
-
- if s.circuits.NumPending() != 1 {
- t.Fatalf("wrong amount of half circuits")
- }
- if s.circuits.NumOpen() != 0 {
- t.Fatalf("wrong amount of half circuits")
- }
-
- // Pull packet from bob's link, but do not perform a full add.
- select {
- case packet := <-bobChannelLink.packets:
- // Complete the payment circuit and assign the outgoing htlc id
- // before restarting.
- if err := bobChannelLink.completeCircuit(packet); err != nil {
- t.Fatalf("unable to complete payment circuit: %v", err)
- }
- case <-time.After(time.Second):
- t.Fatal("request was not propagated to destination")
- }
-
- // Now we will restart bob, leaving the forwarding decision for this
- // htlc is in the half-added state.
- if err := s.Stop(); err != nil {
- t.Fatalf(err.String())
- }
-
- if err := cdb.Close(); err != nil {
- t.Fatalf(err.String())
- }
-
- cdb2, err := channeldb.Open(tempPath)
- if err != nil {
- t.Fatalf("unable to reopen channeldb: %v", err)
- }
-
- s2, err := initSwitchWithDB(testStartingHeight, cdb2)
- if err != nil {
- t.Fatalf("unable reinit switch: %v", err)
- }
- if err := s2.Start(); err != nil {
- t.Fatalf("unable to restart switch: %v", err)
- }
-
- // Even though we intend to Stop s2 later in the test, it is safe to
- // defer this Stop since its execution it is protected by an atomic
- // guard, guaranteeing it executes at most once.
- defer s2.Stop()
-
- aliceChannelLink = newMockChannelLink(
- s2, chanID1, aliceChanID, alicePeer, true,
- )
- bobChannelLink = newMockChannelLink(
- s2, chanID2, bobChanID, bobPeer, true,
- )
- if err := s2.AddLink(aliceChannelLink); err != nil {
- t.Fatalf("unable to add alice link: %v", err)
- }
- if err := s2.AddLink(bobChannelLink); err != nil {
- t.Fatalf("unable to add bob link: %v", err)
- }
-
- if s2.circuits.NumPending() != 1 {
- t.Fatalf("wrong amount of half circuits")
- }
- if s2.circuits.NumOpen() != 1 {
- t.Fatalf("wrong amount of half circuits")
- }
-
- // Resend the failed htlc. The packet will be dropped silently since the
- // switch will detect that it has been half added previously.
- if err := s2.ForwardPackets(nil, ogPacket); err != nil {
- t.Fatal(err)
- }
-
- // After detecting an incomplete forward, the fail packet should have
- // been returned to the sender.
- select {
- case <-aliceChannelLink.packets:
- t.Fatal("request should not have returned to source")
- case <-bobChannelLink.packets:
- t.Fatal("request should not have forwarded to destination")
- case <-time.After(time.Second):
- }
-}
-
-func TestSwitchForwardFailAfterHalfAdd(t *testing.T) {
- t.Parallel()
-
- chanID1, chanID2, aliceChanID, bobChanID := genIDs()
-
- alicePeer, err := newMockServer(
- t, "alice", testStartingHeight, nil, testDefaultDelta,
- )
- if err != nil {
- t.Fatalf("unable to create alice server: %v", err)
- }
- bobPeer, err := newMockServer(
- t, "bob", testStartingHeight, nil, testDefaultDelta,
- )
- if err != nil {
- t.Fatalf("unable to create bob server: %v", err)
- }
-
- tempPath, errr := ioutil.TempDir("", "circuitdb")
- if errr != nil {
- t.Fatalf("unable to temporary path: %v", errr)
- }
-
- cdb, err := channeldb.Open(tempPath)
- if err != nil {
- t.Fatalf("unable to open channeldb: %v", err)
- }
-
- s, err := initSwitchWithDB(testStartingHeight, cdb)
- if err != nil {
- t.Fatalf("unable to init switch: %v", err)
- }
- if err := s.Start(); err != nil {
- t.Fatalf("unable to start switch: %v", err)
- }
-
- // Even though we intend to Stop s later in the test, it is safe to
- // defer this Stop since its execution it is protected by an atomic
- // guard, guaranteeing it executes at most once.
- defer s.Stop()
-
- aliceChannelLink := newMockChannelLink(
- s, chanID1, aliceChanID, alicePeer, true,
- )
- bobChannelLink := newMockChannelLink(
- s, chanID2, bobChanID, bobPeer, true,
- )
- if err := s.AddLink(aliceChannelLink); err != nil {
- t.Fatalf("unable to add alice link: %v", err)
- }
- if err := s.AddLink(bobChannelLink); err != nil {
- t.Fatalf("unable to add bob link: %v", err)
- }
-
- // Create request which should be forwarded from Alice channel link to
- // bob channel link.
- preimage := [sha256.Size]byte{1}
- rhash := sha256.Sum256(preimage[:])
- ogPacket := &htlcPacket{
- incomingChanID: aliceChannelLink.ShortChanID(),
- incomingHTLCID: 0,
- outgoingChanID: bobChannelLink.ShortChanID(),
- obfuscator: NewMockObfuscator(),
- htlc: &lnwire.UpdateAddHTLC{
- PaymentHash: rhash,
- Amount: 1,
- },
- }
-
- if s.circuits.NumPending() != 0 {
- t.Fatalf("wrong amount of half circuits")
- }
- if s.circuits.NumOpen() != 0 {
- t.Fatalf("wrong amount of circuits")
- }
-
- // Handle the request and checks that bob channel link received it.
- if err := s.ForwardPackets(nil, ogPacket); err != nil {
- t.Fatal(err)
- }
-
- if s.circuits.NumPending() != 1 {
- t.Fatalf("wrong amount of half circuits")
- }
- if s.circuits.NumOpen() != 0 {
- t.Fatalf("wrong amount of half circuits")
- }
-
- // Pull packet from bob's link, but do not perform a full add.
- select {
- case <-bobChannelLink.packets:
- case <-time.After(time.Second):
- t.Fatal("request was not propagated to destination")
- }
-
- // Now we will restart bob, leaving the forwarding decision for this
- // htlc is in the half-added state.
- if err := s.Stop(); err != nil {
- t.Fatalf(err.String())
- }
-
- if err := cdb.Close(); err != nil {
- t.Fatalf(err.String())
- }
-
- cdb2, err := channeldb.Open(tempPath)
- if err != nil {
- t.Fatalf("unable to reopen channeldb: %v", err)
- }
-
- s2, err := initSwitchWithDB(testStartingHeight, cdb2)
- if err != nil {
- t.Fatalf("unable reinit switch: %v", err)
- }
- if err := s2.Start(); err != nil {
- t.Fatalf("unable to restart switch: %v", err)
- }
-
- // Even though we intend to Stop s2 later in the test, it is safe to
- // defer this Stop since its execution it is protected by an atomic
- // guard, guaranteeing it executes at most once.
- defer s2.Stop()
-
- aliceChannelLink = newMockChannelLink(
- s2, chanID1, aliceChanID, alicePeer, true,
- )
- bobChannelLink = newMockChannelLink(
- s2, chanID2, bobChanID, bobPeer, true,
- )
- if err := s2.AddLink(aliceChannelLink); err != nil {
- t.Fatalf("unable to add alice link: %v", err)
- }
- if err := s2.AddLink(bobChannelLink); err != nil {
- t.Fatalf("unable to add bob link: %v", err)
- }
-
- if s2.circuits.NumPending() != 1 {
- t.Fatalf("wrong amount of half circuits")
- }
- if s2.circuits.NumOpen() != 0 {
- t.Fatalf("wrong amount of half circuits")
- }
-
- // Resend the failed htlc, it should be returned to alice since the
- // switch will detect that it has been half added previously.
- err = s2.ForwardPackets(nil, ogPacket)
- if err != nil {
- t.Fatal(err)
- }
-
- // After detecting an incomplete forward, the fail packet should have
- // been returned to the sender.
- select {
- case pkt := <-aliceChannelLink.packets:
- linkErr := pkt.linkFailure
- if linkErr.FailureDetail != OutgoingFailureIncompleteForward {
- t.Fatalf("expected incomplete forward, got: %v",
- linkErr.FailureDetail)
- }
- case <-time.After(time.Second):
- t.Fatal("request was not propagated to destination")
- }
-}
-
-// TestSwitchForwardCircuitPersistence checks the ability of htlc switch to
-// maintain the proper entries in the circuit map in the face of restarts.
-func TestSwitchForwardCircuitPersistence(t *testing.T) {
- t.Parallel()
-
- chanID1, chanID2, aliceChanID, bobChanID := genIDs()
-
- alicePeer, err := newMockServer(
- t, "alice", testStartingHeight, nil, testDefaultDelta,
- )
- if err != nil {
- t.Fatalf("unable to create alice server: %v", err)
- }
- bobPeer, err := newMockServer(
- t, "bob", testStartingHeight, nil, testDefaultDelta,
- )
- if err != nil {
- t.Fatalf("unable to create bob server: %v", err)
- }
-
- tempPath, errr := ioutil.TempDir("", "circuitdb")
- if errr != nil {
- t.Fatalf("unable to temporary path: %v", errr)
- }
-
- cdb, err := channeldb.Open(tempPath)
- if err != nil {
- t.Fatalf("unable to open channeldb: %v", err)
- }
-
- s, err := initSwitchWithDB(testStartingHeight, cdb)
- if err != nil {
- t.Fatalf("unable to init switch: %v", err)
- }
- if err := s.Start(); err != nil {
- t.Fatalf("unable to start switch: %v", err)
- }
-
- // Even though we intend to Stop s later in the test, it is safe to
- // defer this Stop since its execution it is protected by an atomic
- // guard, guaranteeing it executes at most once.
- defer s.Stop()
-
- aliceChannelLink := newMockChannelLink(
- s, chanID1, aliceChanID, alicePeer, true,
- )
- bobChannelLink := newMockChannelLink(
- s, chanID2, bobChanID, bobPeer, true,
- )
- if err := s.AddLink(aliceChannelLink); err != nil {
- t.Fatalf("unable to add alice link: %v", err)
- }
- if err := s.AddLink(bobChannelLink); err != nil {
- t.Fatalf("unable to add bob link: %v", err)
- }
-
- // Create request which should be forwarded from Alice channel link to
- // bob channel link.
- preimage := [sha256.Size]byte{1}
- rhash := sha256.Sum256(preimage[:])
- ogPacket := &htlcPacket{
- incomingChanID: aliceChannelLink.ShortChanID(),
- incomingHTLCID: 0,
- outgoingChanID: bobChannelLink.ShortChanID(),
- obfuscator: NewMockObfuscator(),
- htlc: &lnwire.UpdateAddHTLC{
- PaymentHash: rhash,
- Amount: 1,
- },
- }
-
- if s.circuits.NumPending() != 0 {
- t.Fatalf("wrong amount of half circuits")
- }
- if s.circuits.NumOpen() != 0 {
- t.Fatalf("wrong amount of circuits")
- }
-
- // Handle the request and checks that bob channel link received it.
- if err := s.ForwardPackets(nil, ogPacket); err != nil {
- t.Fatal(err)
- }
-
- if s.circuits.NumPending() != 1 {
- t.Fatalf("wrong amount of half circuits")
- }
- if s.circuits.NumOpen() != 0 {
- t.Fatalf("wrong amount of circuits")
- }
-
- // Retrieve packet from outgoing link and cache until after restart.
- var packet *htlcPacket
- select {
- case packet = <-bobChannelLink.packets:
- case <-time.After(time.Second):
- t.Fatal("request was not propagated to destination")
- }
-
- if err := s.Stop(); err != nil {
- t.Fatalf(err.String())
- }
-
- if err := cdb.Close(); err != nil {
- t.Fatalf(err.String())
- }
-
- cdb2, err := channeldb.Open(tempPath)
- if err != nil {
- t.Fatalf("unable to reopen channeldb: %v", err)
- }
-
- s2, err := initSwitchWithDB(testStartingHeight, cdb2)
- if err != nil {
- t.Fatalf("unable reinit switch: %v", err)
- }
- if err := s2.Start(); err != nil {
- t.Fatalf("unable to restart switch: %v", err)
- }
-
- // Even though we intend to Stop s2 later in the test, it is safe to
- // defer this Stop since its execution it is protected by an atomic
- // guard, guaranteeing it executes at most once.
- defer s2.Stop()
-
- aliceChannelLink = newMockChannelLink(
- s2, chanID1, aliceChanID, alicePeer, true,
- )
- bobChannelLink = newMockChannelLink(
- s2, chanID2, bobChanID, bobPeer, true,
- )
- if err := s2.AddLink(aliceChannelLink); err != nil {
- t.Fatalf("unable to add alice link: %v", err)
- }
- if err := s2.AddLink(bobChannelLink); err != nil {
- t.Fatalf("unable to add bob link: %v", err)
- }
-
- if s2.circuits.NumPending() != 1 {
- t.Fatalf("wrong amount of half circuits")
- }
- if s2.circuits.NumOpen() != 0 {
- t.Fatalf("wrong amount of half circuits")
- }
-
- // Now that the switch has restarted, complete the payment circuit.
- if err := bobChannelLink.completeCircuit(packet); err != nil {
- t.Fatalf("unable to complete payment circuit: %v", err)
- }
-
- if s2.circuits.NumPending() != 1 {
- t.Fatalf("wrong amount of half circuits")
- }
- if s2.circuits.NumOpen() != 1 {
- t.Fatal("wrong amount of circuits")
- }
-
- // Create settle request pretending that bob link handled the add htlc
- // request and sent the htlc settle request back. This request should
- // be forwarder back to Alice link.
- ogPacket = &htlcPacket{
- outgoingChanID: bobChannelLink.ShortChanID(),
- outgoingHTLCID: 0,
- amount: 1,
- htlc: &lnwire.UpdateFulfillHTLC{
- PaymentPreimage: preimage,
- },
- }
-
- // Handle the request and checks that payment circuit works properly.
- if err := s2.ForwardPackets(nil, ogPacket); err != nil {
- t.Fatal(err)
- }
-
- select {
- case packet = <-aliceChannelLink.packets:
- if err := aliceChannelLink.completeCircuit(packet); err != nil {
- t.Fatalf("unable to complete circuit with in key=%s: %v",
- packet.inKey(), err)
- }
- case <-time.After(time.Second):
- t.Fatal("request was not propagated to channelPoint")
- }
-
- if s2.circuits.NumPending() != 0 {
- t.Fatalf("wrong amount of half circuits, want 1, got %d",
- s2.circuits.NumPending())
- }
- if s2.circuits.NumOpen() != 0 {
- t.Fatal("wrong amount of circuits")
- }
-
- if err := s2.Stop(); err != nil {
- t.Fatal(err)
- }
-
- if err := cdb2.Close(); err != nil {
- t.Fatalf(err.String())
- }
-
- cdb3, err := channeldb.Open(tempPath)
- if err != nil {
- t.Fatalf("unable to reopen channeldb: %v", err)
- }
-
- s3, err := initSwitchWithDB(testStartingHeight, cdb3)
- if err != nil {
- t.Fatalf("unable reinit switch: %v", err)
- }
- if err := s3.Start(); err != nil {
- t.Fatalf("unable to restart switch: %v", err)
- }
- defer s3.Stop()
-
- aliceChannelLink = newMockChannelLink(
- s3, chanID1, aliceChanID, alicePeer, true,
- )
- bobChannelLink = newMockChannelLink(
- s3, chanID2, bobChanID, bobPeer, true,
- )
- if err := s3.AddLink(aliceChannelLink); err != nil {
- t.Fatalf("unable to add alice link: %v", err)
- }
- if err := s3.AddLink(bobChannelLink); err != nil {
- t.Fatalf("unable to add bob link: %v", err)
- }
-
- if s3.circuits.NumPending() != 0 {
- t.Fatalf("wrong amount of half circuits")
- }
- if s3.circuits.NumOpen() != 0 {
- t.Fatalf("wrong amount of circuits")
- }
-}
-
-type multiHopFwdTest struct {
- name string
- eligible1, eligible2 bool
- failure1, failure2 *LinkError
- expectedReply lnwire.FailCode
-}
-
-// TestCircularForwards tests the allowing/disallowing of circular payments
-// through the same channel in the case where the switch is configured to allow
-// and disallow same channel circular forwards.
-func TestCircularForwards(t *testing.T) {
- chanID1, aliceChanID := genID()
- preimage := [sha256.Size]byte{1}
- hash := sha256.Sum256(preimage[:])
-
- tests := []struct {
- name string
- allowCircularPayment bool
- expectedErr error
- }{
- {
- name: "circular payment allowed",
- allowCircularPayment: true,
- expectedErr: nil,
- },
- {
- name: "circular payment disallowed",
- allowCircularPayment: false,
- expectedErr: NewDetailedLinkError(
- lnwire.NewTemporaryChannelFailure(nil),
- OutgoingFailureCircularRoute,
- ),
- },
- }
-
- for _, test := range tests {
- test := test
- t.Run(test.name, func(t *testing.T) {
- t.Parallel()
-
- alicePeer, err := newMockServer(
- t, "alice", testStartingHeight, nil,
- testDefaultDelta,
- )
- if err != nil {
- t.Fatalf("unable to create alice server: %v",
- err)
- }
-
- s, err := initSwitchWithDB(testStartingHeight, nil)
- if err != nil {
- t.Fatalf("unable to init switch: %v", err)
- }
- if err := s.Start(); err != nil {
- t.Fatalf("unable to start switch: %v", err)
- }
- defer func() { _ = s.Stop() }()
-
- // Set the switch to allow or disallow circular routes
- // according to the test's requirements.
- s.cfg.AllowCircularRoute = test.allowCircularPayment
-
- aliceChannelLink := newMockChannelLink(
- s, chanID1, aliceChanID, alicePeer, true,
- )
-
- if err := s.AddLink(aliceChannelLink); err != nil {
- t.Fatalf("unable to add alice link: %v", err)
- }
-
- // Create a new packet that loops through alice's link
- // in a circle.
- obfuscator := NewMockObfuscator()
- packet := &htlcPacket{
- incomingChanID: aliceChannelLink.ShortChanID(),
- outgoingChanID: aliceChannelLink.ShortChanID(),
- htlc: &lnwire.UpdateAddHTLC{
- PaymentHash: hash,
- Amount: 1,
- },
- obfuscator: obfuscator,
- }
-
- // Attempt to forward the packet and check for the expected
- // error.
- if err = s.ForwardPackets(nil, packet); err != nil {
- t.Fatal(err)
- }
- select {
- case p := <-aliceChannelLink.packets:
- if p.linkFailure != nil {
- err = er.E(p.linkFailure)
- }
- case <-time.After(time.Second):
- t.Fatal("no timely reply from switch")
- }
- errr := er.Wrapped(err)
- if !reflect.DeepEqual(errr, test.expectedErr) {
- t.Fatalf("expected: %v, got: %v",
- test.expectedErr, err)
- }
-
- // Ensure that no circuits were opened.
- if s.circuits.NumOpen() > 0 {
- t.Fatal("do not expect any open circuits")
- }
- })
- }
-}
-
-// TestCheckCircularForward tests the error returned by checkCircularForward
-// in cases where we allow and disallow same channel circular forwards.
-func TestCheckCircularForward(t *testing.T) {
- tests := []struct {
- name string
-
- // allowCircular determines whether we should allow circular
- // forwards.
- allowCircular bool
-
- // incomingLink is the link that the htlc arrived on.
- incomingLink lnwire.ShortChannelID
-
- // outgoingLink is the link that the htlc forward
- // is destined to leave on.
- outgoingLink lnwire.ShortChannelID
-
- // expectedErr is the error we expect to be returned.
- expectedErr *LinkError
- }{
- {
- name: "not circular, allowed in config",
- allowCircular: true,
- incomingLink: lnwire.NewShortChanIDFromInt(123),
- outgoingLink: lnwire.NewShortChanIDFromInt(321),
- expectedErr: nil,
- },
- {
- name: "not circular, not allowed in config",
- allowCircular: false,
- incomingLink: lnwire.NewShortChanIDFromInt(123),
- outgoingLink: lnwire.NewShortChanIDFromInt(321),
- expectedErr: nil,
- },
- {
- name: "circular, allowed in config",
- allowCircular: true,
- incomingLink: lnwire.NewShortChanIDFromInt(123),
- outgoingLink: lnwire.NewShortChanIDFromInt(123),
- expectedErr: nil,
- },
- {
- name: "circular, not allowed in config",
- allowCircular: false,
- incomingLink: lnwire.NewShortChanIDFromInt(123),
- outgoingLink: lnwire.NewShortChanIDFromInt(123),
- expectedErr: NewDetailedLinkError(
- lnwire.NewTemporaryChannelFailure(nil),
- OutgoingFailureCircularRoute,
- ),
- },
- }
-
- for _, test := range tests {
- test := test
-
- t.Run(test.name, func(t *testing.T) {
- t.Parallel()
-
- // Check for a circular forward, the hash passed can
- // be nil because it is only used for logging.
- err := checkCircularForward(
- test.incomingLink, test.outgoingLink,
- test.allowCircular, lntypes.Hash{},
- )
- if !reflect.DeepEqual(err, test.expectedErr) {
- t.Fatalf("expected: %v, got: %v",
- test.expectedErr, err)
- }
- })
- }
-}
-
-// TestSkipIneligibleLinksMultiHopForward tests that if a multi-hop HTLC comes
-// along, then we won't attempt to froward it down al ink that isn't yet able
-// to forward any HTLC's.
-func TestSkipIneligibleLinksMultiHopForward(t *testing.T) {
- tests := []multiHopFwdTest{
- // None of the channels is eligible.
- {
- name: "not eligible",
- expectedReply: lnwire.CodeUnknownNextPeer,
- },
-
- // Channel one has a policy failure and the other channel isn't
- // available.
- {
- name: "policy fail",
- eligible1: true,
- failure1: NewLinkError(
- lnwire.NewFinalIncorrectCltvExpiry(0),
- ),
- expectedReply: lnwire.CodeFinalIncorrectCltvExpiry,
- },
-
- // The requested channel is not eligible, but the packet is
- // forwarded through the other channel.
- {
- name: "non-strict success",
- eligible2: true,
- expectedReply: lnwire.CodeNone,
- },
-
- // The requested channel has insufficient bandwidth and the
- // other channel's policy isn't satisfied.
- {
- name: "non-strict policy fail",
- eligible1: true,
- failure1: NewDetailedLinkError(
- lnwire.NewTemporaryChannelFailure(nil),
- OutgoingFailureInsufficientBalance,
- ),
- eligible2: true,
- failure2: NewLinkError(
- lnwire.NewFinalIncorrectCltvExpiry(0),
- ),
- expectedReply: lnwire.CodeTemporaryChannelFailure,
- },
- }
-
- for _, test := range tests {
- test := test
- t.Run(test.name, func(t *testing.T) {
- testSkipIneligibleLinksMultiHopForward(t, &test)
- })
- }
-}
-
-// testSkipIneligibleLinksMultiHopForward tests that if a multi-hop HTLC comes
-// along, then we won't attempt to froward it down al ink that isn't yet able
-// to forward any HTLC's.
-func testSkipIneligibleLinksMultiHopForward(t *testing.T,
- testCase *multiHopFwdTest) {
-
- t.Parallel()
-
- var packet *htlcPacket
-
- alicePeer, err := newMockServer(
- t, "alice", testStartingHeight, nil, testDefaultDelta,
- )
- if err != nil {
- t.Fatalf("unable to create alice server: %v", err)
- }
- bobPeer, err := newMockServer(
- t, "bob", testStartingHeight, nil, testDefaultDelta,
- )
- if err != nil {
- t.Fatalf("unable to create bob server: %v", err)
- }
-
- s, err := initSwitchWithDB(testStartingHeight, nil)
- if err != nil {
- t.Fatalf("unable to init switch: %v", err)
- }
- if err := s.Start(); err != nil {
- t.Fatalf("unable to start switch: %v", err)
- }
- defer s.Stop()
-
- chanID1, aliceChanID := genID()
- aliceChannelLink := newMockChannelLink(
- s, chanID1, aliceChanID, alicePeer, true,
- )
-
- // We'll create a link for Bob, but mark the link as unable to forward
- // any new outgoing HTLC's.
- chanID2, bobChanID2 := genID()
- bobChannelLink1 := newMockChannelLink(
- s, chanID2, bobChanID2, bobPeer, testCase.eligible1,
- )
- bobChannelLink1.checkHtlcForwardResult = testCase.failure1
-
- chanID3, bobChanID3 := genID()
- bobChannelLink2 := newMockChannelLink(
- s, chanID3, bobChanID3, bobPeer, testCase.eligible2,
- )
- bobChannelLink2.checkHtlcForwardResult = testCase.failure2
-
- if err := s.AddLink(aliceChannelLink); err != nil {
- t.Fatalf("unable to add alice link: %v", err)
- }
- if err := s.AddLink(bobChannelLink1); err != nil {
- t.Fatalf("unable to add bob link: %v", err)
- }
- if err := s.AddLink(bobChannelLink2); err != nil {
- t.Fatalf("unable to add bob link: %v", err)
- }
-
- // Create a new packet that's destined for Bob as an incoming HTLC from
- // Alice.
- preimage := [sha256.Size]byte{1}
- rhash := sha256.Sum256(preimage[:])
- obfuscator := NewMockObfuscator()
- packet = &htlcPacket{
- incomingChanID: aliceChannelLink.ShortChanID(),
- incomingHTLCID: 0,
- outgoingChanID: bobChannelLink1.ShortChanID(),
- htlc: &lnwire.UpdateAddHTLC{
- PaymentHash: rhash,
- Amount: 1,
- },
- obfuscator: obfuscator,
- }
-
- // The request to forward should fail as
- if err := s.ForwardPackets(nil, packet); err != nil {
- t.Fatal(err)
- }
-
- // We select from all links and extract the error if exists.
- // The packet must be selected but we don't always expect a link error.
- var linkError *LinkError
- select {
- case p := <-aliceChannelLink.packets:
- linkError = p.linkFailure
- case p := <-bobChannelLink1.packets:
- linkError = p.linkFailure
- case p := <-bobChannelLink2.packets:
- linkError = p.linkFailure
- case <-time.After(time.Second):
- t.Fatal("no timely reply from switch")
- }
- failure := obfuscator.(*mockObfuscator).failure
- if testCase.expectedReply == lnwire.CodeNone {
- if linkError != nil {
- t.Fatalf("forwarding should have succeeded")
- }
- if failure != nil {
- t.Fatalf("unexpected failure %T", failure)
- }
- } else {
- if linkError == nil {
- t.Fatalf("forwarding should have failed due to " +
- "inactive link")
- }
- if failure.Code() != testCase.expectedReply {
- t.Fatalf("unexpected failure %T", failure)
- }
- }
-
- if s.circuits.NumOpen() != 0 {
- t.Fatal("wrong amount of circuits")
- }
-}
-
-// TestSkipIneligibleLinksLocalForward ensures that the switch will not attempt
-// to forward any HTLC's down a link that isn't yet eligible for forwarding.
-func TestSkipIneligibleLinksLocalForward(t *testing.T) {
- t.Parallel()
-
- testSkipLinkLocalForward(t, false, nil)
-}
-
-// TestSkipPolicyUnsatisfiedLinkLocalForward ensures that the switch will not
-// attempt to send locally initiated HTLCs that would violate the channel policy
-// down a link.
-func TestSkipPolicyUnsatisfiedLinkLocalForward(t *testing.T) {
- t.Parallel()
-
- testSkipLinkLocalForward(t, true, lnwire.NewTemporaryChannelFailure(nil))
-}
-
-func testSkipLinkLocalForward(t *testing.T, eligible bool,
- policyResult lnwire.FailureMessage) {
-
- // We'll create a single link for this test, marking it as being unable
- // to forward form the get go.
- alicePeer, err := newMockServer(
- t, "alice", testStartingHeight, nil, testDefaultDelta,
- )
- if err != nil {
- t.Fatalf("unable to create alice server: %v", err)
- }
-
- s, err := initSwitchWithDB(testStartingHeight, nil)
- if err != nil {
- t.Fatalf("unable to init switch: %v", err)
- }
- if err := s.Start(); err != nil {
- t.Fatalf("unable to start switch: %v", err)
- }
- defer s.Stop()
-
- chanID1, _, aliceChanID, _ := genIDs()
-
- aliceChannelLink := newMockChannelLink(
- s, chanID1, aliceChanID, alicePeer, eligible,
- )
- aliceChannelLink.checkHtlcTransitResult = NewLinkError(
- policyResult,
- )
- if err := s.AddLink(aliceChannelLink); err != nil {
- t.Fatalf("unable to add alice link: %v", err)
- }
-
- preimage, err := genPreimage()
- if err != nil {
- t.Fatalf("unable to generate preimage: %v", err)
- }
- rhash := sha256.Sum256(preimage[:])
- addMsg := &lnwire.UpdateAddHTLC{
- PaymentHash: rhash,
- Amount: 1,
- }
-
- // We'll attempt to send out a new HTLC that has Alice as the first
- // outgoing link. This should fail as Alice isn't yet able to forward
- // any active HTLC's.
- err = s.SendHTLC(aliceChannelLink.ShortChanID(), 0, addMsg)
- if err == nil {
- t.Fatalf("local forward should fail due to inactive link")
- }
-
- if s.circuits.NumOpen() != 0 {
- t.Fatal("wrong amount of circuits")
- }
-}
-
-// TestSwitchCancel checks that if htlc was rejected we remove unused
-// circuits.
-func TestSwitchCancel(t *testing.T) {
- t.Parallel()
-
- alicePeer, err := newMockServer(
- t, "alice", testStartingHeight, nil, testDefaultDelta,
- )
- if err != nil {
- t.Fatalf("unable to create alice server: %v", err)
- }
- bobPeer, err := newMockServer(
- t, "bob", testStartingHeight, nil, testDefaultDelta,
- )
- if err != nil {
- t.Fatalf("unable to create bob server: %v", err)
- }
-
- s, err := initSwitchWithDB(testStartingHeight, nil)
- if err != nil {
- t.Fatalf("unable to init switch: %v", err)
- }
- if err := s.Start(); err != nil {
- t.Fatalf("unable to start switch: %v", err)
- }
- defer s.Stop()
-
- chanID1, chanID2, aliceChanID, bobChanID := genIDs()
-
- aliceChannelLink := newMockChannelLink(
- s, chanID1, aliceChanID, alicePeer, true,
- )
- bobChannelLink := newMockChannelLink(
- s, chanID2, bobChanID, bobPeer, true,
- )
- if err := s.AddLink(aliceChannelLink); err != nil {
- t.Fatalf("unable to add alice link: %v", err)
- }
- if err := s.AddLink(bobChannelLink); err != nil {
- t.Fatalf("unable to add bob link: %v", err)
- }
-
- // Create request which should be forwarder from alice channel link
- // to bob channel link.
- preimage, err := genPreimage()
- if err != nil {
- t.Fatalf("unable to generate preimage: %v", err)
- }
- rhash := sha256.Sum256(preimage[:])
- request := &htlcPacket{
- incomingChanID: aliceChannelLink.ShortChanID(),
- incomingHTLCID: 0,
- outgoingChanID: bobChannelLink.ShortChanID(),
- obfuscator: NewMockObfuscator(),
- htlc: &lnwire.UpdateAddHTLC{
- PaymentHash: rhash,
- Amount: 1,
- },
- }
-
- // Handle the request and checks that bob channel link received it.
- if err := s.ForwardPackets(nil, request); err != nil {
- t.Fatal(err)
- }
-
- select {
- case packet := <-bobChannelLink.packets:
- if err := bobChannelLink.completeCircuit(packet); err != nil {
- t.Fatalf("unable to complete payment circuit: %v", err)
- }
-
- case <-time.After(time.Second):
- t.Fatal("request was not propagated to destination")
- }
-
- if s.circuits.NumPending() != 1 {
- t.Fatalf("wrong amount of half circuits")
- }
- if s.circuits.NumOpen() != 1 {
- t.Fatal("wrong amount of circuits")
- }
-
- // Create settle request pretending that bob channel link handled
- // the add htlc request and sent the htlc settle request back. This
- // request should be forwarder back to alice channel link.
- request = &htlcPacket{
- outgoingChanID: bobChannelLink.ShortChanID(),
- outgoingHTLCID: 0,
- amount: 1,
- htlc: &lnwire.UpdateFailHTLC{},
- }
-
- // Handle the request and checks that payment circuit works properly.
- if err := s.ForwardPackets(nil, request); err != nil {
- t.Fatal(err)
- }
-
- select {
- case pkt := <-aliceChannelLink.packets:
- if err := aliceChannelLink.completeCircuit(pkt); err != nil {
- t.Fatalf("unable to remove circuit: %v", err)
- }
-
- case <-time.After(time.Second):
- t.Fatal("request was not propagated to channelPoint")
- }
-
- if s.circuits.NumPending() != 0 {
- t.Fatal("wrong amount of circuits")
- }
- if s.circuits.NumOpen() != 0 {
- t.Fatal("wrong amount of circuits")
- }
-}
-
-// TestSwitchAddSamePayment tests that we send the payment with the same
-// payment hash.
-func TestSwitchAddSamePayment(t *testing.T) {
- t.Parallel()
-
- chanID1, chanID2, aliceChanID, bobChanID := genIDs()
-
- alicePeer, err := newMockServer(
- t, "alice", testStartingHeight, nil, testDefaultDelta,
- )
- if err != nil {
- t.Fatalf("unable to create alice server: %v", err)
- }
- bobPeer, err := newMockServer(
- t, "bob", testStartingHeight, nil, testDefaultDelta,
- )
- if err != nil {
- t.Fatalf("unable to create bob server: %v", err)
- }
-
- s, err := initSwitchWithDB(testStartingHeight, nil)
- if err != nil {
- t.Fatalf("unable to init switch: %v", err)
- }
- if err := s.Start(); err != nil {
- t.Fatalf("unable to start switch: %v", err)
- }
- defer s.Stop()
-
- aliceChannelLink := newMockChannelLink(
- s, chanID1, aliceChanID, alicePeer, true,
- )
- bobChannelLink := newMockChannelLink(
- s, chanID2, bobChanID, bobPeer, true,
- )
- if err := s.AddLink(aliceChannelLink); err != nil {
- t.Fatalf("unable to add alice link: %v", err)
- }
- if err := s.AddLink(bobChannelLink); err != nil {
- t.Fatalf("unable to add bob link: %v", err)
- }
-
- // Create request which should be forwarder from alice channel link
- // to bob channel link.
- preimage, err := genPreimage()
- if err != nil {
- t.Fatalf("unable to generate preimage: %v", err)
- }
- rhash := sha256.Sum256(preimage[:])
- request := &htlcPacket{
- incomingChanID: aliceChannelLink.ShortChanID(),
- incomingHTLCID: 0,
- outgoingChanID: bobChannelLink.ShortChanID(),
- obfuscator: NewMockObfuscator(),
- htlc: &lnwire.UpdateAddHTLC{
- PaymentHash: rhash,
- Amount: 1,
- },
- }
-
- // Handle the request and checks that bob channel link received it.
- if err := s.ForwardPackets(nil, request); err != nil {
- t.Fatal(err)
- }
-
- select {
- case packet := <-bobChannelLink.packets:
- if err := bobChannelLink.completeCircuit(packet); err != nil {
- t.Fatalf("unable to complete payment circuit: %v", err)
- }
-
- case <-time.After(time.Second):
- t.Fatal("request was not propagated to destination")
- }
-
- if s.circuits.NumOpen() != 1 {
- t.Fatal("wrong amount of circuits")
- }
-
- request = &htlcPacket{
- incomingChanID: aliceChannelLink.ShortChanID(),
- incomingHTLCID: 1,
- outgoingChanID: bobChannelLink.ShortChanID(),
- obfuscator: NewMockObfuscator(),
- htlc: &lnwire.UpdateAddHTLC{
- PaymentHash: rhash,
- Amount: 1,
- },
- }
-
- // Handle the request and checks that bob channel link received it.
- if err := s.ForwardPackets(nil, request); err != nil {
- t.Fatal(err)
- }
-
- select {
- case packet := <-bobChannelLink.packets:
- if err := bobChannelLink.completeCircuit(packet); err != nil {
- t.Fatalf("unable to complete payment circuit: %v", err)
- }
-
- case <-time.After(time.Second):
- t.Fatal("request was not propagated to destination")
- }
-
- if s.circuits.NumOpen() != 2 {
- t.Fatal("wrong amount of circuits")
- }
-
- // Create settle request pretending that bob channel link handled
- // the add htlc request and sent the htlc settle request back. This
- // request should be forwarder back to alice channel link.
- request = &htlcPacket{
- outgoingChanID: bobChannelLink.ShortChanID(),
- outgoingHTLCID: 0,
- amount: 1,
- htlc: &lnwire.UpdateFailHTLC{},
- }
-
- // Handle the request and checks that payment circuit works properly.
- if err := s.ForwardPackets(nil, request); err != nil {
- t.Fatal(err)
- }
-
- select {
- case pkt := <-aliceChannelLink.packets:
- if err := aliceChannelLink.completeCircuit(pkt); err != nil {
- t.Fatalf("unable to remove circuit: %v", err)
- }
-
- case <-time.After(time.Second):
- t.Fatal("request was not propagated to channelPoint")
- }
-
- if s.circuits.NumOpen() != 1 {
- t.Fatal("wrong amount of circuits")
- }
-
- request = &htlcPacket{
- outgoingChanID: bobChannelLink.ShortChanID(),
- outgoingHTLCID: 1,
- amount: 1,
- htlc: &lnwire.UpdateFailHTLC{},
- }
-
- // Handle the request and checks that payment circuit works properly.
- if err := s.ForwardPackets(nil, request); err != nil {
- t.Fatal(err)
- }
-
- select {
- case pkt := <-aliceChannelLink.packets:
- if err := aliceChannelLink.completeCircuit(pkt); err != nil {
- t.Fatalf("unable to remove circuit: %v", err)
- }
-
- case <-time.After(time.Second):
- t.Fatal("request was not propagated to channelPoint")
- }
-
- if s.circuits.NumOpen() != 0 {
- t.Fatal("wrong amount of circuits")
- }
-}
-
-// TestSwitchSendPayment tests ability of htlc switch to respond to the
-// users when response is came back from channel link.
-func TestSwitchSendPayment(t *testing.T) {
- t.Parallel()
-
- alicePeer, err := newMockServer(
- t, "alice", testStartingHeight, nil, testDefaultDelta,
- )
- if err != nil {
- t.Fatalf("unable to create alice server: %v", err)
- }
-
- s, err := initSwitchWithDB(testStartingHeight, nil)
- if err != nil {
- t.Fatalf("unable to init switch: %v", err)
- }
- if err := s.Start(); err != nil {
- t.Fatalf("unable to start switch: %v", err)
- }
- defer s.Stop()
-
- chanID1, _, aliceChanID, _ := genIDs()
-
- aliceChannelLink := newMockChannelLink(
- s, chanID1, aliceChanID, alicePeer, true,
- )
- if err := s.AddLink(aliceChannelLink); err != nil {
- t.Fatalf("unable to add link: %v", err)
- }
-
- // Create request which should be forwarder from alice channel link
- // to bob channel link.
- preimage, err := genPreimage()
- if err != nil {
- t.Fatalf("unable to generate preimage: %v", err)
- }
- rhash := sha256.Sum256(preimage[:])
- update := &lnwire.UpdateAddHTLC{
- PaymentHash: rhash,
- Amount: 1,
- }
- paymentID := uint64(123)
-
- // First check that the switch will correctly respond that this payment
- // ID is unknown.
- _, err = s.GetPaymentResult(
- paymentID, rhash, newMockDeobfuscator(),
- )
- if !ErrPaymentIDNotFound.Is(err) {
- t.Fatalf("expected ErrPaymentIDNotFound, got %v", err)
- }
-
- // Handle the request and checks that bob channel link received it.
- errChan := make(chan er.R)
- go func() {
- err := s.SendHTLC(
- aliceChannelLink.ShortChanID(), paymentID, update,
- )
- if err != nil {
- errChan <- err
- return
- }
-
- resultChan, err := s.GetPaymentResult(
- paymentID, rhash, newMockDeobfuscator(),
- )
- if err != nil {
- errChan <- err
- return
- }
-
- result, ok := <-resultChan
- if !ok {
- errChan <- er.Errorf("shutting down")
- }
-
- if result.Error != nil {
- errChan <- result.Error
- return
- }
-
- errChan <- nil
- }()
-
- select {
- case packet := <-aliceChannelLink.packets:
- if err := aliceChannelLink.completeCircuit(packet); err != nil {
- t.Fatalf("unable to complete payment circuit: %v", err)
- }
-
- case err := <-errChan:
- if err != nil {
- t.Fatalf("unable to send payment: %v", err)
- }
- case <-time.After(time.Second):
- t.Fatal("request was not propagated to destination")
- }
-
- if s.circuits.NumOpen() != 1 {
- t.Fatal("wrong amount of circuits")
- }
-
- // Create fail request pretending that bob channel link handled
- // the add htlc request with error and sent the htlc fail request
- // back. This request should be forwarded back to alice channel link.
- obfuscator := NewMockObfuscator()
- failure := lnwire.NewFailIncorrectDetails(update.Amount, 100)
- reason, err := obfuscator.EncryptFirstHop(failure)
- if err != nil {
- t.Fatalf("unable obfuscate failure: %v", err)
- }
-
- if s.IsForwardedHTLC(aliceChannelLink.ShortChanID(), update.ID) {
- t.Fatal("htlc should be identified as not forwarded")
- }
- packet := &htlcPacket{
- outgoingChanID: aliceChannelLink.ShortChanID(),
- outgoingHTLCID: 0,
- amount: 1,
- htlc: &lnwire.UpdateFailHTLC{
- Reason: reason,
- },
- }
-
- if err := s.ForwardPackets(nil, packet); err != nil {
- t.Fatalf("can't forward htlc packet: %v", err)
- }
-
- select {
- case err := <-errChan:
- assertFailureCode(
- t, err, lnwire.CodeIncorrectOrUnknownPaymentDetails,
- )
- case <-time.After(time.Second):
- t.Fatal("err wasn't received")
- }
-}
-
-// TestLocalPaymentNoForwardingEvents tests that if we send a series of locally
-// initiated payments, then they aren't reflected in the forwarding log.
-func TestLocalPaymentNoForwardingEvents(t *testing.T) {
- t.Parallel()
-
- // First, we'll create our traditional three hop network. We'll only be
- // interacting with and asserting the state of the first end point for
- // this test.
- channels, cleanUp, _, err := createClusterChannels(
- btcutil.UnitsPerCoin()*3,
- btcutil.UnitsPerCoin()*5)
- if err != nil {
- t.Fatalf("unable to create channel: %v", err)
- }
- defer cleanUp()
-
- n := newThreeHopNetwork(t, channels.aliceToBob, channels.bobToAlice,
- channels.bobToCarol, channels.carolToBob, testStartingHeight)
- if err := n.start(); err != nil {
- t.Fatalf("unable to start three hop network: %v", err)
- }
-
- // We'll now craft and send a payment from Alice to Bob.
- amount := lnwire.NewMSatFromSatoshis(btcutil.UnitsPerCoin())
- htlcAmt, totalTimelock, hops := generateHops(
- amount, testStartingHeight, n.firstBobChannelLink,
- )
-
- // With the payment crafted, we'll send it from Alice to Bob. We'll
- // wait for Alice to receive the preimage for the payment before
- // proceeding.
- receiver := n.bobServer
- firstHop := n.firstBobChannelLink.ShortChanID()
- _, err = makePayment(
- n.aliceServer, receiver, firstHop, hops, amount, htlcAmt,
- totalTimelock,
- ).Wait(30 * time.Second)
- if err != nil {
- t.Fatalf("unable to make the payment: %v", err)
- }
-
- // At this point, we'll forcibly stop the three hop network. Doing
- // this will cause any pending forwarding events to be flushed by the
- // various switches in the network.
- n.stop()
-
- // With all the switches stopped, we'll fetch Alice's mock forwarding
- // event log.
- log, ok := n.aliceServer.htlcSwitch.cfg.FwdingLog.(*mockForwardingLog)
- if !ok {
- t.Fatalf("mockForwardingLog assertion failed")
- }
- log.Lock()
- defer log.Unlock()
-
- // If we examine the memory of the forwarding log, then it should be
- // blank.
- if len(log.events) != 0 {
- t.Fatalf("log should have no events, instead has: %v",
- spew.Sdump(log.events))
- }
-}
-
-// TestMultiHopPaymentForwardingEvents tests that if we send a series of
-// multi-hop payments via Alice->Bob->Carol. Then Bob properly logs forwarding
-// events, while Alice and Carol don't.
-func TestMultiHopPaymentForwardingEvents(t *testing.T) {
- t.Parallel()
-
- // First, we'll create our traditional three hop network.
- channels, cleanUp, _, err := createClusterChannels(
- btcutil.UnitsPerCoin()*3,
- btcutil.UnitsPerCoin()*5)
- if err != nil {
- t.Fatalf("unable to create channel: %v", err)
- }
- defer cleanUp()
-
- n := newThreeHopNetwork(t, channels.aliceToBob, channels.bobToAlice,
- channels.bobToCarol, channels.carolToBob, testStartingHeight)
- if err := n.start(); err != nil {
- t.Fatalf("unable to start three hop network: %v", err)
- }
-
- // We'll make now 10 payments, of 100k satoshis each from Alice to
- // Carol via Bob.
- const numPayments = 10
- finalAmt := lnwire.NewMSatFromSatoshis(100000)
- htlcAmt, totalTimelock, hops := generateHops(
- finalAmt, testStartingHeight, n.firstBobChannelLink,
- n.carolChannelLink,
- )
- firstHop := n.firstBobChannelLink.ShortChanID()
- for i := 0; i < numPayments/2; i++ {
- _, err := makePayment(
- n.aliceServer, n.carolServer, firstHop, hops, finalAmt,
- htlcAmt, totalTimelock,
- ).Wait(30 * time.Second)
- if err != nil {
- t.Fatalf("unable to send payment: %v", err)
- }
- }
-
- bobLog, ok := n.bobServer.htlcSwitch.cfg.FwdingLog.(*mockForwardingLog)
- if !ok {
- t.Fatalf("mockForwardingLog assertion failed")
- }
-
- // After sending 5 of the payments, trigger the forwarding ticker, to
- // make sure the events are properly flushed.
- bobTicker, ok := n.bobServer.htlcSwitch.cfg.FwdEventTicker.(*ticker.Force)
- if !ok {
- t.Fatalf("mockTicker assertion failed")
- }
-
- // We'll trigger the ticker, and wait for the events to appear in Bob's
- // forwarding log.
- timeout := time.After(15 * time.Second)
- for {
- select {
- case bobTicker.Force <- time.Now():
- case <-time.After(1 * time.Second):
- t.Fatalf("unable to force tick")
- }
-
- // If all 5 events is found in Bob's log, we can break out and
- // continue the test.
- bobLog.Lock()
- if len(bobLog.events) == 5 {
- bobLog.Unlock()
- break
- }
- bobLog.Unlock()
-
- // Otherwise wait a little bit before checking again.
- select {
- case <-time.After(50 * time.Millisecond):
- case <-timeout:
- bobLog.Lock()
- defer bobLog.Unlock()
- t.Fatalf("expected 5 events in event log, instead "+
- "found: %v", spew.Sdump(bobLog.events))
- }
- }
-
- // Send the remaining payments.
- for i := numPayments / 2; i < numPayments; i++ {
- _, err := makePayment(
- n.aliceServer, n.carolServer, firstHop, hops, finalAmt,
- htlcAmt, totalTimelock,
- ).Wait(30 * time.Second)
- if err != nil {
- t.Fatalf("unable to send payment: %v", err)
- }
- }
-
- // With all 10 payments sent. We'll now manually stop each of the
- // switches so we can examine their end state.
- n.stop()
-
- // Alice and Carol shouldn't have any recorded forwarding events, as
- // they were the source and the sink for these payment flows.
- aliceLog, ok := n.aliceServer.htlcSwitch.cfg.FwdingLog.(*mockForwardingLog)
- if !ok {
- t.Fatalf("mockForwardingLog assertion failed")
- }
- aliceLog.Lock()
- defer aliceLog.Unlock()
- if len(aliceLog.events) != 0 {
- t.Fatalf("log should have no events, instead has: %v",
- spew.Sdump(aliceLog.events))
- }
-
- carolLog, ok := n.carolServer.htlcSwitch.cfg.FwdingLog.(*mockForwardingLog)
- if !ok {
- t.Fatalf("mockForwardingLog assertion failed")
- }
- carolLog.Lock()
- defer carolLog.Unlock()
- if len(carolLog.events) != 0 {
- t.Fatalf("log should have no events, instead has: %v",
- spew.Sdump(carolLog.events))
- }
-
- // Bob on the other hand, should have 10 events.
- bobLog.Lock()
- defer bobLog.Unlock()
- if len(bobLog.events) != 10 {
- t.Fatalf("log should have 10 events, instead has: %v",
- spew.Sdump(bobLog.events))
- }
-
- // Each of the 10 events should have had all fields set properly.
- for _, event := range bobLog.events {
- // The incoming and outgoing channels should properly be set for
- // the event.
- if event.IncomingChanID != n.aliceChannelLink.ShortChanID() {
- t.Fatalf("chan id mismatch: expected %v, got %v",
- event.IncomingChanID,
- n.aliceChannelLink.ShortChanID())
- }
- if event.OutgoingChanID != n.carolChannelLink.ShortChanID() {
- t.Fatalf("chan id mismatch: expected %v, got %v",
- event.OutgoingChanID,
- n.carolChannelLink.ShortChanID())
- }
-
- // Additionally, the incoming and outgoing amounts should also
- // be properly set.
- if event.AmtIn != htlcAmt {
- t.Fatalf("incoming amt mismatch: expected %v, got %v",
- event.AmtIn, htlcAmt)
- }
- if event.AmtOut != finalAmt {
- t.Fatalf("outgoing amt mismatch: expected %v, got %v",
- event.AmtOut, finalAmt)
- }
- }
-}
-
-// TestUpdateFailMalformedHTLCErrorConversion tests that we're able to properly
-// convert malformed HTLC errors that originate at the direct link, as well as
-// during multi-hop HTLC forwarding.
-func TestUpdateFailMalformedHTLCErrorConversion(t *testing.T) {
- t.Parallel()
-
- // First, we'll create our traditional three hop network.
- channels, cleanUp, _, err := createClusterChannels(
- btcutil.UnitsPerCoin()*3, btcutil.UnitsPerCoin()*5,
- )
- if err != nil {
- t.Fatalf("unable to create channel: %v", err)
- }
- defer cleanUp()
-
- n := newThreeHopNetwork(
- t, channels.aliceToBob, channels.bobToAlice,
- channels.bobToCarol, channels.carolToBob, testStartingHeight,
- )
- if err := n.start(); err != nil {
- t.Fatalf("unable to start three hop network: %v", err)
- }
-
- assertPaymentFailure := func(t *testing.T) {
- // With the decoder modified, we'll now attempt to send a
- // payment from Alice to carol.
- finalAmt := lnwire.NewMSatFromSatoshis(100000)
- htlcAmt, totalTimelock, hops := generateHops(
- finalAmt, testStartingHeight, n.firstBobChannelLink,
- n.carolChannelLink,
- )
- firstHop := n.firstBobChannelLink.ShortChanID()
- _, err = makePayment(
- n.aliceServer, n.carolServer, firstHop, hops, finalAmt,
- htlcAmt, totalTimelock,
- ).Wait(30 * time.Second)
-
- // The payment should fail as Carol is unable to decode the
- // onion blob sent to her.
- if err == nil {
- t.Fatalf("unable to send payment: %v", err)
- }
-
- errr := er.Wrapped(err)
- routingErr := errr.(ClearTextError)
- failureMsg := routingErr.WireMessage()
- if _, ok := failureMsg.(*lnwire.FailInvalidOnionKey); !ok {
- t.Fatalf("expected onion failure instead got: %v",
- routingErr.WireMessage())
- }
- }
-
- t.Run("multi-hop error conversion", func(t *testing.T) {
- // Now that we have our network up, we'll modify the hop
- // iterator for the Bob <-> Carol channel to fail to decode in
- // order to simulate either a replay attack or an issue
- // decoding the onion.
- n.carolOnionDecoder.decodeFail = true
-
- assertPaymentFailure(t)
- })
-
- t.Run("direct channel error conversion", func(t *testing.T) {
- // Similar to the above test case, we'll now make the Alice <->
- // Bob link always fail to decode an onion. This differs from
- // the above test case in that there's no encryption on the
- // error at all since Alice will directly receive a
- // UpdateFailMalformedHTLC message.
- n.bobOnionDecoder.decodeFail = true
-
- assertPaymentFailure(t)
- })
-}
-
-// TestSwitchGetPaymentResult tests that the switch interacts as expected with
-// the circuit map and network result store when looking up the result of a
-// payment ID. This is important for not to lose results under concurrent
-// lookup and receiving results.
-func TestSwitchGetPaymentResult(t *testing.T) {
- t.Parallel()
-
- const paymentID = 123
- var preimg lntypes.Preimage
- preimg[0] = 3
-
- s, err := initSwitchWithDB(testStartingHeight, nil)
- if err != nil {
- t.Fatalf("unable to init switch: %v", err)
- }
- if err := s.Start(); err != nil {
- t.Fatalf("unable to start switch: %v", err)
- }
- defer s.Stop()
-
- lookup := make(chan *PaymentCircuit, 1)
- s.circuits = &mockCircuitMap{
- lookup: lookup,
- }
-
- // If the payment circuit is not found in the circuit map, the payment
- // result must be found in the store if available. Since we haven't
- // added anything to the store yet, ErrPaymentIDNotFound should be
- // returned.
- lookup <- nil
- _, err = s.GetPaymentResult(
- paymentID, lntypes.Hash{}, newMockDeobfuscator(),
- )
- if !ErrPaymentIDNotFound.Is(err) {
- t.Fatalf("expected ErrPaymentIDNotFound, got %v", err)
- }
-
- // Next let the lookup find the circuit in the circuit map. It should
- // subscribe to payment results, and return the result when available.
- lookup <- &PaymentCircuit{}
- resultChan, err := s.GetPaymentResult(
- paymentID, lntypes.Hash{}, newMockDeobfuscator(),
- )
- if err != nil {
- t.Fatalf("unable to get payment result: %v", err)
- }
-
- // Add the result to the store.
- n := &networkResult{
- msg: &lnwire.UpdateFulfillHTLC{
- PaymentPreimage: preimg,
- },
- unencrypted: true,
- isResolution: true,
- }
-
- err = s.networkResults.storeResult(paymentID, n)
- if err != nil {
- t.Fatalf("unable to store result: %v", err)
- }
-
- // The result should be availble.
- select {
- case res, ok := <-resultChan:
- if !ok {
- t.Fatalf("channel was closed")
- }
-
- if res.Error != nil {
- t.Fatalf("got unexpected error result")
- }
-
- if res.Preimage != preimg {
- t.Fatalf("expected preimg %v, got %v",
- preimg, res.Preimage)
- }
-
- case <-time.After(1 * time.Second):
- t.Fatalf("result not received")
- }
-
- // As a final test, try to get the result again. Now that is no longer
- // in the circuit map, it should be immediately available from the
- // store.
- lookup <- nil
- resultChan, err = s.GetPaymentResult(
- paymentID, lntypes.Hash{}, newMockDeobfuscator(),
- )
- if err != nil {
- t.Fatalf("unable to get payment result: %v", err)
- }
-
- select {
- case res, ok := <-resultChan:
- if !ok {
- t.Fatalf("channel was closed")
- }
-
- if res.Error != nil {
- t.Fatalf("got unexpected error result")
- }
-
- if res.Preimage != preimg {
- t.Fatalf("expected preimg %v, got %v",
- preimg, res.Preimage)
- }
-
- case <-time.After(1 * time.Second):
- t.Fatalf("result not received")
- }
-}
-
-// TestInvalidFailure tests that the switch returns an unreadable failure error
-// if the failure cannot be decrypted.
-func TestInvalidFailure(t *testing.T) {
- t.Parallel()
-
- alicePeer, err := newMockServer(
- t, "alice", testStartingHeight, nil, testDefaultDelta,
- )
- if err != nil {
- t.Fatalf("unable to create alice server: %v", err)
- }
-
- s, err := initSwitchWithDB(testStartingHeight, nil)
- if err != nil {
- t.Fatalf("unable to init switch: %v", err)
- }
- if err := s.Start(); err != nil {
- t.Fatalf("unable to start switch: %v", err)
- }
- defer s.Stop()
-
- chanID1, _, aliceChanID, _ := genIDs()
-
- // Set up a mock channel link.
- aliceChannelLink := newMockChannelLink(
- s, chanID1, aliceChanID, alicePeer, true,
- )
- if err := s.AddLink(aliceChannelLink); err != nil {
- t.Fatalf("unable to add link: %v", err)
- }
-
- // Create a request which should be forwarded to the mock channel link.
- preimage, err := genPreimage()
- if err != nil {
- t.Fatalf("unable to generate preimage: %v", err)
- }
- rhash := sha256.Sum256(preimage[:])
- update := &lnwire.UpdateAddHTLC{
- PaymentHash: rhash,
- Amount: 1,
- }
-
- paymentID := uint64(123)
-
- // Send the request.
- err = s.SendHTLC(
- aliceChannelLink.ShortChanID(), paymentID, update,
- )
- if err != nil {
- t.Fatalf("unable to send payment: %v", err)
- }
-
- // Catch the packet and complete the circuit so that the switch is ready
- // for a response.
- select {
- case packet := <-aliceChannelLink.packets:
- if err := aliceChannelLink.completeCircuit(packet); err != nil {
- t.Fatalf("unable to complete payment circuit: %v", err)
- }
-
- case <-time.After(time.Second):
- t.Fatal("request was not propagated to destination")
- }
-
- // Send response packet with an unreadable failure message to the
- // switch. The reason failed is not relevant, because we mock the
- // decryption.
- packet := &htlcPacket{
- outgoingChanID: aliceChannelLink.ShortChanID(),
- outgoingHTLCID: 0,
- amount: 1,
- htlc: &lnwire.UpdateFailHTLC{
- Reason: []byte{1, 2, 3},
- },
- }
-
- if err := s.ForwardPackets(nil, packet); err != nil {
- t.Fatalf("can't forward htlc packet: %v", err)
- }
-
- // Get payment result from switch. We expect an unreadable failure
- // message error.
- deobfuscator := SphinxErrorDecrypter{
- OnionErrorDecrypter: &mockOnionErrorDecryptor{
- err: ErrUnreadableFailureMessage.Default(),
- },
- }
-
- resultChan, err := s.GetPaymentResult(
- paymentID, rhash, &deobfuscator,
- )
- if err != nil {
- t.Fatal(err)
- }
-
- select {
- case result := <-resultChan:
- if !ErrUnreadableFailureMessage.Is(result.Error) {
- t.Fatal("expected unreadable failure message")
- }
-
- case <-time.After(time.Second):
- t.Fatal("err wasn't received")
- }
-
- // Modify the decryption to simulate that decryption went alright, but
- // the failure cannot be decoded.
- deobfuscator = SphinxErrorDecrypter{
- OnionErrorDecrypter: &mockOnionErrorDecryptor{
- sourceIdx: 2,
- message: []byte{200},
- },
- }
-
- resultChan, err = s.GetPaymentResult(
- paymentID, rhash, &deobfuscator,
- )
- if err != nil {
- t.Fatal(err)
- }
-
- select {
- case result := <-resultChan:
- errr := er.Wrapped(result.Error)
- rtErr, ok := errr.(ClearTextError)
- if !ok {
- t.Fatal("expected ClearTextError")
- }
- source, ok := rtErr.(*ForwardingError)
- if !ok {
- t.Fatalf("expected forwarding error, got: %T", rtErr)
- }
- if source.FailureSourceIdx != 2 {
- t.Fatal("unexpected error source index")
- }
- if rtErr.WireMessage() != nil {
- t.Fatal("expected empty failure message")
- }
-
- case <-time.After(time.Second):
- t.Fatal("err wasn't received")
- }
-}
-
-// htlcNotifierEvents is a function that generates a set of expected htlc
-// notifier evetns for each node in a three hop network with the dynamic
-// values provided. These functions take dynamic values so that changes to
-// external systems (such as our default timelock delta) do not break
-// these tests.
-type htlcNotifierEvents func(channels *clusterChannels, htlcID uint64,
- ts time.Time, htlc *lnwire.UpdateAddHTLC,
- hops []*hop.Payload) ([]interface{}, []interface{}, []interface{})
-
-// TestHtlcNotifier tests the notifying of htlc events that are routed over a
-// three hop network. It sets up an Alice -> Bob -> Carol network and routes
-// payments from Alice -> Carol to test events from the perspective of a
-// sending (Alice), forwarding (Bob) and receiving (Carol) node. Test cases
-// are present for saduccessful and failed payments.
-func TestHtlcNotifier(t *testing.T) {
- tests := []struct {
- name string
-
- // Options is a set of options to apply to the three hop
- // network's servers.
- options []serverOption
-
- // expectedEvents is a function which returns an expected set
- // of events for the test.
- expectedEvents htlcNotifierEvents
-
- // iterations is the number of times we will send a payment,
- // this is used to send more than one payment to force non-
- // zero htlc indexes to make sure we aren't just checking
- // default values.
- iterations int
- }{
- {
- name: "successful three hop payment",
- options: nil,
- expectedEvents: func(channels *clusterChannels,
- htlcID uint64, ts time.Time,
- htlc *lnwire.UpdateAddHTLC,
- hops []*hop.Payload) ([]interface{},
- []interface{}, []interface{}) {
-
- return getThreeHopEvents(
- channels, htlcID, ts, htlc, hops, nil,
- )
- },
- iterations: 2,
- },
- {
- name: "failed at forwarding link",
- // Set a functional option which disables bob as a
- // forwarding node to force a payment error.
- options: []serverOption{
- serverOptionRejectHtlc(false, true, false),
- },
- expectedEvents: func(channels *clusterChannels,
- htlcID uint64, ts time.Time,
- htlc *lnwire.UpdateAddHTLC,
- hops []*hop.Payload) ([]interface{},
- []interface{}, []interface{}) {
-
- return getThreeHopEvents(
- channels, htlcID, ts, htlc, hops,
- &LinkError{
- msg: &lnwire.FailChannelDisabled{},
- FailureDetail: OutgoingFailureForwardsDisabled,
- },
- )
- },
- iterations: 1,
- },
- }
-
- for _, test := range tests {
- test := test
-
- t.Run(test.name, func(t *testing.T) {
- testHtcNotifier(
- t, test.options, test.iterations,
- test.expectedEvents,
- )
- })
- }
-}
-
-// testHtcNotifier runs a htlc notifier test.
-func testHtcNotifier(t *testing.T, testOpts []serverOption, iterations int,
- getEvents htlcNotifierEvents) {
-
- t.Parallel()
-
- // First, we'll create our traditional three hop
- // network.
- channels, cleanUp, _, err := createClusterChannels(
- btcutil.UnitsPerCoin()*3,
- btcutil.UnitsPerCoin()*5)
- if err != nil {
- t.Fatalf("unable to create channel: %v", err)
- }
- defer cleanUp()
-
- // Mock time so that all events are reported with a static timestamp.
- now := time.Now()
- mockTime := func() time.Time {
- return now
- }
-
- // Create htlc notifiers for each server in the three hop network and
- // start them.
- aliceNotifier := NewHtlcNotifier(mockTime)
- if err := aliceNotifier.Start(); err != nil {
- t.Fatalf("could not start alice notifier")
- }
- defer aliceNotifier.Stop()
-
- bobNotifier := NewHtlcNotifier(mockTime)
- if err := bobNotifier.Start(); err != nil {
- t.Fatalf("could not start bob notifier")
- }
- defer bobNotifier.Stop()
-
- carolNotifier := NewHtlcNotifier(mockTime)
- if err := carolNotifier.Start(); err != nil {
- t.Fatalf("could not start carol notifier")
- }
- defer carolNotifier.Stop()
-
- // Create a notifier server option which will set our htlc notifiers
- // for the three hop network.
- notifierOption := serverOptionWithHtlcNotifier(
- aliceNotifier, bobNotifier, carolNotifier,
- )
-
- // Add the htlcNotifier option to any other options
- // set in the test.
- options := append(testOpts, notifierOption)
-
- n := newThreeHopNetwork(
- t, channels.aliceToBob,
- channels.bobToAlice, channels.bobToCarol,
- channels.carolToBob, testStartingHeight,
- options...,
- )
- if err := n.start(); err != nil {
- t.Fatalf("unable to start three hop "+
- "network: %v", err)
- }
- defer n.stop()
-
- // Before we forward anything, subscribe to htlc events
- // from each notifier.
- aliceEvents, err := aliceNotifier.SubscribeHtlcEvents()
- if err != nil {
- t.Fatalf("could not subscribe to alice's"+
- " events: %v", err)
- }
- defer aliceEvents.Cancel()
-
- bobEvents, err := bobNotifier.SubscribeHtlcEvents()
- if err != nil {
- t.Fatalf("could not subscribe to bob's"+
- " events: %v", err)
- }
- defer bobEvents.Cancel()
-
- carolEvents, err := carolNotifier.SubscribeHtlcEvents()
- if err != nil {
- t.Fatalf("could not subscribe to carol's"+
- " events: %v", err)
- }
- defer carolEvents.Cancel()
-
- // Send multiple payments, as specified by the test to test incrementing
- // of htlc ids.
- for i := 0; i < iterations; i++ {
- // We'll start off by making a payment from
- // Alice -> Bob -> Carol.
- htlc, hops := n.sendThreeHopPayment(t)
-
- alice, bob, carol := getEvents(
- channels, uint64(i), now, htlc, hops,
- )
-
- checkHtlcEvents(t, aliceEvents.Updates(), alice)
- checkHtlcEvents(t, bobEvents.Updates(), bob)
- checkHtlcEvents(t, carolEvents.Updates(), carol)
-
- }
-}
-
-// checkHtlcEvents checks that a subscription has the set of htlc events
-// we expect it to have.
-func checkHtlcEvents(t *testing.T, events <-chan interface{},
- expectedEvents []interface{}) {
-
- t.Helper()
-
- for _, expected := range expectedEvents {
- select {
- case event := <-events:
- if !reflect.DeepEqual(event, expected) {
- t.Fatalf("expected %v, got: %v", expected,
- event)
- }
-
- case <-time.After(5 * time.Second):
- t.Fatalf("expected event: %v", expected)
- }
- }
-}
-
-// sendThreeHopPayment is a helper function which sends a payment over
-// Alice -> Bob -> Carol in a three hop network and returns Alice's first htlc
-// and the remainder of the hops.
-func (n *threeHopNetwork) sendThreeHopPayment(t *testing.T) (*lnwire.UpdateAddHTLC,
- []*hop.Payload) {
-
- amount := lnwire.NewMSatFromSatoshis(btcutil.UnitsPerCoin())
-
- htlcAmt, totalTimelock, hops := generateHops(amount, testStartingHeight,
- n.firstBobChannelLink, n.carolChannelLink)
- blob, err := generateRoute(hops...)
- if err != nil {
- t.Fatal(err)
- }
- invoice, htlc, pid, err := generatePayment(
- amount, htlcAmt, totalTimelock, blob,
- )
- if err != nil {
- t.Fatal(err)
- }
-
- err = n.carolServer.registry.AddInvoice(*invoice, htlc.PaymentHash)
- if err != nil {
- t.Fatalf("unable to add invoice in carol registry: %v", err)
- }
-
- if err := n.aliceServer.htlcSwitch.SendHTLC(
- n.firstBobChannelLink.ShortChanID(), pid, htlc,
- ); err != nil {
- t.Fatalf("could not send htlc")
- }
-
- return htlc, hops
-}
-
-// getThreeHopEvents gets the set of htlc events that we expect for a payment
-// from Alice -> Bob -> Carol. If a non-nil link error is provided, the set
-// of events will fail on Bob's outgoing link.
-func getThreeHopEvents(channels *clusterChannels, htlcID uint64,
- ts time.Time, htlc *lnwire.UpdateAddHTLC, hops []*hop.Payload,
- linkError *LinkError) ([]interface{}, []interface{}, []interface{}) {
-
- aliceKey := HtlcKey{
- IncomingCircuit: zeroCircuit,
- OutgoingCircuit: channeldb.CircuitKey{
- ChanID: channels.aliceToBob.ShortChanID(),
- HtlcID: htlcID,
- },
- }
-
- // Alice always needs a forwarding event because she initiates the
- // send.
- aliceEvents := []interface{}{
- &ForwardingEvent{
- HtlcKey: aliceKey,
- HtlcInfo: HtlcInfo{
- OutgoingTimeLock: htlc.Expiry,
- OutgoingAmt: htlc.Amount,
- },
- HtlcEventType: HtlcEventTypeSend,
- Timestamp: ts,
- },
- }
-
- bobKey := HtlcKey{
- IncomingCircuit: channeldb.CircuitKey{
- ChanID: channels.bobToAlice.ShortChanID(),
- HtlcID: htlcID,
- },
- OutgoingCircuit: channeldb.CircuitKey{
- ChanID: channels.bobToCarol.ShortChanID(),
- HtlcID: htlcID,
- },
- }
-
- bobInfo := HtlcInfo{
- IncomingTimeLock: htlc.Expiry,
- IncomingAmt: htlc.Amount,
- OutgoingTimeLock: hops[1].FwdInfo.OutgoingCTLV,
- OutgoingAmt: hops[1].FwdInfo.AmountToForward,
- }
-
- // If we expect the payment to fail, we add failures for alice and
- // bob, and no events for carol because the payment never reaches her.
- if linkError != nil {
- aliceEvents = append(aliceEvents,
- &ForwardingFailEvent{
- HtlcKey: aliceKey,
- HtlcEventType: HtlcEventTypeSend,
- Timestamp: ts,
- },
- )
-
- bobEvents := []interface{}{
- &LinkFailEvent{
- HtlcKey: bobKey,
- HtlcInfo: bobInfo,
- HtlcEventType: HtlcEventTypeForward,
- LinkError: linkError,
- Incoming: false,
- Timestamp: ts,
- },
- }
-
- return aliceEvents, bobEvents, nil
- }
-
- // If we want to get events for a successful payment, we add a settle
- // for alice, a forward and settle for bob and a receive settle for
- // carol.
- aliceEvents = append(
- aliceEvents,
- &SettleEvent{
- HtlcKey: aliceKey,
- HtlcEventType: HtlcEventTypeSend,
- Timestamp: ts,
- },
- )
-
- bobEvents := []interface{}{
- &ForwardingEvent{
- HtlcKey: bobKey,
- HtlcInfo: bobInfo,
- HtlcEventType: HtlcEventTypeForward,
- Timestamp: ts,
- },
- &SettleEvent{
- HtlcKey: bobKey,
- HtlcEventType: HtlcEventTypeForward,
- Timestamp: ts,
- },
- }
-
- carolEvents := []interface{}{
- &SettleEvent{
- HtlcKey: HtlcKey{
- IncomingCircuit: channeldb.CircuitKey{
- ChanID: channels.carolToBob.ShortChanID(),
- HtlcID: htlcID,
- },
- OutgoingCircuit: zeroCircuit,
- },
- HtlcEventType: HtlcEventTypeReceive,
- Timestamp: ts,
- },
- }
-
- return aliceEvents, bobEvents, carolEvents
-}
-
-type mockForwardInterceptor struct {
- intercepted InterceptedForward
-}
-
-func (m *mockForwardInterceptor) InterceptForwardHtlc(intercepted InterceptedForward) bool {
-
- m.intercepted = intercepted
- return true
-}
-
-func (m *mockForwardInterceptor) settle(preimage lntypes.Preimage) er.R {
- return m.intercepted.Settle(preimage)
-}
-
-func (m *mockForwardInterceptor) fail() er.R {
- return m.intercepted.Fail()
-}
-
-func (m *mockForwardInterceptor) resume() er.R {
- return m.intercepted.Resume()
-}
-
-func assertNumCircuits(t *testing.T, s *Switch, pending, opened int) {
- if s.circuits.NumPending() != pending {
- t.Fatal("wrong amount of half circuits")
- }
- if s.circuits.NumOpen() != opened {
- t.Fatal("wrong amount of circuits")
- }
-}
-
-func assertOutgoingLinkReceive(t *testing.T, targetLink *mockChannelLink,
- expectReceive bool) {
-
- // Pull packet from targetLink link.
- select {
- case packet := <-targetLink.packets:
- if !expectReceive {
- t.Fatal("forward was intercepted, shouldn't land at bob link")
- } else if err := targetLink.completeCircuit(packet); err != nil {
- t.Fatalf("unable to complete payment circuit: %v", err)
- }
-
- case <-time.After(time.Second):
- if expectReceive {
- t.Fatal("request was not propagated to destination")
- }
- }
-}
-
-func TestSwitchHoldForward(t *testing.T) {
- t.Parallel()
-
- chanID1, chanID2, aliceChanID, bobChanID := genIDs()
-
- alicePeer, err := newMockServer(
- t, "alice", testStartingHeight, nil, testDefaultDelta,
- )
- if err != nil {
- t.Fatalf("unable to create alice server: %v", err)
- }
- bobPeer, err := newMockServer(
- t, "bob", testStartingHeight, nil, testDefaultDelta,
- )
- if err != nil {
- t.Fatalf("unable to create bob server: %v", err)
- }
-
- tempPath, errr := ioutil.TempDir("", "circuitdb")
- if errr != nil {
- t.Fatalf("unable to temporary path: %v", errr)
- }
-
- cdb, err := channeldb.Open(tempPath)
- if err != nil {
- t.Fatalf("unable to open channeldb: %v", err)
- }
-
- s, err := initSwitchWithDB(testStartingHeight, cdb)
- if err != nil {
- t.Fatalf("unable to init switch: %v", err)
- }
- if err := s.Start(); err != nil {
- t.Fatalf("unable to start switch: %v", err)
- }
-
- defer func() {
- if err := s.Stop(); err != nil {
- t.Fatalf(err.String())
- }
- }()
-
- aliceChannelLink := newMockChannelLink(
- s, chanID1, aliceChanID, alicePeer, true,
- )
- bobChannelLink := newMockChannelLink(
- s, chanID2, bobChanID, bobPeer, true,
- )
- if err := s.AddLink(aliceChannelLink); err != nil {
- t.Fatalf("unable to add alice link: %v", err)
- }
- if err := s.AddLink(bobChannelLink); err != nil {
- t.Fatalf("unable to add bob link: %v", err)
- }
-
- // Create request which should be forwarded from Alice channel link to
- // bob channel link.
- preimage := [sha256.Size]byte{1}
- rhash := sha256.Sum256(preimage[:])
- ogPacket := &htlcPacket{
- incomingChanID: aliceChannelLink.ShortChanID(),
- incomingHTLCID: 0,
- outgoingChanID: bobChannelLink.ShortChanID(),
- obfuscator: NewMockObfuscator(),
- htlc: &lnwire.UpdateAddHTLC{
- PaymentHash: rhash,
- Amount: 1,
- },
- }
-
- forwardInterceptor := &mockForwardInterceptor{}
- switchForwardInterceptor := NewInterceptableSwitch(s)
- switchForwardInterceptor.SetInterceptor(forwardInterceptor.InterceptForwardHtlc)
- linkQuit := make(chan struct{})
-
- // Test resume a hold forward
- assertNumCircuits(t, s, 0, 0)
- if err := switchForwardInterceptor.ForwardPackets(linkQuit, ogPacket); err != nil {
- t.Fatalf("can't forward htlc packet: %v", err)
- }
- assertNumCircuits(t, s, 0, 0)
- assertOutgoingLinkReceive(t, bobChannelLink, false)
-
- if err := forwardInterceptor.resume(); err != nil {
- t.Fatalf("failed to resume forward")
- }
- assertOutgoingLinkReceive(t, bobChannelLink, true)
- assertNumCircuits(t, s, 1, 1)
-
- // settling the htlc to close the circuit.
- settle := &htlcPacket{
- outgoingChanID: bobChannelLink.ShortChanID(),
- outgoingHTLCID: 0,
- amount: 1,
- htlc: &lnwire.UpdateFulfillHTLC{
- PaymentPreimage: preimage,
- },
- }
- if err := switchForwardInterceptor.ForwardPackets(linkQuit, settle); err != nil {
- t.Fatalf("can't forward htlc packet: %v", err)
- }
- assertOutgoingLinkReceive(t, aliceChannelLink, true)
- assertNumCircuits(t, s, 0, 0)
-
- // Test failing a hold forward
- if err := switchForwardInterceptor.ForwardPackets(linkQuit, ogPacket); err != nil {
- t.Fatalf("can't forward htlc packet: %v", err)
- }
- assertNumCircuits(t, s, 0, 0)
- assertOutgoingLinkReceive(t, bobChannelLink, false)
-
- if err := forwardInterceptor.fail(); err != nil {
- t.Fatalf("failed to cancel forward %v", err)
- }
- assertOutgoingLinkReceive(t, bobChannelLink, false)
- assertOutgoingLinkReceive(t, aliceChannelLink, true)
- assertNumCircuits(t, s, 0, 0)
-
- // Test settling a hold forward
- if err := switchForwardInterceptor.ForwardPackets(linkQuit, ogPacket); err != nil {
- t.Fatalf("can't forward htlc packet: %v", err)
- }
- assertNumCircuits(t, s, 0, 0)
- assertOutgoingLinkReceive(t, bobChannelLink, false)
-
- if err := forwardInterceptor.settle(preimage); err != nil {
- t.Fatal("failed to cancel forward")
- }
- assertOutgoingLinkReceive(t, bobChannelLink, false)
- assertOutgoingLinkReceive(t, aliceChannelLink, true)
- assertNumCircuits(t, s, 0, 0)
-}
diff --git a/lnd/htlcswitch/test_utils.go b/lnd/htlcswitch/test_utils.go
deleted file mode 100644
index 2a704a86..00000000
--- a/lnd/htlcswitch/test_utils.go
+++ /dev/null
@@ -1,1425 +0,0 @@
-package htlcswitch
-
-import (
- "bytes"
- crand "crypto/rand"
- "crypto/sha256"
- "encoding/binary"
- "io/ioutil"
- "math/big"
- "net"
- "os"
- "runtime"
- "runtime/pprof"
- "sync/atomic"
- "testing"
- "time"
-
- "github.com/pkt-cash/pktd/btcec"
- "github.com/pkt-cash/pktd/btcutil"
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/chaincfg/chainhash"
- sphinx "github.com/pkt-cash/pktd/lightning-onion"
- "github.com/pkt-cash/pktd/lnd/channeldb"
- "github.com/pkt-cash/pktd/lnd/channeldb/kvdb"
- "github.com/pkt-cash/pktd/lnd/contractcourt"
- "github.com/pkt-cash/pktd/lnd/htlcswitch/hop"
- "github.com/pkt-cash/pktd/lnd/input"
- "github.com/pkt-cash/pktd/lnd/keychain"
- "github.com/pkt-cash/pktd/lnd/lnpeer"
- "github.com/pkt-cash/pktd/lnd/lntest/mock"
- "github.com/pkt-cash/pktd/lnd/lntest/wait"
- "github.com/pkt-cash/pktd/lnd/lntypes"
- "github.com/pkt-cash/pktd/lnd/lnwallet"
- "github.com/pkt-cash/pktd/lnd/lnwallet/chainfee"
- "github.com/pkt-cash/pktd/lnd/lnwire"
- "github.com/pkt-cash/pktd/lnd/shachain"
- "github.com/pkt-cash/pktd/lnd/ticker"
- "github.com/pkt-cash/pktd/wire"
-)
-
-var (
- alicePrivKey = []byte("alice priv key")
- bobPrivKey = []byte("bob priv key")
- carolPrivKey = []byte("carol priv key")
-
- testSig = &btcec.Signature{
- R: new(big.Int),
- S: new(big.Int),
- }
- wireSig, _ = lnwire.NewSigFromSignature(testSig)
-
- _, _ = testSig.R.SetString("6372440660162918006277497454296753625158993"+
- "5445068131219452686511677818569431", 10)
- _, _ = testSig.S.SetString("1880105606924982582529128710493133386286603"+
- "3135609736119018462340006816851118", 10)
-
- // testTx is used as the default funding txn for single-funder channels.
- testTx = &wire.MsgTx{
- Version: 1,
- TxIn: []*wire.TxIn{
- {
- PreviousOutPoint: wire.OutPoint{
- Hash: chainhash.Hash{},
- Index: 0xffffffff,
- },
- SignatureScript: []byte{0x04, 0x31, 0xdc, 0x00, 0x1b, 0x01, 0x62},
- Sequence: 0xffffffff,
- },
- },
- TxOut: []*wire.TxOut{
- {
- Value: 5000000000,
- PkScript: []byte{
- 0x41, // OP_DATA_65
- 0x04, 0xd6, 0x4b, 0xdf, 0xd0, 0x9e, 0xb1, 0xc5,
- 0xfe, 0x29, 0x5a, 0xbd, 0xeb, 0x1d, 0xca, 0x42,
- 0x81, 0xbe, 0x98, 0x8e, 0x2d, 0xa0, 0xb6, 0xc1,
- 0xc6, 0xa5, 0x9d, 0xc2, 0x26, 0xc2, 0x86, 0x24,
- 0xe1, 0x81, 0x75, 0xe8, 0x51, 0xc9, 0x6b, 0x97,
- 0x3d, 0x81, 0xb0, 0x1c, 0xc3, 0x1f, 0x04, 0x78,
- 0x34, 0xbc, 0x06, 0xd6, 0xd6, 0xed, 0xf6, 0x20,
- 0xd1, 0x84, 0x24, 0x1a, 0x6a, 0xed, 0x8b, 0x63,
- 0xa6, // 65-byte signature
- 0xac, // OP_CHECKSIG
- },
- },
- },
- LockTime: 5,
- }
-
- testBatchTimeout = 50 * time.Millisecond
-)
-
-var idSeqNum uint64
-
-// genID generates a unique tuple to identify a test channel.
-func genID() (lnwire.ChannelID, lnwire.ShortChannelID) {
- id := atomic.AddUint64(&idSeqNum, 1)
-
- var scratch [8]byte
-
- binary.BigEndian.PutUint64(scratch[:], id)
- hash1, _ := chainhash.NewHash(bytes.Repeat(scratch[:], 4))
-
- chanPoint1 := wire.NewOutPoint(hash1, uint32(id))
- chanID1 := lnwire.NewChanIDFromOutPoint(chanPoint1)
- aliceChanID := lnwire.NewShortChanIDFromInt(id)
-
- return chanID1, aliceChanID
-}
-
-// genIDs generates ids for two test channels.
-func genIDs() (lnwire.ChannelID, lnwire.ChannelID, lnwire.ShortChannelID,
- lnwire.ShortChannelID) {
-
- chanID1, aliceChanID := genID()
- chanID2, bobChanID := genID()
-
- return chanID1, chanID2, aliceChanID, bobChanID
-}
-
-// mockGetChanUpdateMessage helper function which returns topology update of
-// the channel
-func mockGetChanUpdateMessage(cid lnwire.ShortChannelID) (*lnwire.ChannelUpdate, er.R) {
- return &lnwire.ChannelUpdate{
- Signature: wireSig,
- }, nil
-}
-
-// generateRandomBytes returns securely generated random bytes.
-// It will return an error if the system's secure random
-// number generator fails to function correctly, in which
-// case the caller should not continue.
-func generateRandomBytes(n int) ([]byte, er.R) {
- b := make([]byte, n)
-
- // TODO(roasbeef): should use counter in tests (atomic) rather than
- // this
-
- _, err := crand.Read(b)
- // Note that Err == nil only if we read len(b) bytes.
- if err != nil {
- return nil, er.E(err)
- }
-
- return b, nil
-}
-
-type testLightningChannel struct {
- channel *lnwallet.LightningChannel
- restore func() (*lnwallet.LightningChannel, er.R)
-}
-
-// createTestChannel creates the channel and returns our and remote channels
-// representations.
-//
-// TODO(roasbeef): need to factor out, similar func re-used in many parts of codebase
-func createTestChannel(alicePrivKey, bobPrivKey []byte,
- aliceAmount, bobAmount, aliceReserve, bobReserve btcutil.Amount,
- chanID lnwire.ShortChannelID) (*testLightningChannel,
- *testLightningChannel, func(), er.R) {
-
- aliceKeyPriv, aliceKeyPub := btcec.PrivKeyFromBytes(btcec.S256(), alicePrivKey)
- bobKeyPriv, bobKeyPub := btcec.PrivKeyFromBytes(btcec.S256(), bobPrivKey)
-
- channelCapacity := aliceAmount + bobAmount
- csvTimeoutAlice := uint32(5)
- csvTimeoutBob := uint32(4)
-
- aliceConstraints := &channeldb.ChannelConstraints{
- DustLimit: btcutil.Amount(200),
- MaxPendingAmount: lnwire.NewMSatFromSatoshis(
- channelCapacity),
- ChanReserve: aliceReserve,
- MinHTLC: 0,
- MaxAcceptedHtlcs: input.MaxHTLCNumber / 2,
- CsvDelay: uint16(csvTimeoutAlice),
- }
-
- bobConstraints := &channeldb.ChannelConstraints{
- DustLimit: btcutil.Amount(800),
- MaxPendingAmount: lnwire.NewMSatFromSatoshis(
- channelCapacity),
- ChanReserve: bobReserve,
- MinHTLC: 0,
- MaxAcceptedHtlcs: input.MaxHTLCNumber / 2,
- CsvDelay: uint16(csvTimeoutBob),
- }
-
- var hash [sha256.Size]byte
- randomSeed, err := generateRandomBytes(sha256.Size)
- if err != nil {
- return nil, nil, nil, err
- }
- copy(hash[:], randomSeed)
-
- prevOut := &wire.OutPoint{
- Hash: chainhash.Hash(hash),
- Index: 0,
- }
- fundingTxIn := wire.NewTxIn(prevOut, nil, nil)
-
- aliceCfg := channeldb.ChannelConfig{
- ChannelConstraints: *aliceConstraints,
- MultiSigKey: keychain.KeyDescriptor{
- PubKey: aliceKeyPub,
- },
- RevocationBasePoint: keychain.KeyDescriptor{
- PubKey: aliceKeyPub,
- },
- PaymentBasePoint: keychain.KeyDescriptor{
- PubKey: aliceKeyPub,
- },
- DelayBasePoint: keychain.KeyDescriptor{
- PubKey: aliceKeyPub,
- },
- HtlcBasePoint: keychain.KeyDescriptor{
- PubKey: aliceKeyPub,
- },
- }
- bobCfg := channeldb.ChannelConfig{
- ChannelConstraints: *bobConstraints,
- MultiSigKey: keychain.KeyDescriptor{
- PubKey: bobKeyPub,
- },
- RevocationBasePoint: keychain.KeyDescriptor{
- PubKey: bobKeyPub,
- },
- PaymentBasePoint: keychain.KeyDescriptor{
- PubKey: bobKeyPub,
- },
- DelayBasePoint: keychain.KeyDescriptor{
- PubKey: bobKeyPub,
- },
- HtlcBasePoint: keychain.KeyDescriptor{
- PubKey: bobKeyPub,
- },
- }
-
- bobRoot, err := chainhash.NewHash(bobKeyPriv.Serialize())
- if err != nil {
- return nil, nil, nil, err
- }
- bobPreimageProducer := shachain.NewRevocationProducer(*bobRoot)
- bobFirstRevoke, err := bobPreimageProducer.AtIndex(0)
- if err != nil {
- return nil, nil, nil, err
- }
- bobCommitPoint := input.ComputeCommitmentPoint(bobFirstRevoke[:])
-
- aliceRoot, err := chainhash.NewHash(aliceKeyPriv.Serialize())
- if err != nil {
- return nil, nil, nil, err
- }
- alicePreimageProducer := shachain.NewRevocationProducer(*aliceRoot)
- aliceFirstRevoke, err := alicePreimageProducer.AtIndex(0)
- if err != nil {
- return nil, nil, nil, err
- }
- aliceCommitPoint := input.ComputeCommitmentPoint(aliceFirstRevoke[:])
-
- aliceCommitTx, bobCommitTx, err := lnwallet.CreateCommitmentTxns(
- aliceAmount, bobAmount, &aliceCfg, &bobCfg, aliceCommitPoint,
- bobCommitPoint, *fundingTxIn, channeldb.SingleFunderTweaklessBit,
- )
- if err != nil {
- return nil, nil, nil, err
- }
-
- alicePath, errr := ioutil.TempDir("", "alicedb")
- if errr != nil {
- return nil, nil, nil, er.E(errr)
- }
-
- dbAlice, err := channeldb.Open(alicePath)
- if err != nil {
- return nil, nil, nil, err
- }
-
- bobPath, errr := ioutil.TempDir("", "bobdb")
- if errr != nil {
- return nil, nil, nil, er.E(errr)
- }
-
- dbBob, err := channeldb.Open(bobPath)
- if err != nil {
- return nil, nil, nil, err
- }
-
- estimator := chainfee.NewStaticEstimator(6000, 0)
- feePerKw, err := estimator.EstimateFeePerKW(1)
- if err != nil {
- return nil, nil, nil, err
- }
- commitFee := feePerKw.FeeForWeight(724)
-
- const broadcastHeight = 1
- bobAddr := &net.TCPAddr{
- IP: net.ParseIP("127.0.0.1"),
- Port: 18555,
- }
-
- aliceAddr := &net.TCPAddr{
- IP: net.ParseIP("127.0.0.1"),
- Port: 18556,
- }
-
- aliceCommit := channeldb.ChannelCommitment{
- CommitHeight: 0,
- LocalBalance: lnwire.NewMSatFromSatoshis(aliceAmount - commitFee),
- RemoteBalance: lnwire.NewMSatFromSatoshis(bobAmount),
- CommitFee: commitFee,
- FeePerKw: btcutil.Amount(feePerKw),
- CommitTx: aliceCommitTx,
- CommitSig: bytes.Repeat([]byte{1}, 71),
- }
- bobCommit := channeldb.ChannelCommitment{
- CommitHeight: 0,
- LocalBalance: lnwire.NewMSatFromSatoshis(bobAmount),
- RemoteBalance: lnwire.NewMSatFromSatoshis(aliceAmount - commitFee),
- CommitFee: commitFee,
- FeePerKw: btcutil.Amount(feePerKw),
- CommitTx: bobCommitTx,
- CommitSig: bytes.Repeat([]byte{1}, 71),
- }
-
- aliceChannelState := &channeldb.OpenChannel{
- LocalChanCfg: aliceCfg,
- RemoteChanCfg: bobCfg,
- IdentityPub: aliceKeyPub,
- FundingOutpoint: *prevOut,
- ChanType: channeldb.SingleFunderTweaklessBit,
- IsInitiator: true,
- Capacity: channelCapacity,
- RemoteCurrentRevocation: bobCommitPoint,
- RevocationProducer: alicePreimageProducer,
- RevocationStore: shachain.NewRevocationStore(),
- LocalCommitment: aliceCommit,
- RemoteCommitment: aliceCommit,
- ShortChannelID: chanID,
- Db: dbAlice,
- Packager: channeldb.NewChannelPackager(chanID),
- FundingTxn: testTx,
- }
-
- bobChannelState := &channeldb.OpenChannel{
- LocalChanCfg: bobCfg,
- RemoteChanCfg: aliceCfg,
- IdentityPub: bobKeyPub,
- FundingOutpoint: *prevOut,
- ChanType: channeldb.SingleFunderTweaklessBit,
- IsInitiator: false,
- Capacity: channelCapacity,
- RemoteCurrentRevocation: aliceCommitPoint,
- RevocationProducer: bobPreimageProducer,
- RevocationStore: shachain.NewRevocationStore(),
- LocalCommitment: bobCommit,
- RemoteCommitment: bobCommit,
- ShortChannelID: chanID,
- Db: dbBob,
- Packager: channeldb.NewChannelPackager(chanID),
- }
-
- if err := aliceChannelState.SyncPending(bobAddr, broadcastHeight); err != nil {
- return nil, nil, nil, err
- }
-
- if err := bobChannelState.SyncPending(aliceAddr, broadcastHeight); err != nil {
- return nil, nil, nil, err
- }
-
- cleanUpFunc := func() {
- dbAlice.Close()
- dbBob.Close()
- os.RemoveAll(bobPath)
- os.RemoveAll(alicePath)
- }
-
- aliceSigner := &mock.SingleSigner{Privkey: aliceKeyPriv}
- bobSigner := &mock.SingleSigner{Privkey: bobKeyPriv}
-
- alicePool := lnwallet.NewSigPool(runtime.NumCPU(), aliceSigner)
- channelAlice, err := lnwallet.NewLightningChannel(
- aliceSigner, aliceChannelState, alicePool,
- )
- if err != nil {
- return nil, nil, nil, err
- }
- alicePool.Start()
-
- bobPool := lnwallet.NewSigPool(runtime.NumCPU(), bobSigner)
- channelBob, err := lnwallet.NewLightningChannel(
- bobSigner, bobChannelState, bobPool,
- )
- if err != nil {
- return nil, nil, nil, err
- }
- bobPool.Start()
-
- // Now that the channel are open, simulate the start of a session by
- // having Alice and Bob extend their revocation windows to each other.
- aliceNextRevoke, err := channelAlice.NextRevocationKey()
- if err != nil {
- return nil, nil, nil, err
- }
- if err := channelBob.InitNextRevocation(aliceNextRevoke); err != nil {
- return nil, nil, nil, err
- }
-
- bobNextRevoke, err := channelBob.NextRevocationKey()
- if err != nil {
- return nil, nil, nil, err
- }
- if err := channelAlice.InitNextRevocation(bobNextRevoke); err != nil {
- return nil, nil, nil, err
- }
-
- restoreAlice := func() (*lnwallet.LightningChannel, er.R) {
- aliceStoredChannels, err := dbAlice.FetchOpenChannels(aliceKeyPub)
- switch {
- case err == nil:
- case kvdb.ErrDatabaseNotOpen.Is(err):
- dbAlice, err = channeldb.Open(dbAlice.Path())
- if err != nil {
- return nil, er.Errorf("unable to reopen alice "+
- "db: %v", err)
- }
-
- aliceStoredChannels, err = dbAlice.FetchOpenChannels(aliceKeyPub)
- if err != nil {
- return nil, er.Errorf("unable to fetch alice "+
- "channel: %v", err)
- }
- default:
- return nil, er.Errorf("unable to fetch alice channel: "+
- "%v", err)
- }
-
- var aliceStoredChannel *channeldb.OpenChannel
- for _, channel := range aliceStoredChannels {
- if channel.FundingOutpoint.String() == prevOut.String() {
- aliceStoredChannel = channel
- break
- }
- }
-
- if aliceStoredChannel == nil {
- return nil, er.New("unable to find stored alice channel")
- }
-
- newAliceChannel, errr := lnwallet.NewLightningChannel(
- aliceSigner, aliceStoredChannel, alicePool,
- )
- if errr != nil {
- return nil, er.Errorf("unable to create new channel: %v",
- errr)
- }
-
- return newAliceChannel, nil
- }
-
- restoreBob := func() (*lnwallet.LightningChannel, er.R) {
- bobStoredChannels, err := dbBob.FetchOpenChannels(bobKeyPub)
- switch {
- case err == nil:
- case kvdb.ErrDatabaseNotOpen.Is(err):
- dbBob, errr := channeldb.Open(dbBob.Path())
- if errr != nil {
- return nil, er.Errorf("unable to reopen bob "+
- "db: %v", errr)
- }
-
- bobStoredChannels, err = dbBob.FetchOpenChannels(bobKeyPub)
- if err != nil {
- return nil, er.Errorf("unable to fetch bob "+
- "channel: %v", err)
- }
- default:
- return nil, er.Errorf("unable to fetch bob channel: "+
- "%v", err)
- }
-
- var bobStoredChannel *channeldb.OpenChannel
- for _, channel := range bobStoredChannels {
- if channel.FundingOutpoint.String() == prevOut.String() {
- bobStoredChannel = channel
- break
- }
- }
-
- if bobStoredChannel == nil {
- return nil, er.New("unable to find stored bob channel")
- }
-
- newBobChannel, errr := lnwallet.NewLightningChannel(
- bobSigner, bobStoredChannel, bobPool,
- )
- if errr != nil {
- return nil, er.Errorf("unable to create new channel: %v",
- errr)
- }
- return newBobChannel, nil
- }
-
- testLightningChannelAlice := &testLightningChannel{
- channel: channelAlice,
- restore: restoreAlice,
- }
-
- testLightningChannelBob := &testLightningChannel{
- channel: channelBob,
- restore: restoreBob,
- }
-
- return testLightningChannelAlice, testLightningChannelBob, cleanUpFunc,
- nil
-}
-
-// getChanID retrieves the channel point from an lnnwire message.
-func getChanID(msg lnwire.Message) (lnwire.ChannelID, er.R) {
- var chanID lnwire.ChannelID
- switch msg := msg.(type) {
- case *lnwire.UpdateAddHTLC:
- chanID = msg.ChanID
- case *lnwire.UpdateFulfillHTLC:
- chanID = msg.ChanID
- case *lnwire.UpdateFailHTLC:
- chanID = msg.ChanID
- case *lnwire.RevokeAndAck:
- chanID = msg.ChanID
- case *lnwire.CommitSig:
- chanID = msg.ChanID
- case *lnwire.ChannelReestablish:
- chanID = msg.ChanID
- case *lnwire.FundingLocked:
- chanID = msg.ChanID
- case *lnwire.UpdateFee:
- chanID = msg.ChanID
- default:
- return chanID, er.Errorf("unknown type: %T", msg)
- }
-
- return chanID, nil
-}
-
-// generateHoldPayment generates the htlc add request by given path blob and
-// invoice which should be added by destination peer.
-func generatePaymentWithPreimage(invoiceAmt, htlcAmt lnwire.MilliSatoshi,
- timelock uint32, blob [lnwire.OnionPacketSize]byte,
- preimage *lntypes.Preimage, rhash, payAddr [32]byte) (
- *channeldb.Invoice, *lnwire.UpdateAddHTLC, uint64, er.R) {
-
- // Create the db invoice. Normally the payment requests needs to be set,
- // because it is decoded in InvoiceRegistry to obtain the cltv expiry.
- // But because the mock registry used in tests is mocking the decode
- // step and always returning the value of testInvoiceCltvExpiry, we
- // don't need to bother here with creating and signing a payment
- // request.
-
- invoice := &channeldb.Invoice{
- CreationDate: time.Now(),
- Terms: channeldb.ContractTerm{
- FinalCltvDelta: testInvoiceCltvExpiry,
- Value: invoiceAmt,
- PaymentPreimage: preimage,
- PaymentAddr: payAddr,
- Features: lnwire.NewFeatureVector(
- nil, lnwire.Features,
- ),
- },
- HodlInvoice: preimage == nil,
- }
-
- htlc := &lnwire.UpdateAddHTLC{
- PaymentHash: rhash,
- Amount: htlcAmt,
- Expiry: timelock,
- OnionBlob: blob,
- }
-
- pid, err := generateRandomBytes(8)
- if err != nil {
- return nil, nil, 0, err
- }
- paymentID := binary.BigEndian.Uint64(pid)
-
- return invoice, htlc, paymentID, nil
-}
-
-// generatePayment generates the htlc add request by given path blob and
-// invoice which should be added by destination peer.
-func generatePayment(invoiceAmt, htlcAmt lnwire.MilliSatoshi, timelock uint32,
- blob [lnwire.OnionPacketSize]byte) (*channeldb.Invoice,
- *lnwire.UpdateAddHTLC, uint64, er.R) {
-
- var preimage lntypes.Preimage
- r, err := generateRandomBytes(sha256.Size)
- if err != nil {
- return nil, nil, 0, err
- }
- copy(preimage[:], r)
-
- rhash := sha256.Sum256(preimage[:])
-
- var payAddr [sha256.Size]byte
- r, err = generateRandomBytes(sha256.Size)
- if err != nil {
- return nil, nil, 0, err
- }
- copy(payAddr[:], r)
-
- return generatePaymentWithPreimage(
- invoiceAmt, htlcAmt, timelock, blob, &preimage, rhash, payAddr,
- )
-}
-
-// generateRoute generates the path blob by given array of peers.
-func generateRoute(hops ...*hop.Payload) (
- [lnwire.OnionPacketSize]byte, er.R) {
-
- var blob [lnwire.OnionPacketSize]byte
- if len(hops) == 0 {
- return blob, er.New("empty path")
- }
-
- iterator := newMockHopIterator(hops...)
-
- w := bytes.NewBuffer(blob[0:0])
- if err := iterator.EncodeNextHop(w); err != nil {
- return blob, err
- }
-
- return blob, nil
-
-}
-
-// threeHopNetwork is used for managing the created cluster of 3 hops.
-type threeHopNetwork struct {
- aliceServer *mockServer
- aliceChannelLink *channelLink
- aliceOnionDecoder *mockIteratorDecoder
-
- bobServer *mockServer
- firstBobChannelLink *channelLink
- secondBobChannelLink *channelLink
- bobOnionDecoder *mockIteratorDecoder
-
- carolServer *mockServer
- carolChannelLink *channelLink
- carolOnionDecoder *mockIteratorDecoder
-
- hopNetwork
-}
-
-// generateHops creates the per hop payload, the total amount to be sent, and
-// also the time lock value needed to route an HTLC with the target amount over
-// the specified path.
-func generateHops(payAmt lnwire.MilliSatoshi, startingHeight uint32,
- path ...*channelLink) (lnwire.MilliSatoshi, uint32, []*hop.Payload) {
-
- totalTimelock := startingHeight
- runningAmt := payAmt
-
- hops := make([]*hop.Payload, len(path))
- for i := len(path) - 1; i >= 0; i-- {
- // If this is the last hop, then the next hop is the special
- // "exit node". Otherwise, we look to the "prior" hop.
- nextHop := hop.Exit
- if i != len(path)-1 {
- nextHop = path[i+1].channel.ShortChanID()
- }
-
- var timeLock uint32
- // If this is the last, hop, then the time lock will be their
- // specified delta policy plus our starting height.
- if i == len(path)-1 {
- totalTimelock += testInvoiceCltvExpiry
- timeLock = totalTimelock
- } else {
- // Otherwise, the outgoing time lock should be the
- // incoming timelock minus their specified delta.
- delta := path[i+1].cfg.FwrdingPolicy.TimeLockDelta
- totalTimelock += delta
- timeLock = totalTimelock - delta
- }
-
- // Finally, we'll need to calculate the amount to forward. For
- // the last hop, it's just the payment amount.
- amount := payAmt
- if i != len(path)-1 {
- prevHop := hops[i+1]
- prevAmount := prevHop.ForwardingInfo().AmountToForward
-
- fee := ExpectedFee(path[i].cfg.FwrdingPolicy, prevAmount)
- runningAmt += fee
-
- // Otherwise, for a node to forward an HTLC, then
- // following inequality most hold true:
- // * amt_in - fee >= amt_to_forward
- amount = runningAmt - fee
- }
-
- var nextHopBytes [8]byte
- binary.BigEndian.PutUint64(nextHopBytes[:], nextHop.ToUint64())
-
- hops[i] = hop.NewLegacyPayload(&sphinx.HopData{
- Realm: [1]byte{}, // hop.BitcoinNetwork
- NextAddress: nextHopBytes,
- ForwardAmount: uint64(amount),
- OutgoingCltv: timeLock,
- })
- }
-
- return runningAmt, totalTimelock, hops
-}
-
-type paymentResponse struct {
- rhash lntypes.Hash
- err chan er.R
-}
-
-func (r *paymentResponse) Wait(d time.Duration) (lntypes.Hash, er.R) {
- return r.rhash, waitForPaymentResult(r.err, d)
-}
-
-// waitForPaymentResult waits for either an error to be received on c or a
-// timeout.
-func waitForPaymentResult(c chan er.R, d time.Duration) er.R {
- select {
- case err := <-c:
- close(c)
- return err
- case <-time.After(d):
- return er.New("htlc was not settled in time")
- }
-}
-
-// waitForPayFuncResult executes the given function and waits for a result with
-// a timeout.
-func waitForPayFuncResult(payFunc func() er.R, d time.Duration) er.R {
- errChan := make(chan er.R)
- go func() {
- errChan <- payFunc()
- }()
-
- return waitForPaymentResult(errChan, d)
-}
-
-// makePayment takes the destination node and amount as input, sends the
-// payment and returns the error channel to wait for error to be received and
-// invoice in order to check its status after the payment finished.
-//
-// With this function you can send payments:
-// * from Alice to Bob
-// * from Alice to Carol through the Bob
-// * from Alice to some another peer through the Bob
-func makePayment(sendingPeer, receivingPeer lnpeer.Peer,
- firstHop lnwire.ShortChannelID, hops []*hop.Payload,
- invoiceAmt, htlcAmt lnwire.MilliSatoshi,
- timelock uint32) *paymentResponse {
-
- paymentErr := make(chan er.R, 1)
- var rhash lntypes.Hash
-
- invoice, payFunc, err := preparePayment(sendingPeer, receivingPeer,
- firstHop, hops, invoiceAmt, htlcAmt, timelock,
- )
- if err != nil {
- paymentErr <- err
- return &paymentResponse{
- rhash: rhash,
- err: paymentErr,
- }
- }
-
- rhash = invoice.Terms.PaymentPreimage.Hash()
-
- // Send payment and expose err channel.
- go func() {
- paymentErr <- payFunc()
- }()
-
- return &paymentResponse{
- rhash: rhash,
- err: paymentErr,
- }
-}
-
-// preparePayment creates an invoice at the receivingPeer and returns a function
-// that, when called, launches the payment from the sendingPeer.
-func preparePayment(sendingPeer, receivingPeer lnpeer.Peer,
- firstHop lnwire.ShortChannelID, hops []*hop.Payload,
- invoiceAmt, htlcAmt lnwire.MilliSatoshi,
- timelock uint32) (*channeldb.Invoice, func() er.R, er.R) {
-
- sender := sendingPeer.(*mockServer)
- receiver := receivingPeer.(*mockServer)
-
- // Generate route convert it to blob, and return next destination for
- // htlc add request.
- blob, err := generateRoute(hops...)
- if err != nil {
- return nil, nil, err
- }
-
- // Generate payment: invoice and htlc.
- invoice, htlc, pid, err := generatePayment(
- invoiceAmt, htlcAmt, timelock, blob,
- )
- if err != nil {
- return nil, nil, err
- }
-
- // Check who is last in the route and add invoice to server registry.
- hash := invoice.Terms.PaymentPreimage.Hash()
- if err := receiver.registry.AddInvoice(*invoice, hash); err != nil {
- return nil, nil, err
- }
-
- // Send payment and expose err channel.
- return invoice, func() er.R {
- err := sender.htlcSwitch.SendHTLC(
- firstHop, pid, htlc,
- )
- if err != nil {
- return err
- }
- resultChan, err := sender.htlcSwitch.GetPaymentResult(
- pid, hash, newMockDeobfuscator(),
- )
- if err != nil {
- return err
- }
-
- result, ok := <-resultChan
- if !ok {
- return er.Errorf("shutting down")
- }
-
- if result.Error != nil {
- return result.Error
- }
-
- return nil
- }, nil
-}
-
-// start starts the three hop network alice,bob,carol servers.
-func (n *threeHopNetwork) start() er.R {
- if err := n.aliceServer.Start(); err != nil {
- return err
- }
- if err := n.bobServer.Start(); err != nil {
- return err
- }
- if err := n.carolServer.Start(); err != nil {
- return err
- }
-
- return waitLinksEligible(map[string]*channelLink{
- "alice": n.aliceChannelLink,
- "bob first": n.firstBobChannelLink,
- "bob second": n.secondBobChannelLink,
- "carol": n.carolChannelLink,
- })
-}
-
-// stop stops nodes and cleanup its databases.
-func (n *threeHopNetwork) stop() {
- done := make(chan struct{})
- go func() {
- n.aliceServer.Stop()
- done <- struct{}{}
- }()
-
- go func() {
- n.bobServer.Stop()
- done <- struct{}{}
- }()
-
- go func() {
- n.carolServer.Stop()
- done <- struct{}{}
- }()
-
- for i := 0; i < 3; i++ {
- <-done
- }
-}
-
-type clusterChannels struct {
- aliceToBob *lnwallet.LightningChannel
- bobToAlice *lnwallet.LightningChannel
- bobToCarol *lnwallet.LightningChannel
- carolToBob *lnwallet.LightningChannel
-}
-
-// createClusterChannels creates lightning channels which are needed for
-// network cluster to be initialized.
-func createClusterChannels(aliceToBob, bobToCarol btcutil.Amount) (
- *clusterChannels, func(), func() (*clusterChannels, er.R), er.R) {
-
- _, _, firstChanID, secondChanID := genIDs()
-
- // Create lightning channels between Alice<->Bob and Bob<->Carol
- aliceChannel, firstBobChannel, cleanAliceBob, err :=
- createTestChannel(alicePrivKey, bobPrivKey, aliceToBob,
- aliceToBob, 0, 0, firstChanID)
- if err != nil {
- return nil, nil, nil, er.Errorf("unable to create "+
- "alice<->bob channel: %v", err)
- }
-
- secondBobChannel, carolChannel, cleanBobCarol, err :=
- createTestChannel(bobPrivKey, carolPrivKey, bobToCarol,
- bobToCarol, 0, 0, secondChanID)
- if err != nil {
- cleanAliceBob()
- return nil, nil, nil, er.Errorf("unable to create "+
- "bob<->carol channel: %v", err)
- }
-
- cleanUp := func() {
- cleanAliceBob()
- cleanBobCarol()
- }
-
- restoreFromDb := func() (*clusterChannels, er.R) {
-
- a2b, err := aliceChannel.restore()
- if err != nil {
- return nil, err
- }
-
- b2a, err := firstBobChannel.restore()
- if err != nil {
- return nil, err
- }
-
- b2c, err := secondBobChannel.restore()
- if err != nil {
- return nil, err
- }
-
- c2b, err := carolChannel.restore()
- if err != nil {
- return nil, err
- }
-
- return &clusterChannels{
- aliceToBob: a2b,
- bobToAlice: b2a,
- bobToCarol: b2c,
- carolToBob: c2b,
- }, nil
- }
-
- return &clusterChannels{
- aliceToBob: aliceChannel.channel,
- bobToAlice: firstBobChannel.channel,
- bobToCarol: secondBobChannel.channel,
- carolToBob: carolChannel.channel,
- }, cleanUp, restoreFromDb, nil
-}
-
-// newThreeHopNetwork function creates the following topology and returns the
-// control object to manage this cluster:
-//
-// alice bob carol
-// server - <-connection-> - server - - <-connection-> - - - server
-// | | |
-// alice htlc bob htlc carol htlc
-// switch switch \ switch
-// | | \ |
-// | | \ |
-// alice first bob second bob carol
-// channel link channel link channel link channel link
-//
-// This function takes server options which can be used to apply custom
-// settings to alice, bob and carol.
-func newThreeHopNetwork(t testing.TB, aliceChannel, firstBobChannel,
- secondBobChannel, carolChannel *lnwallet.LightningChannel,
- startingHeight uint32, opts ...serverOption) *threeHopNetwork {
-
- aliceDb := aliceChannel.State().Db
- bobDb := firstBobChannel.State().Db
- carolDb := carolChannel.State().Db
-
- hopNetwork := newHopNetwork()
-
- // Create three peers/servers.
- aliceServer, err := newMockServer(
- t, "alice", startingHeight, aliceDb, hopNetwork.defaultDelta,
- )
- if err != nil {
- t.Fatalf("unable to create alice server: %v", err)
- }
- bobServer, err := newMockServer(
- t, "bob", startingHeight, bobDb, hopNetwork.defaultDelta,
- )
- if err != nil {
- t.Fatalf("unable to create bob server: %v", err)
- }
- carolServer, err := newMockServer(
- t, "carol", startingHeight, carolDb, hopNetwork.defaultDelta,
- )
- if err != nil {
- t.Fatalf("unable to create carol server: %v", err)
- }
-
- // Apply all additional functional options to the servers before
- // creating any links.
- for _, option := range opts {
- option(aliceServer, bobServer, carolServer)
- }
-
- // Create mock decoder instead of sphinx one in order to mock the route
- // which htlc should follow.
- aliceDecoder := newMockIteratorDecoder()
- bobDecoder := newMockIteratorDecoder()
- carolDecoder := newMockIteratorDecoder()
-
- aliceChannelLink, err := hopNetwork.createChannelLink(aliceServer,
- bobServer, aliceChannel, aliceDecoder,
- )
- if err != nil {
- t.Fatal(err)
- }
-
- firstBobChannelLink, err := hopNetwork.createChannelLink(bobServer,
- aliceServer, firstBobChannel, bobDecoder)
- if err != nil {
- t.Fatal(err)
- }
-
- secondBobChannelLink, err := hopNetwork.createChannelLink(bobServer,
- carolServer, secondBobChannel, bobDecoder)
- if err != nil {
- t.Fatal(err)
- }
-
- carolChannelLink, err := hopNetwork.createChannelLink(carolServer,
- bobServer, carolChannel, carolDecoder)
- if err != nil {
- t.Fatal(err)
- }
-
- return &threeHopNetwork{
- aliceServer: aliceServer,
- aliceChannelLink: aliceChannelLink.(*channelLink),
- aliceOnionDecoder: aliceDecoder,
-
- bobServer: bobServer,
- firstBobChannelLink: firstBobChannelLink.(*channelLink),
- secondBobChannelLink: secondBobChannelLink.(*channelLink),
- bobOnionDecoder: bobDecoder,
-
- carolServer: carolServer,
- carolChannelLink: carolChannelLink.(*channelLink),
- carolOnionDecoder: carolDecoder,
-
- hopNetwork: *hopNetwork,
- }
-}
-
-// serverOption is a function which alters the three servers created for
-// a three hop network to allow custom settings on each server.
-type serverOption func(aliceServer, bobServer, carolServer *mockServer)
-
-// serverOptionWithHtlcNotifier is a functional option for the creation of
-// three hop network servers which allows setting of htlc notifiers.
-// Note that these notifiers should be started and stopped by the calling
-// function.
-func serverOptionWithHtlcNotifier(alice, bob,
- carol *HtlcNotifier) serverOption {
-
- return func(aliceServer, bobServer, carolServer *mockServer) {
- aliceServer.htlcSwitch.cfg.HtlcNotifier = alice
- bobServer.htlcSwitch.cfg.HtlcNotifier = bob
- carolServer.htlcSwitch.cfg.HtlcNotifier = carol
- }
-}
-
-// serverOptionRejectHtlc is the functional option for setting the reject
-// htlc config option in each server's switch.
-func serverOptionRejectHtlc(alice, bob, carol bool) serverOption {
- return func(aliceServer, bobServer, carolServer *mockServer) {
- aliceServer.htlcSwitch.cfg.RejectHTLC = alice
- bobServer.htlcSwitch.cfg.RejectHTLC = bob
- carolServer.htlcSwitch.cfg.RejectHTLC = carol
- }
-}
-
-// createTwoClusterChannels creates lightning channels which are needed for
-// a 2 hop network cluster to be initialized.
-func createTwoClusterChannels(aliceToBob, bobToCarol btcutil.Amount) (
- *testLightningChannel, *testLightningChannel,
- func(), er.R) {
-
- _, _, firstChanID, _ := genIDs()
-
- // Create lightning channels between Alice<->Bob and Bob<->Carol
- alice, bob, cleanAliceBob, err :=
- createTestChannel(alicePrivKey, bobPrivKey, aliceToBob,
- aliceToBob, 0, 0, firstChanID)
- if err != nil {
- return nil, nil, nil, er.Errorf("unable to create "+
- "alice<->bob channel: %v", err)
- }
-
- return alice, bob, cleanAliceBob, nil
-}
-
-// hopNetwork is the base struct for two and three hop networks
-type hopNetwork struct {
- feeEstimator *mockFeeEstimator
- globalPolicy ForwardingPolicy
- obfuscator hop.ErrorEncrypter
-
- defaultDelta uint32
-}
-
-func newHopNetwork() *hopNetwork {
- defaultDelta := uint32(6)
-
- globalPolicy := ForwardingPolicy{
- MinHTLCOut: lnwire.NewMSatFromSatoshis(5),
- BaseFee: lnwire.NewMSatFromSatoshis(1),
- TimeLockDelta: defaultDelta,
- }
- obfuscator := NewMockObfuscator()
-
- feeEstimator := &mockFeeEstimator{
- byteFeeIn: make(chan chainfee.SatPerKWeight),
- quit: make(chan struct{}),
- }
-
- return &hopNetwork{
- feeEstimator: feeEstimator,
- globalPolicy: globalPolicy,
- obfuscator: obfuscator,
- defaultDelta: defaultDelta,
- }
-}
-
-func (h *hopNetwork) createChannelLink(server, peer *mockServer,
- channel *lnwallet.LightningChannel,
- decoder *mockIteratorDecoder) (ChannelLink, er.R) {
-
- const (
- fwdPkgTimeout = 15 * time.Second
- minFeeUpdateTimeout = 30 * time.Minute
- maxFeeUpdateTimeout = 40 * time.Minute
- )
-
- link := NewChannelLink(
- ChannelLinkConfig{
- Switch: server.htlcSwitch,
- FwrdingPolicy: h.globalPolicy,
- Peer: peer,
- Circuits: server.htlcSwitch.CircuitModifier(),
- ForwardPackets: server.htlcSwitch.ForwardPackets,
- DecodeHopIterators: decoder.DecodeHopIterators,
- ExtractErrorEncrypter: func(*btcec.PublicKey) (
- hop.ErrorEncrypter, lnwire.FailCode) {
- return h.obfuscator, lnwire.CodeNone
- },
- FetchLastChannelUpdate: mockGetChanUpdateMessage,
- Registry: server.registry,
- FeeEstimator: h.feeEstimator,
- PreimageCache: server.pCache,
- UpdateContractSignals: func(*contractcourt.ContractSignals) er.R {
- return nil
- },
- ChainEvents: &contractcourt.ChainEventSubscription{},
- SyncStates: true,
- BatchSize: 10,
- BatchTicker: ticker.NewForce(testBatchTimeout),
- FwdPkgGCTicker: ticker.NewForce(fwdPkgTimeout),
- PendingCommitTicker: ticker.NewForce(2 * time.Minute),
- MinFeeUpdateTimeout: minFeeUpdateTimeout,
- MaxFeeUpdateTimeout: maxFeeUpdateTimeout,
- OnChannelFailure: func(lnwire.ChannelID, lnwire.ShortChannelID, LinkFailureError) {},
- OutgoingCltvRejectDelta: 3,
- MaxOutgoingCltvExpiry: DefaultMaxOutgoingCltvExpiry,
- MaxFeeAllocation: DefaultMaxLinkFeeAllocation,
- NotifyActiveLink: func(wire.OutPoint) {},
- NotifyActiveChannel: func(wire.OutPoint) {},
- NotifyInactiveChannel: func(wire.OutPoint) {},
- HtlcNotifier: server.htlcSwitch.cfg.HtlcNotifier,
- },
- channel,
- )
- if err := server.htlcSwitch.AddLink(link); err != nil {
- return nil, er.Errorf("unable to add channel link: %v", err)
- }
-
- go func() {
- for {
- select {
- case <-link.(*channelLink).htlcUpdates:
- case <-link.(*channelLink).quit:
- return
- }
- }
- }()
-
- return link, nil
-}
-
-// twoHopNetwork is used for managing the created cluster of 2 hops.
-type twoHopNetwork struct {
- hopNetwork
-
- aliceServer *mockServer
- aliceChannelLink *channelLink
-
- bobServer *mockServer
- bobChannelLink *channelLink
-}
-
-// newTwoHopNetwork function creates the following topology and returns the
-// control object to manage this cluster:
-//
-// alice bob
-// server - <-connection-> - server
-// | |
-// alice htlc bob htlc
-// switch switch
-// | |
-// | |
-// alice bob
-// channel link channel link
-//
-func newTwoHopNetwork(t testing.TB,
- aliceChannel, bobChannel *lnwallet.LightningChannel,
- startingHeight uint32) *twoHopNetwork {
-
- aliceDb := aliceChannel.State().Db
- bobDb := bobChannel.State().Db
-
- hopNetwork := newHopNetwork()
-
- // Create two peers/servers.
- aliceServer, err := newMockServer(
- t, "alice", startingHeight, aliceDb, hopNetwork.defaultDelta,
- )
- if err != nil {
- t.Fatalf("unable to create alice server: %v", err)
- }
- bobServer, err := newMockServer(
- t, "bob", startingHeight, bobDb, hopNetwork.defaultDelta,
- )
- if err != nil {
- t.Fatalf("unable to create bob server: %v", err)
- }
-
- // Create mock decoder instead of sphinx one in order to mock the route
- // which htlc should follow.
- aliceDecoder := newMockIteratorDecoder()
- bobDecoder := newMockIteratorDecoder()
-
- aliceChannelLink, err := hopNetwork.createChannelLink(
- aliceServer, bobServer, aliceChannel, aliceDecoder,
- )
- if err != nil {
- t.Fatal(err)
- }
-
- bobChannelLink, err := hopNetwork.createChannelLink(
- bobServer, aliceServer, bobChannel, bobDecoder,
- )
- if err != nil {
- t.Fatal(err)
- }
-
- return &twoHopNetwork{
- aliceServer: aliceServer,
- aliceChannelLink: aliceChannelLink.(*channelLink),
-
- bobServer: bobServer,
- bobChannelLink: bobChannelLink.(*channelLink),
-
- hopNetwork: *hopNetwork,
- }
-}
-
-// start starts the two hop network alice,bob servers.
-func (n *twoHopNetwork) start() er.R {
- if err := n.aliceServer.Start(); err != nil {
- return err
- }
- if err := n.bobServer.Start(); err != nil {
- n.aliceServer.Stop()
- return err
- }
-
- return waitLinksEligible(map[string]*channelLink{
- "alice": n.aliceChannelLink,
- "bob": n.bobChannelLink,
- })
-}
-
-// stop stops nodes and cleanup its databases.
-func (n *twoHopNetwork) stop() {
- done := make(chan struct{})
- go func() {
- n.aliceServer.Stop()
- done <- struct{}{}
- }()
-
- go func() {
- n.bobServer.Stop()
- done <- struct{}{}
- }()
-
- for i := 0; i < 2; i++ {
- <-done
- }
-}
-
-func (n *twoHopNetwork) makeHoldPayment(sendingPeer, receivingPeer lnpeer.Peer,
- firstHop lnwire.ShortChannelID, hops []*hop.Payload,
- invoiceAmt, htlcAmt lnwire.MilliSatoshi,
- timelock uint32, preimage lntypes.Preimage) chan er.R {
-
- paymentErr := make(chan er.R, 1)
-
- sender := sendingPeer.(*mockServer)
- receiver := receivingPeer.(*mockServer)
-
- // Generate route convert it to blob, and return next destination for
- // htlc add request.
- blob, err := generateRoute(hops...)
- if err != nil {
- paymentErr <- err
- return paymentErr
- }
-
- rhash := preimage.Hash()
-
- var payAddr [32]byte
- if _, err := crand.Read(payAddr[:]); err != nil {
- panic(err)
- }
-
- // Generate payment: invoice and htlc.
- invoice, htlc, pid, err := generatePaymentWithPreimage(
- invoiceAmt, htlcAmt, timelock, blob,
- nil, rhash, payAddr,
- )
- if err != nil {
- paymentErr <- err
- return paymentErr
- }
-
- // Check who is last in the route and add invoice to server registry.
- if err := receiver.registry.AddInvoice(*invoice, rhash); err != nil {
- paymentErr <- err
- return paymentErr
- }
-
- // Send payment and expose err channel.
- err = sender.htlcSwitch.SendHTLC(firstHop, pid, htlc)
- if err != nil {
- paymentErr <- err
- return paymentErr
- }
-
- go func() {
- resultChan, err := sender.htlcSwitch.GetPaymentResult(
- pid, rhash, newMockDeobfuscator(),
- )
- if err != nil {
- paymentErr <- err
- return
- }
-
- result, ok := <-resultChan
- if !ok {
- paymentErr <- er.Errorf("shutting down")
- return
- }
-
- if result.Error != nil {
- paymentErr <- result.Error
- return
- }
- paymentErr <- nil
- }()
-
- return paymentErr
-}
-
-// waitLinksEligible blocks until all links the provided name-to-link map are
-// eligible to forward HTLCs.
-func waitLinksEligible(links map[string]*channelLink) er.R {
- return wait.NoError(func() er.R {
- for name, link := range links {
- if link.EligibleToForward() {
- continue
- }
- return er.Errorf("%s channel link not eligible", name)
- }
- return nil
- }, 3*time.Second)
-}
-
-// timeout implements a test level timeout.
-func timeout(t *testing.T) func() {
- done := make(chan struct{})
- go func() {
- select {
- case <-time.After(10 * time.Second):
- pprof.Lookup("goroutine").WriteTo(os.Stdout, 1)
-
- panic("test timeout")
- case <-done:
- }
- }()
-
- return func() {
- close(done)
- }
-}
diff --git a/lnd/input/input.go b/lnd/input/input.go
deleted file mode 100644
index 7e125861..00000000
--- a/lnd/input/input.go
+++ /dev/null
@@ -1,248 +0,0 @@
-package input
-
-import (
- "github.com/pkt-cash/pktd/btcutil"
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/txscript"
- "github.com/pkt-cash/pktd/wire"
-)
-
-// Input represents an abstract UTXO which is to be spent using a sweeping
-// transaction. The method provided give the caller all information needed to
-// construct a valid input within a sweeping transaction to sweep this
-// lingering UTXO.
-type Input interface {
- // Outpoint returns the reference to the output being spent, used to
- // construct the corresponding transaction input.
- OutPoint() *wire.OutPoint
-
- // RequiredTxOut returns a non-nil TxOut if input commits to a certain
- // transaction output. This is used in the SINGLE|ANYONECANPAY case to
- // make sure any presigned input is still valid by including the
- // output.
- RequiredTxOut() *wire.TxOut
-
- // RequiredLockTime returns whether this input commits to a tx locktime
- // that must be used in the transaction including it.
- RequiredLockTime() (uint32, bool)
-
- // WitnessType returns an enum specifying the type of witness that must
- // be generated in order to spend this output.
- WitnessType() WitnessType
-
- // SignDesc returns a reference to a spendable output's sign
- // descriptor, which is used during signing to compute a valid witness
- // that spends this output.
- SignDesc() *SignDescriptor
-
- // CraftInputScript returns a valid set of input scripts allowing this
- // output to be spent. The returns input scripts should target the
- // input at location txIndex within the passed transaction. The input
- // scripts generated by this method support spending p2wkh, p2wsh, and
- // also nested p2sh outputs.
- CraftInputScript(signer Signer, txn *wire.MsgTx,
- hashCache *txscript.TxSigHashes,
- txinIdx int) (*Script, er.R)
-
- // BlocksToMaturity returns the relative timelock, as a number of
- // blocks, that must be built on top of the confirmation height before
- // the output can be spent. For non-CSV locked inputs this is always
- // zero.
- BlocksToMaturity() uint32
-
- // HeightHint returns the minimum height at which a confirmed spending
- // tx can occur.
- HeightHint() uint32
-
- // UnconfParent returns information about a possibly unconfirmed parent
- // tx.
- UnconfParent() *TxInfo
-}
-
-// TxInfo describes properties of a parent tx that are relevant for CPFP.
-type TxInfo struct {
- // Fee is the fee of the tx.
- Fee btcutil.Amount
-
- // Weight is the weight of the tx.
- Weight int64
-}
-
-type inputKit struct {
- outpoint wire.OutPoint
- witnessType WitnessType
- signDesc SignDescriptor
- heightHint uint32
- blockToMaturity uint32
-
- // unconfParent contains information about a potential unconfirmed
- // parent transaction.
- unconfParent *TxInfo
-}
-
-// OutPoint returns the breached output's identifier that is to be included as
-// a transaction input.
-func (i *inputKit) OutPoint() *wire.OutPoint {
- return &i.outpoint
-}
-
-// RequiredTxOut returns a nil for the base input type.
-func (i *inputKit) RequiredTxOut() *wire.TxOut {
- return nil
-}
-
-// RequiredLockTime returns whether this input commits to a tx locktime that
-// must be used in the transaction including it. This will be false for the
-// base input type since we can re-sign for any lock time.
-func (i *inputKit) RequiredLockTime() (uint32, bool) {
- return 0, false
-}
-
-// WitnessType returns the type of witness that must be generated to spend the
-// breached output.
-func (i *inputKit) WitnessType() WitnessType {
- return i.witnessType
-}
-
-// SignDesc returns the breached output's SignDescriptor, which is used during
-// signing to compute the witness.
-func (i *inputKit) SignDesc() *SignDescriptor {
- return &i.signDesc
-}
-
-// HeightHint returns the minimum height at which a confirmed spending
-// tx can occur.
-func (i *inputKit) HeightHint() uint32 {
- return i.heightHint
-}
-
-// BlocksToMaturity returns the relative timelock, as a number of blocks, that
-// must be built on top of the confirmation height before the output can be
-// spent. For non-CSV locked inputs this is always zero.
-func (i *inputKit) BlocksToMaturity() uint32 {
- return i.blockToMaturity
-}
-
-// Cpfp returns information about a possibly unconfirmed parent tx.
-func (i *inputKit) UnconfParent() *TxInfo {
- return i.unconfParent
-}
-
-// BaseInput contains all the information needed to sweep a basic output
-// (CSV/CLTV/no time lock)
-type BaseInput struct {
- inputKit
-}
-
-// MakeBaseInput assembles a new BaseInput that can be used to construct a
-// sweep transaction.
-func MakeBaseInput(outpoint *wire.OutPoint, witnessType WitnessType,
- signDescriptor *SignDescriptor, heightHint uint32,
- unconfParent *TxInfo) BaseInput {
-
- return BaseInput{
- inputKit{
- outpoint: *outpoint,
- witnessType: witnessType,
- signDesc: *signDescriptor,
- heightHint: heightHint,
- unconfParent: unconfParent,
- },
- }
-}
-
-// NewBaseInput allocates and assembles a new *BaseInput that can be used to
-// construct a sweep transaction.
-func NewBaseInput(outpoint *wire.OutPoint, witnessType WitnessType,
- signDescriptor *SignDescriptor, heightHint uint32) *BaseInput {
-
- input := MakeBaseInput(
- outpoint, witnessType, signDescriptor, heightHint, nil,
- )
-
- return &input
-}
-
-// NewCsvInput assembles a new csv-locked input that can be used to
-// construct a sweep transaction.
-func NewCsvInput(outpoint *wire.OutPoint, witnessType WitnessType,
- signDescriptor *SignDescriptor, heightHint uint32,
- blockToMaturity uint32) *BaseInput {
-
- return &BaseInput{
- inputKit{
- outpoint: *outpoint,
- witnessType: witnessType,
- signDesc: *signDescriptor,
- heightHint: heightHint,
- blockToMaturity: blockToMaturity,
- },
- }
-}
-
-// CraftInputScript returns a valid set of input scripts allowing this output
-// to be spent. The returned input scripts should target the input at location
-// txIndex within the passed transaction. The input scripts generated by this
-// method support spending p2wkh, p2wsh, and also nested p2sh outputs.
-func (bi *BaseInput) CraftInputScript(signer Signer, txn *wire.MsgTx,
- hashCache *txscript.TxSigHashes, txinIdx int) (*Script, er.R) {
-
- witnessFunc := bi.witnessType.WitnessGenerator(signer, bi.SignDesc())
-
- return witnessFunc(txn, hashCache, txinIdx)
-}
-
-// HtlcSucceedInput constitutes a sweep input that needs a pre-image. The input
-// is expected to reside on the commitment tx of the remote party and should
-// not be a second level tx output.
-type HtlcSucceedInput struct {
- inputKit
-
- preimage []byte
-}
-
-// MakeHtlcSucceedInput assembles a new redeem input that can be used to
-// construct a sweep transaction.
-func MakeHtlcSucceedInput(outpoint *wire.OutPoint,
- signDescriptor *SignDescriptor, preimage []byte, heightHint,
- blocksToMaturity uint32) HtlcSucceedInput {
-
- return HtlcSucceedInput{
- inputKit: inputKit{
- outpoint: *outpoint,
- witnessType: HtlcAcceptedRemoteSuccess,
- signDesc: *signDescriptor,
- heightHint: heightHint,
- blockToMaturity: blocksToMaturity,
- },
- preimage: preimage,
- }
-}
-
-// CraftInputScript returns a valid set of input scripts allowing this output
-// to be spent. The returns input scripts should target the input at location
-// txIndex within the passed transaction. The input scripts generated by this
-// method support spending p2wkh, p2wsh, and also nested p2sh outputs.
-func (h *HtlcSucceedInput) CraftInputScript(signer Signer, txn *wire.MsgTx,
- hashCache *txscript.TxSigHashes, txinIdx int) (*Script, er.R) {
-
- desc := h.signDesc
- desc.SigHashes = hashCache
- desc.InputIndex = txinIdx
-
- witness, err := SenderHtlcSpendRedeem(
- signer, &desc, txn, h.preimage,
- )
- if err != nil {
- return nil, err
- }
-
- return &Script{
- Witness: witness,
- }, nil
-}
-
-// Compile-time constraints to ensure each input struct implement the Input
-// interface.
-var _ Input = (*BaseInput)(nil)
-var _ Input = (*HtlcSucceedInput)(nil)
diff --git a/lnd/input/script_utils.go b/lnd/input/script_utils.go
deleted file mode 100644
index ed26dbea..00000000
--- a/lnd/input/script_utils.go
+++ /dev/null
@@ -1,1301 +0,0 @@
-package input
-
-import (
- "bytes"
- "crypto/sha256"
- "math/big"
-
- "golang.org/x/crypto/ripemd160"
-
- "github.com/pkt-cash/pktd/btcec"
- "github.com/pkt-cash/pktd/btcutil"
- "github.com/pkt-cash/pktd/btcutil/er"
- "github.com/pkt-cash/pktd/txscript"
- "github.com/pkt-cash/pktd/txscript/opcode"
- "github.com/pkt-cash/pktd/txscript/params"
- "github.com/pkt-cash/pktd/txscript/scriptbuilder"
- "github.com/pkt-cash/pktd/wire"
-)
-
-var (
- // TODO(roasbeef): remove these and use the one's defined in txscript
- // within testnet-L.
-
- // SequenceLockTimeSeconds is the 22nd bit which indicates the lock
- // time is in seconds.
- SequenceLockTimeSeconds = uint32(1 << 22)
-)
-
-// Signature is an interface for objects that can populate signatures during
-// witness construction.
-type Signature interface {
- // Serialize returns a DER-encoded ECDSA signature.
- Serialize() []byte
-
- // Verify return true if the ECDSA signature is valid for the passed
- // message digest under the provided public key.
- Verify([]byte, *btcec.PublicKey) bool
-}
-
-// WitnessScriptHash generates a pay-to-witness-script-hash public key script
-// paying to a version 0 witness program paying to the passed redeem script.
-func WitnessScriptHash(witnessScript []byte) ([]byte, er.R) {
- bldr := scriptbuilder.NewScriptBuilder()
-
- bldr.AddOp(opcode.OP_0)
- scriptHash := sha256.Sum256(witnessScript)
- bldr.AddData(scriptHash[:])
- return bldr.Script()
-}
-
-// GenMultiSigScript generates the non-p2sh'd multisig script for 2 of 2
-// pubkeys.
-func GenMultiSigScript(aPub, bPub []byte) ([]byte, er.R) {
- if len(aPub) != 33 || len(bPub) != 33 {
- return nil, er.Errorf("pubkey size error: compressed pubkeys only")
- }
-
- // Swap to sort pubkeys if needed. Keys are sorted in lexicographical
- // order. The signatures within the scriptSig must also adhere to the
- // order, ensuring that the signatures for each public key appears in
- // the proper order on the stack.
- if bytes.Compare(aPub, bPub) == 1 {
- aPub, bPub = bPub, aPub
- }
-
- bldr := scriptbuilder.NewScriptBuilder()
- bldr.AddOp(opcode.OP_2)
- bldr.AddData(aPub) // Add both pubkeys (sorted).
- bldr.AddData(bPub)
- bldr.AddOp(opcode.OP_2)
- bldr.AddOp(opcode.OP_CHECKMULTISIG)
- return bldr.Script()
-}
-
-// GenFundingPkScript creates a redeem script, and its matching p2wsh
-// output for the funding transaction.
-func GenFundingPkScript(aPub, bPub []byte, amt int64) ([]byte, *wire.TxOut, er.R) {
- // As a sanity check, ensure that the passed amount is above zero.
- if amt <= 0 {
- return nil, nil, er.Errorf("can't create FundTx script with " +
- "zero, or negative coins")
- }
-
- // First, create the 2-of-2 multi-sig script itself.
- witnessScript, err := GenMultiSigScript(aPub, bPub)
- if err != nil {
- return nil, nil, err
- }
-
- // With the 2-of-2 script in had, generate a p2wsh script which pays
- // to the funding script.
- pkScript, err := WitnessScriptHash(witnessScript)
- if err != nil {
- return nil, nil, err
- }
-
- return witnessScript, wire.NewTxOut(amt, pkScript), nil
-}
-
-// SpendMultiSig generates the witness stack required to redeem the 2-of-2 p2wsh
-// multi-sig output.
-func SpendMultiSig(witnessScript, pubA []byte, sigA Signature,
- pubB []byte, sigB Signature) [][]byte {
-
- witness := make([][]byte, 4)
-
- // When spending a p2wsh multi-sig script, rather than an OP_0, we add
- // a nil stack element to eat the extra pop.
- witness[0] = nil
-
- // When initially generating the witnessScript, we sorted the serialized
- // public keys in descending order. So we do a quick comparison in order
- // ensure the signatures appear on the Script Virtual Machine stack in
- // the correct order.
- if bytes.Compare(pubA, pubB) == 1 {
- witness[1] = append(sigB.Serialize(), byte(params.SigHashAll))
- witness[2] = append(sigA.Serialize(), byte(params.SigHashAll))
- } else {
- witness[1] = append(sigA.Serialize(), byte(params.SigHashAll))
- witness[2] = append(sigB.Serialize(), byte(params.SigHashAll))
- }
-
- // Finally, add the preimage as the last witness element.
- witness[3] = witnessScript
-
- return witness
-}
-
-// FindScriptOutputIndex finds the index of the public key script output
-// matching 'script'. Additionally, a boolean is returned indicating if a
-// matching output was found at all.
-//
-// NOTE: The search stops after the first matching script is found.
-func FindScriptOutputIndex(tx *wire.MsgTx, script []byte) (bool, uint32) {
- found := false
- index := uint32(0)
- for i, txOut := range tx.TxOut {
- if bytes.Equal(txOut.PkScript, script) {
- found = true
- index = uint32(i)
- break
- }
- }
-
- return found, index
-}
-
-// Ripemd160H calculates the ripemd160 of the passed byte slice. This is used to
-// calculate the intermediate hash for payment pre-images. Payment hashes are
-// the result of ripemd160(sha256(paymentPreimage)). As a result, the value
-// passed in should be the sha256 of the payment hash.
-func Ripemd160H(d []byte) []byte {
- h := ripemd160.New()
- h.Write(d)
- return h.Sum(nil)
-}
-
-// SenderHTLCScript constructs the public key script for an outgoing HTLC
-// output payment for the sender's version of the commitment transaction. The
-// possible script paths from this output include:
-//
-// * The sender timing out the HTLC using the second level HTLC timeout
-// transaction.
-// * The receiver of the HTLC claiming the output on-chain with the payment
-// preimage.
-// * The receiver of the HTLC sweeping all the funds in the case that a
-// revoked commitment transaction bearing this HTLC was broadcast.
-//
-// If confirmedSpend=true, a 1 OP_CSV check will be added to the non-revocation
-// cases, to allow sweeping only after confirmation.
-//
-// Possible Input Scripts:
-// SENDR: <0> <0> (spend using HTLC timeout transaction)
-// RECVR:
-// REVOK:
-// * receiver revoke
-//
-// OP_DUP OP_HASH160 OP_EQUAL
-// OP_IF
-// OP_CHECKSIG
-// OP_ELSE
-//
-// OP_SWAP OP_SIZE 32 OP_EQUAL
-// OP_NOTIF
-// OP_DROP 2 OP_SWAP 2 OP_CHECKMULTISIG
-// OP_ELSE
-// OP_HASH160 OP_EQUALVERIFY
-// OP_CHECKSIG
-// OP_ENDIF
-// [1 OP_CHECKSEQUENCEVERIFY OP_DROP] <- if allowing confirmed spend only.
-// OP_ENDIF
-func SenderHTLCScript(senderHtlcKey, receiverHtlcKey,
- revocationKey *btcec.PublicKey, paymentHash []byte,
- confirmedSpend bool) ([]byte, er.R) {
-
- builder := scriptbuilder.NewScriptBuilder()
-
- // The opening operations are used to determine if this is the receiver
- // of the HTLC attempting to sweep all the funds due to a contract
- // breach. In this case, they'll place the revocation key at the top of
- // the stack.
- builder.AddOp(opcode.OP_DUP)
- builder.AddOp(opcode.OP_HASH160)
- builder.AddData(btcutil.Hash160(revocationKey.SerializeCompressed()))
- builder.AddOp(opcode.OP_EQUAL)
-
- // If the hash matches, then this is the revocation clause. The output
- // can be spent if the check sig operation passes.
- builder.AddOp(opcode.OP_IF)
- builder.AddOp(opcode.OP_CHECKSIG)
-
- // Otherwise, this may either be the receiver of the HTLC claiming with
- // the pre-image, or the sender of the HTLC sweeping the output after
- // it has timed out.
- builder.AddOp(opcode.OP_ELSE)
-
- // We'll do a bit of set up by pushing the receiver's key on the top of
- // the stack. This will be needed later if we decide that this is the
- // sender activating the time out clause with the HTLC timeout
- // transaction.
- builder.AddData(receiverHtlcKey.SerializeCompressed())
-
- // Atm, the top item of the stack is the receiverKey's so we use a swap
- // to expose what is either the payment pre-image or a signature.
- builder.AddOp(opcode.OP_SWAP)
-
- // With the top item swapped, check if it's 32 bytes. If so, then this
- // *may* be the payment pre-image.
- builder.AddOp(opcode.OP_SIZE)
- builder.AddInt64(32)
- builder.AddOp(opcode.OP_EQUAL)
-
- // If it isn't then this might be the sender of the HTLC activating the
- // time out clause.
- builder.AddOp(opcode.OP_NOTIF)
-
- // We'll drop the OP_IF return value off the top of the stack so we can
- // reconstruct the multi-sig script used as an off-chain covenant. If
- // two valid signatures are provided, ten then output will be deemed as
- // spendable.
- builder.AddOp(opcode.OP_DROP)
- builder.AddOp(opcode.OP_2)
- builder.AddOp(opcode.OP_SWAP)
- builder.AddData(senderHtlcKey.SerializeCompressed())
- builder.AddOp(opcode.OP_2)
- builder.AddOp(opcode.OP_CHECKMULTISIG)
-
- // Otherwise, then the only other case is that this is the receiver of
- // the HTLC sweeping it on-chain with the payment pre-image.
- builder.AddOp(opcode.OP_ELSE)
-
- // Hash the top item of the stack and compare it with the hash160 of
- // the payment hash, which is already the sha256 of the payment
- // pre-image. By using this little trick we're able save space on-chain
- // as the witness includes a 20-byte hash rather than a 32-byte hash.
- builder.AddOp(opcode.OP_HASH160)
- builder.AddData(Ripemd160H(paymentHash))
- builder.AddOp(opcode.OP_EQUALVERIFY)
-
- // This checks the receiver's signature so that a third party with
- // knowledge of the payment preimage still cannot steal the output.
- builder.AddOp(opcode.OP_CHECKSIG)
-
- // Close out the OP_IF statement above.
- builder.AddOp(opcode.OP_ENDIF)
-
- // Add 1 block CSV delay if a confirmation is required for the
- // non-revocation clauses.
- if confirmedSpend {
- builder.AddOp(opcode.OP_1)
- builder.AddOp(opcode.OP_CHECKSEQUENCEVERIFY)
- builder.AddOp(opcode.OP_DROP)
- }
-
- // Close out the OP_IF statement at the top of the script.
- builder.AddOp(opcode.OP_ENDIF)
-
- return builder.Script()
-}
-
-// SenderHtlcSpendRevokeWithKey constructs a valid witness allowing the receiver of an
-// HTLC to claim the output with knowledge of the revocation private key in the
-// scenario that the sender of the HTLC broadcasts a previously revoked
-// commitment transaction. A valid spend requires knowledge of the private key
-// that corresponds to their revocation base point and also the private key fro
-// the per commitment point, and a valid signature under the combined public
-// key.
-func SenderHtlcSpendRevokeWithKey(signer Signer, signDesc *SignDescriptor,
- revokeKey *btcec.PublicKey, sweepTx *wire.MsgTx) (wire.TxWitness, er.R) {
-
- sweepSig, err := signer.SignOutputRaw(sweepTx, signDesc)
- if err != nil {
- return nil, err
- }
-
- // The stack required to sweep a revoke HTLC output consists simply of
- // the exact witness stack as one of a regular p2wkh spend. The only
- // difference is that the keys used were derived in an adversarial
- // manner in order to encode the revocation contract into a sig+key
- // pair.
- witnessStack := wire.TxWitness(make([][]byte, 3))
- witnessStack[0] = append(sweepSig.Serialize(), byte(signDesc.HashType))
- witnessStack[1] = revokeKey.SerializeCompressed()
- witnessStack[2] = signDesc.WitnessScript
-
- return witnessStack, nil
-}
-
-// SenderHtlcSpendRevoke constructs a valid witness allowing the receiver of an
-// HTLC to claim the output with knowledge of the revocation private key in the
-// scenario that the sender of the HTLC broadcasts a previously revoked
-// commitment transaction. This method first derives the appropriate revocation
-// key, and requires that the provided SignDescriptor has a local revocation
-// basepoint and commitment secret in the PubKey and DoubleTweak fields,
-// respectively.
-func SenderHtlcSpendRevoke(signer Signer, signDesc *SignDescriptor,
- sweepTx *wire.MsgTx) (wire.TxWitness, er.R) {
-
- if signDesc.KeyDesc.PubKey == nil {
- return nil, er.Errorf("cannot generate witness with nil " +
- "KeyDesc pubkey")
- }
-
- // Derive the revocation key using the local revocation base point and
- // commitment point.
- revokeKey := DeriveRevocationPubkey(
- signDesc.KeyDesc.PubKey,
- signDesc.DoubleTweak.PubKey(),
- )
-
- return SenderHtlcSpendRevokeWithKey(signer, signDesc, revokeKey, sweepTx)
-}
-
-// SenderHtlcSpendRedeem constructs a valid witness allowing the receiver of an
-// HTLC to redeem the pending output in the scenario that the sender broadcasts
-// their version of the commitment transaction. A valid spend requires
-// knowledge of the payment preimage, and a valid signature under the receivers
-// public key.
-func SenderHtlcSpendRedeem(signer Signer, signDesc *SignDescriptor,
- sweepTx *wire.MsgTx, paymentPreimage []byte) (wire.TxWitness, er.R) {
-
- sweepSig, err := signer.SignOutputRaw(sweepTx, signDesc)
- if err != nil {
- return nil, err
- }
-
- // The stack required to spend this output is simply the signature
- // generated above under the receiver's public key, and the payment
- // pre-image.
- witnessStack := wire.TxWitness(make([][]byte, 3))
- witnessStack[0] = append(sweepSig.Serialize(), byte(signDesc.HashType))
- witnessStack[1] = paymentPreimage
- witnessStack[2] = signDesc.WitnessScript
-
- return witnessStack, nil
-}
-
-// SenderHtlcSpendTimeout constructs a valid witness allowing the sender of an
-// HTLC to activate the time locked covenant clause of a soon to be expired
-// HTLC. This script simply spends the multi-sig output using the
-// pre-generated HTLC timeout transaction.
-func SenderHtlcSpendTimeout(receiverSig Signature,
- receiverSigHash params.SigHashType, signer Signer,
- signDesc *SignDescriptor, htlcTimeoutTx *wire.MsgTx) (
- wire.TxWitness, er.R) {
-
- sweepSig, err := signer.SignOutputRaw(htlcTimeoutTx, signDesc)
- if err != nil {
- return nil, err
- }
-
- // We place a zero as the first item of the evaluated witness stack in
- // order to force Script execution to the HTLC timeout clause. The
- // second zero is required to consume the extra pop due to a bug in the
- // original OP_CHECKMULTISIG.
- witnessStack := wire.TxWitness(make([][]byte, 5))
- witnessStack[0] = nil
- witnessStack[1] = append(receiverSig.Serialize(), byte(receiverSigHash))
- witnessStack[2] = append(sweepSig.Serialize(), byte(signDesc.HashType))
- witnessStack[3] = nil
- witnessStack[4] = signDesc.WitnessScript
-
- return witnessStack, nil
-}
-
-// ReceiverHTLCScript constructs the public key script for an incoming HTLC
-// output payment for the receiver's version of the commitment transaction. The
-// possible execution paths from this script include:
-// * The receiver of the HTLC uses its second level HTLC transaction to
-// advance the state of the HTLC into the delay+claim state.
-// * The sender of the HTLC sweeps all the funds of the HTLC as a breached
-// commitment was broadcast.
-// * The sender of the HTLC sweeps the HTLC on-chain after the timeout period
-// of the HTLC has passed.
-//
-// If confirmedSpend=true, a 1 OP_CSV check will be added to the non-revocation
-// cases, to allow sweeping only after confirmation.
-//
-// Possible Input Scripts:
-// RECVR: <0> (spend using HTLC success transaction)
-// REVOK:
-// SENDR: 0
-//
-//
-// OP_DUP OP_HASH160 OP_EQUAL
-// OP_IF
-// OP_CHECKSIG
-// OP_ELSE
-//
-// OP_SWAP OP_SIZE 32 OP_EQUAL
-// OP_IF
-// OP_HASH160 OP_EQUALVERIFY
-// 2 OP_SWAP 2 OP_CHECKMULTISIG
-// OP_ELSE
-// OP_DROP OP_CHECKLOCKTIMEVERIFY OP_DROP
-// OP_CHECKSIG
-// OP_ENDIF
-// [1 OP_CHECKSEQUENCEVERIFY OP_DROP] <- if allowing confirmed spend only.
-// OP_ENDIF
-func ReceiverHTLCScript(cltvExpiry uint32, senderHtlcKey,
- receiverHtlcKey, revocationKey *btcec.PublicKey,
- paymentHash []byte, confirmedSpend bool) ([]byte, er.R) {
-
- builder := scriptbuilder.NewScriptBuilder()
-
- // The opening operations are used to determine if this is the sender
- // of the HTLC attempting to sweep all the funds due to a contract
- // breach. In this case, they'll place the revocation key at the top of
- // the stack.
- builder.AddOp(opcode.OP_DUP)
- builder.AddOp(opcode.OP_HASH160)
- builder.AddData(btcutil.Hash160(revocationKey.SerializeCompressed()))
- builder.AddOp(opcode.OP_EQUAL)
-
- // If the hash matches, then this is the revocation clause. The output
- // can be spent if the check sig operation passes.
- builder.AddOp(opcode.OP_IF)
- builder.AddOp(opcode.OP_CHECKSIG)
-
- // Otherwise, this may either be the receiver of the HTLC starting the
- // claiming process via the second level HTLC success transaction and
- // the pre-image, or the sender of the HTLC sweeping the output after
- // it has timed out.
- builder.AddOp(opcode.OP_ELSE)
-
- // We'll do a bit of set up by pushing the sender's key on the top of
- // the stack. This will be needed later if we decide that this is the
- // receiver transitioning the output to the claim state using their
- // second-level HTLC success transaction.
- builder.AddData(senderHtlcKey.SerializeCompressed())
-
- // Atm, the top item of the stack is the sender's key so we use a swap
- // to expose what is either the payment pre-image or something else.
- builder.AddOp(opcode.OP_SWAP)
-
- // With the top item swapped, check if it's 32 bytes. If so, then this
- // *may* be the payment pre-image.
- builder.AddOp(opcode.OP_SIZE)
- builder.AddInt64(32)
- builder.AddOp(opcode.OP_EQUAL)
-
- // If the item on the top of the stack is 32-bytes, then it is the
- // proper size, so this indicates that the receiver of the HTLC is
- // attempting to claim the output on-chain by transitioning the state
- // of the HTLC to delay+claim.
- builder.AddOp(opcode.OP_IF)
-
- // Next we'll hash the item on the top of the stack, if it matches the
- // payment pre-image, then we'll continue. Otherwise, we'll end the
- // script here as this is the invalid payment pre-image.
- builder.AddOp(opcode.OP_HASH160)
- builder.AddData(Ripemd160H(paymentHash))
- builder.AddOp(opcode.OP_EQUALVERIFY)
-
- // If the payment hash matches, then we'll also need to satisfy the
- // multi-sig covenant by providing both signatures of the sender and
- // receiver. If the convenient is met, then we'll allow the spending of
- // this output, but only by the HTLC success transaction.
- builder.AddOp(opcode.OP_2)
- builder.AddOp(opcode.OP_SWAP)
- builder.AddData(receiverHtlcKey.SerializeCompressed())
- builder.AddOp(opcode.OP_2)
- builder.AddOp(opcode.OP_CHECKMULTISIG)
-
- // Otherwise, this might be the sender of the HTLC attempting to sweep
- // it on-chain after the timeout.
- builder.AddOp(opcode.OP_ELSE)
-
- // We'll drop the extra item (which is the output from evaluating the
- // OP_EQUAL) above from the stack.
- builder.AddOp(opcode.OP_DROP)
-
- // With that item dropped off, we can now enforce the absolute
- // lock-time required to timeout the HTLC. If the time has passed, then
- // we'll proceed with a checksig to ensure that this is actually the
- // sender of he original HTLC.
- builder.AddInt64(int64(cltvExpiry))
- builder.AddOp(opcode.OP_CHECKLOCKTIMEVERIFY)
- builder.AddOp(opcode.OP_DROP)
- builder.AddOp(opcode.OP_CHECKSIG)
-
- // Close out the inner if statement.
- builder.AddOp(opcode.OP_ENDIF)
-
- // Add 1 block CSV delay for non-revocation clauses if confirmation is
- // required.
- if confirmedSpend {
- builder.AddOp(opcode.OP_1)
- builder.AddOp(opcode.OP_CHECKSEQUENCEVERIFY)
- builder.AddOp(opcode.OP_DROP)
- }
-
- // Close out the outer if statement.
- builder.AddOp(opcode.OP_ENDIF)
-
- return builder.Script()
-}
-
-// ReceiverHtlcSpendRedeem constructs a valid witness allowing the receiver of
-// an HTLC to redeem the conditional payment in the event that their commitment
-// transaction is broadcast. This clause transitions the state of the HLTC
-// output into the delay+claim state by activating the off-chain covenant bound
-// by the 2-of-2 multi-sig output. The HTLC success timeout transaction being
-// signed has a relative timelock delay enforced by its sequence number. This
-// delay give the sender of the HTLC enough time to revoke the output if this
-// is a breach commitment transaction.
-func ReceiverHtlcSpendRedeem(senderSig Signature,
- senderSigHash params.SigHashType, paymentPreimage []byte,
- signer Signer, signDesc *SignDescriptor, htlcSuccessTx *wire.MsgTx) (
- wire.TxWitness, er.R) {
-
- // First, we'll generate a signature for the HTLC success transaction.
- // The signDesc should be signing with the public key used as the
- // receiver's public key and also the correct single tweak.
- sweepSig, err := signer.SignOutputRaw(htlcSuccessTx, signDesc)
- if err != nil {
- return nil, err
- }
-
- // The final witness stack is used the provide the script with the
- // payment pre-image, and also execute the multi-sig clause after the
- // pre-images matches. We add a nil item at the bottom of the stack in
- // order to consume the extra pop within OP_CHECKMULTISIG.
- witnessStack := wire.TxWitness(make([][]byte, 5))
- witnessStack[0] = nil
- witnessStack[1] = append(senderSig.Serialize(), byte(senderSigHash))
- witnessStack[2] = append(sweepSig.Serialize(), byte(signDesc.HashType))
- witnessStack[3] = paymentPreimage
- witnessStack[4] = signDesc.WitnessScript
-
- return witnessStack, nil
-}
-
-// ReceiverHtlcSpendRevokeWithKey constructs a valid witness allowing the sender of an
-// HTLC within a previously revoked commitment transaction to re-claim the
-// pending funds in the case that the receiver broadcasts this revoked
-// commitment transaction.
-func ReceiverHtlcSpendRevokeWithKey(signer Signer, signDesc *SignDescriptor,
- revokeKey *btcec.PublicKey, sweepTx *wire.MsgTx) (wire.TxWitness, er.R) {
-
- // First, we'll generate a signature for the sweep transaction. The
- // signDesc should be signing with the public key used as the fully
- // derived revocation public key and also the correct double tweak
- // value.
- sweepSig, err := signer.SignOutputRaw(sweepTx, signDesc)
- if err != nil {
- return nil, err
- }
-
- // We place a zero, then one as the first items in the evaluated
- // witness stack in order to force script execution to the HTLC
- // revocation clause.
- witnessStack := wire.TxWitness(make([][]byte, 3))
- witnessStack[0] = append(sweepSig.Serialize(), byte(signDesc.HashType))
- witnessStack[1] = revokeKey.SerializeCompressed()
- witnessStack[2] = signDesc.WitnessScript
-
- return witnessStack, nil
-}
-
-// ReceiverHtlcSpendRevoke constructs a valid witness allowing the sender of an
-// HTLC within a previously revoked commitment transaction to re-claim the
-// pending funds in the case that the receiver broadcasts this revoked
-// commitment transaction. This method first derives the appropriate revocation
-// key, and requires that the provided SignDescriptor has a local revocation
-// basepoint and commitment secret in the PubKey and DoubleTweak fields,
-// respectively.
-func ReceiverHtlcSpendRevoke(signer Signer, signDesc *SignDescriptor,
- sweepTx *wire.MsgTx) (wire.TxWitness, er.R) {
-
- if signDesc.KeyDesc.PubKey == nil {
- return nil, er.Errorf("cannot generate witness with nil " +
- "KeyDesc pubkey")
- }
-
- // Derive the revocation key using the local revocation base point and
- // commitment point.
- revokeKey := DeriveRevocationPubkey(
- signDesc.KeyDesc.PubKey,
- signDesc.DoubleTweak.PubKey(),
- )
-
- return ReceiverHtlcSpendRevokeWithKey(signer, signDesc, revokeKey, sweepTx)
-}
-
-// ReceiverHtlcSpendTimeout constructs a valid witness allowing the sender of
-// an HTLC to recover the pending funds after an absolute timeout in the
-// scenario that the receiver of the HTLC broadcasts their version of the
-// commitment transaction. If the caller has already set the lock time on the
-// spending transaction, than a value of -1 can be passed for the cltvExpiry
-// value.
-//
-// NOTE: The target input of the passed transaction MUST NOT have a final
-// sequence number. Otherwise, the OP_CHECKLOCKTIMEVERIFY check will fail.
-func ReceiverHtlcSpendTimeout(signer Signer, signDesc *SignDescriptor,
- sweepTx *wire.MsgTx, cltvExpiry int32) (wire.TxWitness, er.R) {
-
- // If the caller set a proper timeout value, then we'll apply it
- // directly to the transaction.
- if cltvExpiry != -1 {
- // The HTLC output has an absolute time period before we are
- // permitted to recover the pending funds. Therefore we need to
- // set the locktime on this sweeping transaction in order to
- // pass Script verification.
- sweepTx.LockTime = uint32(cltvExpiry)
- }
-
- // With the lock time on the transaction set, we'll not generate a
- // signature for the sweep transaction. The passed sign descriptor
- // should be created using the raw public key of the sender (w/o the
- // single tweak applied), and the single tweak set to the proper value
- // taking into account the current state's point.
- sweepSig, err := signer.SignOutputRaw(sweepTx, signDesc)
- if err != nil {
- return nil, err
- }
-
- witnessStack := wire.TxWitness(make([][]byte, 3))
- witnessStack[0] = append(sweepSig.Serialize(), byte(signDesc.HashType))
- witnessStack[1] = nil
- witnessStack[2] = signDesc.WitnessScript
-
- return witnessStack, nil
-}
-
-// SecondLevelHtlcScript is the uniform script that's used as the output for
-// the second-level HTLC transactions. The second level transaction act as a
-// sort of covenant, ensuring that a 2-of-2 multi-sig output can only be
-// spent in a particular way, and to a particular output.
-//
-// Possible Input Scripts:
-// * To revoke an HTLC output that has been transitioned to the claim+delay
-// state:
-// * 1
-//
-// * To claim and HTLC output, either with a pre-image or due to a timeout:
-// * 0
-//
-// OP_IF
-//
-// OP_ELSE
-//
-// OP_CHECKSEQUENCEVERIFY
-// OP_DROP
-//
-// OP_ENDIF
-// OP_CHECKSIG
-//
-// TODO(roasbeef): possible renames for second-level
-// * transition?
-// * covenant output
-func SecondLevelHtlcScript(revocationKey, delayKey *btcec.PublicKey,
- csvDelay uint32) ([]byte, er.R) {
-
- builder := scriptbuilder.NewScriptBuilder()
-
- // If this is the revocation clause for this script is to be executed,
- // the spender will push a 1, forcing us to hit the true clause of this
- // if statement.
- builder.AddOp(opcode.OP_IF)
-
- // If this this is the revocation case, then we'll push the revocation
- // public key on the stack.
- builder.AddData(revocationKey.SerializeCompressed())
-
- // Otherwise, this is either the sender or receiver of the HTLC
- // attempting to claim the HTLC output.
- builder.AddOp(opcode.OP_ELSE)
-
- // In order to give the other party time to execute the revocation
- // clause above, we require a relative timeout to pass before the
- // output can be spent.
- builder.AddInt64(int64(csvDelay))
- builder.AddOp(opcode.OP_CHECKSEQUENCEVERIFY)
- builder.AddOp(opcode.OP_DROP)
-
- // If the relative timelock passes, then we'll add the delay key to the
- // stack to ensure that we properly authenticate the spending party.
- builder.AddData(delayKey.SerializeCompressed())
-
- // Close out the if statement.
- builder.AddOp(opcode.OP_ENDIF)
-
- // In either case, we'll ensure that only either the party possessing
- // the revocation private key, or the delay private key is able to
- // spend this output.
- builder.AddOp(opcode.OP_CHECKSIG)
-
- return builder.Script()
-}
-
-// HtlcSpendSuccess spends a second-level HTLC output. This function is to be
-// used by the sender of an HTLC to claim the output after a relative timeout
-// or the receiver of the HTLC to claim on-chain with the pre-image.
-func HtlcSpendSuccess(signer Signer, signDesc *SignDescriptor,
- sweepTx *wire.MsgTx, csvDelay uint32) (wire.TxWitness, er.R) {
-
- // We're required to wait a relative period of time before we can sweep
- // the output in order to allow the other party to contest our claim of
- // validity to this version of the commitment transaction.
- sweepTx.TxIn[0].Sequence = LockTimeToSequence(false, csvDelay)
-
- // Finally, OP_CSV requires that the version of the transaction
- // spending a pkscript with OP_CSV within it *must* be >= 2.
- sweepTx.Version = 2
-
- // As we mutated the transaction, we'll re-calculate the sighashes for
- // this instance.
- signDesc.SigHashes = txscript.NewTxSigHashes(sweepTx)
-
- // With the proper sequence and version set, we'll now sign the timeout
- // transaction using the passed signed descriptor. In order to generate
- // a valid signature, then signDesc should be using the base delay
- // public key, and the proper single tweak bytes.
- sweepSig, err := signer.SignOutputRaw(sweepTx, signDesc)
- if err != nil {
- return nil, err
- }
-
- // We set a zero as the first element the witness stack (ignoring the
- // witness script), in order to force execution to the second portion
- // of the if clause.
- witnessStack := wire.TxWitness(make([][]byte, 3))
- witnessStack[0] = append(sweepSig.Serialize(), byte(signDesc.HashType))
- witnessStack[1] = nil
- witnessStack[2] = signDesc.WitnessScript
-
- return witnessStack, nil
-}
-
-// HtlcSpendRevoke spends a second-level HTLC output. This function is to be
-// used by the sender or receiver of an HTLC to claim the HTLC after a revoked
-// commitment transaction was broadcast.
-func HtlcSpendRevoke(signer Signer, signDesc *SignDescriptor,
- revokeTx *wire.MsgTx) (wire.TxWitness, er.R) {
-
- // We don't need any spacial modifications to the transaction as this
- // is just sweeping a revoked HTLC output. So we'll generate a regular
- // witness signature.
- sweepSig, err := signer.SignOutputRaw(revokeTx, signDesc)
- if err != nil {
- return nil, err
- }
-
- // We set a one as the first element the witness stack (ignoring the
- // witness script), in order to force execution to the revocation
- // clause in the second level HTLC script.
- witnessStack := wire.TxWitness(make([][]byte, 3))
- witnessStack[0] = append(sweepSig.Serialize(), byte(signDesc.HashType))
- witnessStack[1] = []byte{1}
- witnessStack[2] = signDesc.WitnessScript
-
- return witnessStack, nil
-}
-
-// HtlcSecondLevelSpend exposes the public witness generation function for
-// spending an HTLC success transaction, either due to an expiring time lock or
-// having had the payment preimage. This method is able to spend any
-// second-level HTLC transaction, assuming the caller sets the locktime or
-// seqno properly.
-//
-// NOTE: The caller MUST set the txn version, sequence number, and sign
-// descriptor's sig hash cache before invocation.
-func HtlcSecondLevelSpend(signer Signer, signDesc *SignDescriptor,
- sweepTx *wire.MsgTx) (wire.TxWitness, er.R) {
-
- // With the proper sequence and version set, we'll now sign the timeout
- // transaction using the passed signed descriptor. In order to generate
- // a valid signature, then signDesc should be using the base delay
- // public key, and the proper single tweak bytes.
- sweepSig, err := signer.SignOutputRaw(sweepTx, signDesc)
- if err != nil {
- return nil, err
- }
-
- // We set a zero as the first element the witness stack (ignoring the
- // witness script), in order to force execution to the second portion
- // of the if clause.
- witnessStack := wire.TxWitness(make([][]byte, 3))
- witnessStack[0] = append(sweepSig.Serialize(), byte(params.SigHashAll))
- witnessStack[1] = nil
- witnessStack[2] = signDesc.WitnessScript
-
- return witnessStack, nil
-}
-
-// LockTimeToSequence converts the passed relative locktime to a sequence
-// number in accordance to BIP-68.
-// See: https://github.com/bitcoin/bips/blob/master/bip-0068.mediawiki
-// * (Compatibility)
-func LockTimeToSequence(isSeconds bool, locktime uint32) uint32 {
- if !isSeconds {
- // The locktime is to be expressed in confirmations.
- return locktime
- }
-
- // Set the 22nd bit which indicates the lock time is in seconds, then
- // shift the locktime over by 9 since the time granularity is in
- // 512-second intervals (2^9). This results in a max lock-time of
- // 33,554,431 seconds, or 1.06 years.
- return SequenceLockTimeSeconds | (locktime >> 9)
-}
-
-// CommitScriptToSelf constructs the public key script for the output on the
-// commitment transaction paying to the "owner" of said commitment transaction.
-// If the other party learns of the preimage to the revocation hash, then they
-// can claim all the settled funds in the channel, plus the unsettled funds.
-//
-// Possible Input Scripts:
-// REVOKE: 1
-// SENDRSWEEP:
-//
-// Output Script:
-// OP_IF
-//
-// OP_ELSE
-// OP_CHECKSEQUENCEVERIFY OP_DROP
-//
-// OP_ENDIF
-// OP_CHECKSIG
-func CommitScriptToSelf(csvTimeout uint32, selfKey, revokeKey *btcec.PublicKey) ([]byte, er.R) {
- // This script is spendable under two conditions: either the
- // 'csvTimeout' has passed and we can redeem our funds, or they can
- // produce a valid signature with the revocation public key. The
- // revocation public key will *only* be known to the other party if we
- // have divulged the revocation hash, allowing them to homomorphically
- // derive the proper private key which corresponds to the revoke public
- // key.
- builder := scriptbuilder.NewScriptBuilder()
-
- builder.AddOp(opcode.OP_IF)
-
- // If a valid signature using the revocation key is presented, then
- // allow an immediate spend provided the proper signature.
- builder.AddData(revokeKey.SerializeCompressed())
-
- builder.AddOp(opcode.OP_ELSE)
-
- // Otherwise, we can re-claim our funds after a CSV delay of
- // 'csvTimeout' timeout blocks, and a valid signature.
- builder.AddInt64(int64(csvTimeout))
- builder.AddOp(opcode.OP_CHECKSEQUENCEVERIFY)
- builder.AddOp(opcode.OP_DROP)
- builder.AddData(selfKey.SerializeCompressed())
-
- builder.AddOp(opcode.OP_ENDIF)
-
- // Finally, we'll validate the signature against the public key that's
- // left on the top of the stack.
- builder.AddOp(opcode.OP_CHECKSIG)
-
- return builder.Script()
-}
-
-// CommitSpendTimeout constructs a valid witness allowing the owner of a
-// particular commitment transaction to spend the output returning settled
-// funds back to themselves after a relative block timeout. In order to
-// properly spend the transaction, the target input's sequence number should be
-// set accordingly based off of the target relative block timeout within the
-// redeem script. Additionally, OP_CSV requires that the version of the
-// transaction spending a pkscript with OP_CSV within it *must* be >= 2.
-func CommitSpendTimeout(signer Signer, signDesc *SignDescriptor,
- sweepTx *wire.MsgTx) (wire.TxWitness, er.R) {
-
- // Ensure the transaction version supports the validation of sequence
- // locks and CSV semantics.
- if sweepTx.Version < 2 {
- return nil, er.Errorf("version of passed transaction MUST "+
- "be >= 2, not %v", sweepTx.Version)
- }
-
- // With the sequence number in place, we're now able to properly sign
- // off on the sweep transaction.
- sweepSig, err := signer.SignOutputRaw(sweepTx, signDesc)
- if err != nil {
- return nil, err
- }
-
- // Place an empty byte as the first item in the evaluated witness stack
- // to force script execution to the timeout spend clause. We need to
- // place an empty byte in order to ensure our script is still valid
- // from the PoV of nodes that are enforcing minimal OP_IF/OP_NOTIF.
- witnessStack := wire.TxWitness(make([][]byte, 3))
- witnessStack[0] = append(sweepSig.Serialize(), byte(signDesc.HashType))
- witnessStack[1] = nil
- witnessStack[2] = signDesc.WitnessScript
-
- return witnessStack, nil
-}
-
-// CommitSpendRevoke constructs a valid witness allowing a node to sweep the
-// settled output of a malicious counterparty who broadcasts a revoked
-// commitment transaction.
-//
-// NOTE: The passed SignDescriptor should include the raw (untweaked)
-// revocation base public key of the receiver and also the proper double tweak
-// value based on the commitment secret of the revoked commitment.
-func CommitSpendRevoke(signer Signer, signDesc *SignDescriptor,
- sweepTx *wire.MsgTx) (wire.TxWitness, er.R) {
-
- sweepSig, err := signer.SignOutputRaw(sweepTx, signDesc)
- if err != nil {
- return nil, err
- }
-
- // Place a 1 as the first item in the evaluated witness stack to
- // force script execution to the revocation clause.
- witnessStack := wire.TxWitness(make([][]byte, 3))
- witnessStack[0] = append(sweepSig.Serialize(), byte(signDesc.HashType))
- witnessStack[1] = []byte{1}
- witnessStack[2] = signDesc.WitnessScript
-
- return witnessStack, nil
-}
-
-// CommitSpendNoDelay constructs a valid witness allowing a node to spend their
-// settled no-delay output on the counterparty's commitment transaction. If the
-// tweakless field is true, then we'll omit the set where we tweak the pubkey
-// with a random set of bytes, and use it directly in the witness stack.
-//
-// NOTE: The passed SignDescriptor should include the raw (untweaked) public
-// key of the receiver and also the proper single tweak value based on the
-// current commitment point.
-func CommitSpendNoDelay(signer Signer, signDesc *SignDescriptor,
- sweepTx *wire.MsgTx, tweakless bool) (wire.TxWitness, er.R) {
-
- if signDesc.KeyDesc.PubKey == nil {
- return nil, er.Errorf("cannot generate witness with nil " +
- "KeyDesc pubkey")
- }
-
- // This is just a regular p2wkh spend which looks something like:
- // * witness: